# rewardpilot-web-ui/utils/llm_explainer.py  (commit f3df99f)
# NOTE: Hugging Face Hub page chrome ("raw / history / blame / 15.3 kB")
# removed so this file is valid Python.
"""
LLM-powered explanation generator for RewardPilot recommendations.
Uses Hugging Face Inference API with Llama 3.2 for natural language explanations.
"""
from huggingface_hub import InferenceClient
import os
from typing import Dict, List, Optional
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class LLMExplainer:
"""Generate natural language explanations for credit card recommendations using LLM"""
def __init__(self, model: str = "meta-llama/Llama-3.2-3B-Instruct"):
"""
Initialize LLM explainer with Hugging Face Inference API
Args:
model: HuggingFace model ID to use for generation
"""
self.model = model
self.client = None
# Try to initialize with token
hf_token = os.getenv("HF_TOKEN", "")
if hf_token:
try:
self.client = InferenceClient(token=hf_token)
# Test the connection
logger.info(f"βœ… LLM Explainer initialized with model: {model}")
except Exception as e:
logger.warning(f"⚠️ Could not initialize HF client: {e}")
self.client = None
else:
logger.warning("⚠️ No HF_TOKEN found. LLM explanations will use fallback mode.")
def explain_recommendation(
self,
card: str,
rewards: float,
rewards_rate: str,
merchant: str,
category: str,
amount: float,
warnings: Optional[List[str]] = None,
annual_potential: Optional[float] = None,
alternatives: Optional[List[Dict]] = None
) -> str:
"""
Generate natural language explanation for a card recommendation
Args:
card: Recommended card name
rewards: Rewards earned for this transaction
rewards_rate: Rewards rate (e.g., "4x points")
merchant: Merchant name
category: Transaction category
amount: Transaction amount
warnings: List of warning messages
annual_potential: Annual rewards potential
alternatives: Alternative card options
Returns:
Natural language explanation string
"""
# Fallback if LLM not available
if not self.client:
return self._generate_fallback_explanation(
card, rewards, rewards_rate, merchant, category, amount, warnings
)
# Build context-aware prompt
prompt = self._build_prompt(
card, rewards, rewards_rate, merchant, category, amount,
warnings, annual_potential, alternatives
)
try:
# Generate explanation using LLM with correct API
messages = [
{
"role": "system",
"content": "You are a friendly credit card rewards expert who provides concise, helpful explanations."
},
{
"role": "user",
"content": prompt
}
]
response = self.client.chat_completion(
messages=messages,
model=self.model,
max_tokens=200,
temperature=0.7,
top_p=0.9
)
# Extract response text
explanation = response.choices[0].message.content.strip()
logger.info(f"βœ… Generated LLM explanation for {card}")
return explanation
except Exception as e:
logger.error(f"❌ LLM generation failed: {e}")
return self._generate_fallback_explanation(
card, rewards, rewards_rate, merchant, category, amount, warnings
)
def _build_prompt(
self,
card: str,
rewards: float,
rewards_rate: str,
merchant: str,
category: str,
amount: float,
warnings: Optional[List[str]],
annual_potential: Optional[float],
alternatives: Optional[List[Dict]]
) -> str:
"""Build optimized prompt for LLM"""
prompt = f"""Explain why this credit card is the best choice for this purchase.
Transaction Details:
- Merchant: {merchant}
- Category: {category}
- Amount: ${amount:.2f}
Recommendation:
- Best Card: {card}
- Rewards Earned: ${rewards:.2f} ({rewards_rate})
"""
if annual_potential:
prompt += f"- Annual Potential: ${annual_potential:.2f} in this category\n"
if warnings:
prompt += f"- Important Warning: {warnings[0]}\n"
if alternatives and len(alternatives) > 0:
alt_text = ", ".join([f"{alt['card']} (${alt['rewards']:.2f})" for alt in alternatives[:2]])
prompt += f"- Alternatives: {alt_text}\n"
prompt += """
Provide a friendly, concise explanation (2-3 sentences) that:
1. Explains why this card is the best choice
2. Highlights the key benefit
3. Mentions any important warnings if present
Keep it conversational and helpful."""
return prompt
def _generate_fallback_explanation(
self,
card: str,
rewards: float,
rewards_rate: str,
merchant: str,
category: str,
amount: float,
warnings: Optional[List[str]]
) -> str:
"""Generate rule-based explanation when LLM is unavailable"""
explanation = f"The **{card}** is your best choice for this {category.lower()} purchase at {merchant}. "
explanation += f"You'll earn **{rewards_rate}**, which gives you the highest rewards rate among your cards. "
if warnings:
explanation += f"\n\n⚠️ **Note:** {warnings[0]}"
else:
explanation += "This optimizes your rewards while staying within spending caps."
return explanation
def generate_spending_insights(
self,
user_id: str,
total_spending: float,
total_rewards: float,
optimization_score: int,
top_categories: List[Dict],
recommendations_count: int
) -> str:
"""
Generate personalized spending insights for analytics dashboard
Args:
user_id: User identifier
total_spending: Total spending amount
total_rewards: Total rewards earned
optimization_score: Optimization score (0-100)
top_categories: List of top spending categories
recommendations_count: Number of optimized transactions
Returns:
Personalized insights text
"""
if not self.client:
return self._generate_fallback_insights(
total_spending, total_rewards, optimization_score
)
prompt = f"""Analyze this user's credit card spending and provide personalized insights.
User Spending Summary:
- Total Spending: ${total_spending:.2f}
- Total Rewards: ${total_rewards:.2f}
- Optimization Score: {optimization_score}/100
- Optimized Transactions: {recommendations_count}
- Top Categories: {', '.join([cat['category'] for cat in top_categories[:3]])}
Provide 2-3 actionable insights about:
1. Their optimization performance
2. Opportunities to earn more rewards
3. One specific tip to improve their score
Be encouraging and specific. Keep it under 100 words."""
try:
messages = [
{
"role": "system",
"content": "You are a financial advisor specializing in credit card rewards optimization."
},
{
"role": "user",
"content": prompt
}
]
response = self.client.chat_completion(
messages=messages,
model=self.model,
max_tokens=150,
temperature=0.8
)
return response.choices[0].message.content.strip()
except Exception as e:
logger.error(f"❌ Insights generation failed: {e}")
return self._generate_fallback_insights(
total_spending, total_rewards, optimization_score
)
def _generate_fallback_insights(
self,
total_spending: float,
total_rewards: float,
optimization_score: int
) -> str:
"""Generate rule-based insights when LLM unavailable"""
rewards_rate = (total_rewards / total_spending * 100) if total_spending > 0 else 0
insights = f"You're earning **${total_rewards:.2f}** in rewards on **${total_spending:.2f}** of spending "
insights += f"(**{rewards_rate:.1f}%** effective rate). "
if optimization_score >= 80:
insights += "🌟 **Excellent optimization!** You're maximizing your rewards effectively. "
elif optimization_score >= 60:
insights += "πŸ‘ **Good progress!** Consider using our recommendations more consistently. "
else:
insights += "πŸ’‘ **Room for improvement!** Follow our card suggestions to boost your rewards. "
insights += "Keep tracking your spending to identify new optimization opportunities."
return insights
def chat_response(
self,
user_message: str,
user_context: Dict,
chat_history: List[tuple] = None
) -> str:
"""
Generate conversational response for chat interface
Args:
user_message: User's question/message
user_context: User's spending data and card portfolio
chat_history: Previous conversation history
Returns:
AI assistant response
"""
if not self.client:
return self._generate_fallback_chat(user_message, user_context)
# Build context from user data
context = f"""User Profile:
- Cards: {', '.join(user_context.get('cards', ['Unknown']))}
- Monthly Spending: ${user_context.get('monthly_spending', 0):.2f}
- Top Category: {user_context.get('top_category', 'Unknown')}
"""
# Build messages with history
messages = [
{
"role": "system",
"content": f"You are RewardPilot AI, a helpful credit card rewards assistant.\n\n{context}"
}
]
# Add chat history
if chat_history:
for user_msg, assistant_msg in chat_history[-3:]: # Last 3 exchanges
messages.append({"role": "user", "content": user_msg})
messages.append({"role": "assistant", "content": assistant_msg})
# Add current message
messages.append({"role": "user", "content": user_message})
try:
response = self.client.chat_completion(
messages=messages,
model=self.model,
max_tokens=200,
temperature=0.8
)
return response.choices[0].message.content.strip()
except Exception as e:
logger.error(f"❌ Chat response failed: {e}")
return self._generate_fallback_chat(user_message, user_context)
def _generate_fallback_chat(self, user_message: str, user_context: Dict) -> str:
"""Generate rule-based chat response when LLM unavailable"""
message_lower = user_message.lower()
# Greeting
if any(word in message_lower for word in ['hello', 'hi', 'hey', 'greetings']):
return "Hello! πŸ‘‹ I'm RewardPilot AI. I can help you choose the best credit card for any purchase. What would you like to know?"
# Card-specific questions
if 'amex gold' in message_lower or 'american express gold' in message_lower:
return "The **Amex Gold** is excellent for dining and groceries, earning **4x points** in both categories. It has a $250 annual fee but comes with dining credits. Best for foodies! 🍽️"
if 'chase sapphire' in message_lower:
return "The **Chase Sapphire Reserve** is a premium travel card earning **3x points** on travel and dining. It has a $550 annual fee but offers travel credits and lounge access. Perfect for frequent travelers! ✈️"
if 'costco' in message_lower:
return "The **Costco Anywhere Visa** offers **4% cashback** on gas (up to $7,000/year), 3% on restaurants and travel, 2% at Costco, and 1% elsewhere. No annual fee beyond Costco membership! β›½"
# Category questions
if 'grocery' in message_lower or 'groceries' in message_lower:
return "For groceries, I recommend:\n\n1. **Amex Gold** - 4x points\n2. **Blue Cash Preferred** - 6% cashback (up to $6,000/year)\n3. **Chase Freedom Flex** - 5% in rotating categories\n\nWhich sounds best for you? πŸ›’"
if 'dining' in message_lower or 'restaurant' in message_lower:
return "For dining, top choices are:\n\n1. **Capital One Savor** - 4% cashback\n2. **Amex Gold** - 4x points\n3. **Chase Sapphire Preferred** - 3x points\n\nAll great options! 🍴"
if 'travel' in message_lower:
return "For travel, consider:\n\n1. **Chase Sapphire Reserve** - 3x points\n2. **Amex Platinum** - 5x points on flights\n3. **Capital One Venture X** - 2x miles everywhere\n\nDepends on your travel style! ✈️"
if 'gas' in message_lower:
return "For gas stations:\n\n1. **Costco Visa** - 4% cashback\n2. **BofA Customized Cash** - 3% in your choice category\n3. **Citi Custom Cash** - 5% on top category (up to $500/month)\n\nSave at the pump! β›½"
# Optimization
if 'optimize' in message_lower or 'maximize' in message_lower:
return "To optimize your rewards:\n\n1. βœ… Use category-specific cards\n2. βœ… Have a 2% cashback baseline card\n3. βœ… Track spending caps\n4. βœ… Consider annual fees vs. rewards\n\nUse the 'Get Recommendation' tab for personalized advice!"
# Help
if 'help' in message_lower or 'what can you do' in message_lower:
return "I can help you with:\n\nπŸ’³ Choosing the best card for specific merchants\nπŸ“Š Comparing card benefits\n🎯 Understanding rewards rates\nπŸ’° Optimizing your wallet strategy\n\nWhat would you like to know?"
# Default response
return "I can help you find the best credit card for any purchase! Try asking:\n\nβ€’ 'Which card for groceries?'\nβ€’ 'Tell me about Chase Sapphire Reserve'\nβ€’ 'How can I maximize rewards?'\n\nWhat would you like to know? πŸ€”"
# Lazily-created, process-wide explainer shared by all callers.
_llm_explainer = None


def get_llm_explainer() -> LLMExplainer:
    """Return the shared LLMExplainer, constructing it on first use."""
    global _llm_explainer
    # An LLMExplainer instance is always truthy, so `or` only constructs
    # when the cached value is still None.
    _llm_explainer = _llm_explainer or LLMExplainer()
    return _llm_explainer