|
|
""" |
|
|
LLM-powered explanation generator for RewardPilot recommendations. |
|
|
Uses Hugging Face Inference API with Llama 3.2 for natural language explanations. |
|
|
""" |
|
|
from huggingface_hub import InferenceClient |
|
|
import os |
|
|
from typing import Dict, List, Optional |
|
|
import logging |
|
|
|
|
|
logging.basicConfig(level=logging.INFO) |
|
|
logger = logging.getLogger(__name__) |
|
|
|
|
|
|
|
|
class LLMExplainer:
    """Generate natural language explanations for credit card recommendations using LLM.

    Thin wrapper around the Hugging Face Inference API (chat completion).
    Every public method has a rule-based fallback, so callers always receive
    a usable string even when no HF_TOKEN is configured or the API call fails.
    """

    def __init__(self, model: str = "meta-llama/Llama-3.2-3B-Instruct"):
        """
        Initialize LLM explainer with Hugging Face Inference API

        Args:
            model: HuggingFace model ID to use for generation
        """
        self.model = model
        # Remains None in fallback mode (missing token or client init failure);
        # every public method checks this before attempting an API call.
        self.client = None

        hf_token = os.getenv("HF_TOKEN", "")

        if hf_token:
            try:
                self.client = InferenceClient(token=hf_token)
                logger.info(f"β LLM Explainer initialized with model: {model}")
            except Exception as e:
                # Non-fatal: degrade to rule-based explanations rather than raise.
                logger.warning(f"β οΈ Could not initialize HF client: {e}")
                self.client = None
        else:
            logger.warning("β οΈ No HF_TOKEN found. LLM explanations will use fallback mode.")

    def explain_recommendation(
        self,
        card: str,
        rewards: float,
        rewards_rate: str,
        merchant: str,
        category: str,
        amount: float,
        warnings: Optional[List[str]] = None,
        annual_potential: Optional[float] = None,
        alternatives: Optional[List[Dict]] = None
    ) -> str:
        """
        Generate natural language explanation for a card recommendation

        Args:
            card: Recommended card name
            rewards: Rewards earned for this transaction
            rewards_rate: Rewards rate (e.g., "4x points")
            merchant: Merchant name
            category: Transaction category
            amount: Transaction amount
            warnings: List of warning messages (only the first is surfaced)
            annual_potential: Annual rewards potential
            alternatives: Alternative card options; each dict must carry
                'card' and 'rewards' keys (used by _build_prompt)

        Returns:
            Natural language explanation string — LLM-generated, or the
            rule-based fallback when the client is unavailable or the call fails
        """

        # No client configured -> rule-based text, no network call.
        if not self.client:
            return self._generate_fallback_explanation(
                card, rewards, rewards_rate, merchant, category, amount, warnings
            )

        prompt = self._build_prompt(
            card, rewards, rewards_rate, merchant, category, amount,
            warnings, annual_potential, alternatives
        )

        try:
            messages = [
                {
                    "role": "system",
                    "content": "You are a friendly credit card rewards expert who provides concise, helpful explanations."
                },
                {
                    "role": "user",
                    "content": prompt
                }
            ]

            response = self.client.chat_completion(
                messages=messages,
                model=self.model,
                max_tokens=200,
                temperature=0.7,
                top_p=0.9
            )

            explanation = response.choices[0].message.content.strip()

            logger.info(f"β Generated LLM explanation for {card}")
            return explanation

        except Exception as e:
            # Any API/parsing error falls back to the deterministic template.
            logger.error(f"β LLM generation failed: {e}")
            return self._generate_fallback_explanation(
                card, rewards, rewards_rate, merchant, category, amount, warnings
            )

    def _build_prompt(
        self,
        card: str,
        rewards: float,
        rewards_rate: str,
        merchant: str,
        category: str,
        amount: float,
        warnings: Optional[List[str]],
        annual_potential: Optional[float],
        alternatives: Optional[List[Dict]]
    ) -> str:
        """Build optimized prompt for LLM.

        Assembles transaction details plus optional sections (annual
        potential, first warning, up to two alternatives) into a single
        instruction prompt for the chat completion call.
        """

        prompt = f"""Explain why this credit card is the best choice for this purchase.

Transaction Details:
- Merchant: {merchant}
- Category: {category}
- Amount: ${amount:.2f}

Recommendation:
- Best Card: {card}
- Rewards Earned: ${rewards:.2f} ({rewards_rate})
"""

        # NOTE: a 0.0 annual_potential is falsy and omitted, same as None.
        if annual_potential:
            prompt += f"- Annual Potential: ${annual_potential:.2f} in this category\n"

        # Only the first warning is included to keep the prompt short.
        if warnings:
            prompt += f"- Important Warning: {warnings[0]}\n"

        # At most two alternatives; each dict must have 'card' and 'rewards'.
        if alternatives and len(alternatives) > 0:
            alt_text = ", ".join([f"{alt['card']} (${alt['rewards']:.2f})" for alt in alternatives[:2]])
            prompt += f"- Alternatives: {alt_text}\n"

        prompt += """
Provide a friendly, concise explanation (2-3 sentences) that:
1. Explains why this card is the best choice
2. Highlights the key benefit
3. Mentions any important warnings if present

Keep it conversational and helpful."""

        return prompt

    def _generate_fallback_explanation(
        self,
        card: str,
        rewards: float,
        rewards_rate: str,
        merchant: str,
        category: str,
        amount: float,
        warnings: Optional[List[str]]
    ) -> str:
        """Generate rule-based explanation when LLM is unavailable.

        Note: `rewards` and `amount` are accepted for signature parity with
        explain_recommendation but are not interpolated into the text.
        """

        explanation = f"The **{card}** is your best choice for this {category.lower()} purchase at {merchant}. "
        explanation += f"You'll earn **{rewards_rate}**, which gives you the highest rewards rate among your cards. "

        # Surface the first warning if present; otherwise a generic closer.
        if warnings:
            explanation += f"\n\nβ οΈ **Note:** {warnings[0]}"
        else:
            explanation += "This optimizes your rewards while staying within spending caps."

        return explanation

    def generate_spending_insights(
        self,
        user_id: str,
        total_spending: float,
        total_rewards: float,
        optimization_score: int,
        top_categories: List[Dict],
        recommendations_count: int
    ) -> str:
        """
        Generate personalized spending insights for analytics dashboard

        Args:
            user_id: User identifier (not used in the prompt itself)
            total_spending: Total spending amount
            total_rewards: Total rewards earned
            optimization_score: Optimization score (0-100)
            top_categories: List of top spending categories; each dict must
                carry a 'category' key (top 3 are interpolated)
            recommendations_count: Number of optimized transactions

        Returns:
            Personalized insights text (LLM-generated, or rule-based fallback)
        """

        # No client -> skip prompt construction entirely.
        if not self.client:
            return self._generate_fallback_insights(
                total_spending, total_rewards, optimization_score
            )

        prompt = f"""Analyze this user's credit card spending and provide personalized insights.

User Spending Summary:
- Total Spending: ${total_spending:.2f}
- Total Rewards: ${total_rewards:.2f}
- Optimization Score: {optimization_score}/100
- Optimized Transactions: {recommendations_count}
- Top Categories: {', '.join([cat['category'] for cat in top_categories[:3]])}

Provide 2-3 actionable insights about:
1. Their optimization performance
2. Opportunities to earn more rewards
3. One specific tip to improve their score

Be encouraging and specific. Keep it under 100 words."""

        try:
            messages = [
                {
                    "role": "system",
                    "content": "You are a financial advisor specializing in credit card rewards optimization."
                },
                {
                    "role": "user",
                    "content": prompt
                }
            ]

            response = self.client.chat_completion(
                messages=messages,
                model=self.model,
                max_tokens=150,
                temperature=0.8
            )

            return response.choices[0].message.content.strip()

        except Exception as e:
            logger.error(f"β Insights generation failed: {e}")
            return self._generate_fallback_insights(
                total_spending, total_rewards, optimization_score
            )

    def _generate_fallback_insights(
        self,
        total_spending: float,
        total_rewards: float,
        optimization_score: int
    ) -> str:
        """Generate rule-based insights when LLM unavailable"""

        # Guard against division by zero for users with no recorded spending.
        rewards_rate = (total_rewards / total_spending * 100) if total_spending > 0 else 0

        insights = f"You're earning **${total_rewards:.2f}** in rewards on **${total_spending:.2f}** of spending "
        insights += f"(**{rewards_rate:.1f}%** effective rate). "

        # Tiered encouragement: >=80 excellent, >=60 good, else needs work.
        if optimization_score >= 80:
            insights += "π **Excellent optimization!** You're maximizing your rewards effectively. "
        elif optimization_score >= 60:
            insights += "π **Good progress!** Consider using our recommendations more consistently. "
        else:
            insights += "π‘ **Room for improvement!** Follow our card suggestions to boost your rewards. "

        insights += "Keep tracking your spending to identify new optimization opportunities."

        return insights

    def chat_response(
        self,
        user_message: str,
        user_context: Dict,
        chat_history: Optional[List[tuple]] = None
    ) -> str:
        """
        Generate conversational response for chat interface

        Args:
            user_message: User's question/message
            user_context: User's spending data and card portfolio; reads the
                optional keys 'cards', 'monthly_spending', 'top_category'
            chat_history: Previous conversation history as (user, assistant)
                message pairs; only the last 3 turns are replayed

        Returns:
            AI assistant response (LLM-generated, or keyword-matched fallback)
        """

        if not self.client:
            return self._generate_fallback_chat(user_message, user_context)

        # Inline the user's profile into the system prompt so the model can
        # personalize its answer without tool calls.
        context = f"""User Profile:
- Cards: {', '.join(user_context.get('cards', ['Unknown']))}
- Monthly Spending: ${user_context.get('monthly_spending', 0):.2f}
- Top Category: {user_context.get('top_category', 'Unknown')}
"""

        messages = [
            {
                "role": "system",
                "content": f"You are RewardPilot AI, a helpful credit card rewards assistant.\n\n{context}"
            }
        ]

        # Replay up to the last 3 (user, assistant) turns to keep context
        # without blowing the token budget.
        if chat_history:
            for user_msg, assistant_msg in chat_history[-3:]:
                messages.append({"role": "user", "content": user_msg})
                messages.append({"role": "assistant", "content": assistant_msg})

        messages.append({"role": "user", "content": user_message})

        try:
            response = self.client.chat_completion(
                messages=messages,
                model=self.model,
                max_tokens=200,
                temperature=0.8
            )

            return response.choices[0].message.content.strip()

        except Exception as e:
            logger.error(f"β Chat response failed: {e}")
            return self._generate_fallback_chat(user_message, user_context)

    def _generate_fallback_chat(self, user_message: str, user_context: Dict) -> str:
        """Generate rule-based chat response when LLM unavailable.

        Keyword matching is case-insensitive and first-match-wins, so order
        matters: greetings, then specific cards, then spending categories,
        then generic help. `user_context` is accepted for signature parity
        with chat_response but is not consulted here.
        """

        message_lower = user_message.lower()

        # --- Greetings ---
        if any(word in message_lower for word in ['hello', 'hi', 'hey', 'greetings']):
            return "Hello! π I'm RewardPilot AI. I can help you choose the best credit card for any purchase. What would you like to know?"

        # --- Specific card lookups ---
        if 'amex gold' in message_lower or 'american express gold' in message_lower:
            return "The **Amex Gold** is excellent for dining and groceries, earning **4x points** in both categories. It has a $250 annual fee but comes with dining credits. Best for foodies! π½οΈ"

        if 'chase sapphire' in message_lower:
            return "The **Chase Sapphire Reserve** is a premium travel card earning **3x points** on travel and dining. It has a $550 annual fee but offers travel credits and lounge access. Perfect for frequent travelers! βοΈ"

        if 'costco' in message_lower:
            return "The **Costco Anywhere Visa** offers **4% cashback** on gas (up to $7,000/year), 3% on restaurants and travel, 2% at Costco, and 1% elsewhere. No annual fee beyond Costco membership! β½"

        # --- Category advice ---
        if 'grocery' in message_lower or 'groceries' in message_lower:
            return "For groceries, I recommend:\n\n1. **Amex Gold** - 4x points\n2. **Blue Cash Preferred** - 6% cashback (up to $6,000/year)\n3. **Chase Freedom Flex** - 5% in rotating categories\n\nWhich sounds best for you? π"

        if 'dining' in message_lower or 'restaurant' in message_lower:
            return "For dining, top choices are:\n\n1. **Capital One Savor** - 4% cashback\n2. **Amex Gold** - 4x points\n3. **Chase Sapphire Preferred** - 3x points\n\nAll great options! π΄"

        if 'travel' in message_lower:
            return "For travel, consider:\n\n1. **Chase Sapphire Reserve** - 3x points\n2. **Amex Platinum** - 5x points on flights\n3. **Capital One Venture X** - 2x miles everywhere\n\nDepends on your travel style! βοΈ"

        if 'gas' in message_lower:
            return "For gas stations:\n\n1. **Costco Visa** - 4% cashback\n2. **BofA Customized Cash** - 3% in your choice category\n3. **Citi Custom Cash** - 5% on top category (up to $500/month)\n\nSave at the pump! β½"

        # --- Strategy questions ---
        if 'optimize' in message_lower or 'maximize' in message_lower:
            return "To optimize your rewards:\n\n1. β Use category-specific cards\n2. β Have a 2% cashback baseline card\n3. β Track spending caps\n4. β Consider annual fees vs. rewards\n\nUse the 'Get Recommendation' tab for personalized advice!"

        # --- Help / capabilities ---
        if 'help' in message_lower or 'what can you do' in message_lower:
            return "I can help you with:\n\nπ³ Choosing the best card for specific merchants\nπ Comparing card benefits\nπ― Understanding rewards rates\nπ° Optimizing your wallet strategy\n\nWhat would you like to know?"

        # Default: no keyword matched — suggest example questions.
        return "I can help you find the best credit card for any purchase! Try asking:\n\nβ’ 'Which card for groceries?'\nβ’ 'Tell me about Chase Sapphire Reserve'\nβ’ 'How can I maximize rewards?'\n\nWhat would you like to know? π€"
|
|
|
|
|
|
|
|
|
|
|
# Module-level cache backing the singleton accessor below.
_llm_explainer = None


def get_llm_explainer() -> LLMExplainer:
    """Return the module-wide LLMExplainer singleton, creating it lazily on first call."""
    global _llm_explainer
    _llm_explainer = _llm_explainer if _llm_explainer is not None else LLMExplainer()
    return _llm_explainer