Update utils/gemini_explainer.py
utils/gemini_explainer.py  CHANGED  (+55 -0)
@@ -188,6 +188,61 @@ Keep it under 120 words."""
             return self._generate_fallback_insights(
                 total_spending, total_rewards, optimization_score
             )
+
+    def chat_response(self, message: str, user_context: dict, chat_history: list) -> str:
+        """
+        Generate conversational response using Gemini
+
+        Args:
+            message: User's question
+            user_context: User profile data (cards, spending, etc.)
+            chat_history: Previous conversation turns
+
+        Returns:
+            str: Gemini's response
+        """
+        if not self.enabled:
+            return "Gemini AI is currently unavailable. Please check your API configuration."
+
+        try:
+            # Build context from user data
+            context_str = f"""
+You are a helpful credit card rewards expert assistant. You're chatting with a user who has the following profile:
+
+**User Profile:**
+- Cards in wallet: {', '.join(user_context.get('cards', ['Unknown']))}
+- Monthly spending: ${user_context.get('monthly_spending', 0):.2f}
+- Top spending category: {user_context.get('top_category', 'Unknown')}
+- Total rewards earned: ${user_context.get('total_rewards', 0):.2f}
+- Optimization score: {user_context.get('optimization_score', 0)}/100
+
+**Your role:**
+- Answer questions about credit cards, rewards, and optimization strategies
+- Be conversational, friendly, and concise (2-3 paragraphs max)
+- Reference the user's specific cards and spending when relevant
+- Provide actionable advice
+- If asked about a specific card, explain its benefits and best use cases
+
+**Conversation history:**
+"""
+
+            # Add recent chat history (last 3 turns)
+            for user_msg, bot_msg in chat_history[-3:]:
+                context_str += f"\nUser: {user_msg}\nAssistant: {bot_msg}\n"
+
+            context_str += f"\n**Current question:** {message}\n\nProvide a helpful, personalized response:"
+
+            # Generate response
+            response = self.model.generate_content(context_str)
+
+            if response and response.text:
+                return response.text.strip()
+            else:
+                return "I'm having trouble generating a response. Could you rephrase your question?"
+
+        except Exception as e:
+            print(f"Gemini chat error: {e}")
+            return "I encountered an error processing your question. Please try asking in a different way."
 
     def _generate_fallback_insights(
         self,