Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
|
@@ -166,17 +166,18 @@ def run_llm_system_and_user(system_prompt: str, user_text: str, model: str = LLM
|
|
| 166 |
# -----------------------
|
| 167 |
# Pipeline: analyze -> produce analysis, seed chat
|
| 168 |
# -----------------------
|
|
|
|
|
|
|
|
|
|
| 169 |
def analyze_and_seed_chat(prompts_text: str):
|
| 170 |
"""
|
| 171 |
Called when user clicks Analyze.
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
Where initial_chat_history is a list of tuples for gr.Chatbot: [(user_msg, assistant_msg), ...].
|
| 175 |
"""
|
| 176 |
if not prompts_text.strip():
|
| 177 |
return "Please enter at least one prompt (query) describing what data to gather.", []
|
| 178 |
|
| 179 |
-
# Prepare queries
|
| 180 |
queries = [line.strip() for line in prompts_text.splitlines() if line.strip()]
|
| 181 |
scraped = multi_scrape(queries, delay=SCRAPE_DELAY)
|
| 182 |
if scraped.startswith("ERROR"):
|
|
@@ -186,49 +187,54 @@ def analyze_and_seed_chat(prompts_text: str):
|
|
| 186 |
user_payload = f"SCRAPED DATA:\n\n{scraped}\n\nPlease follow the system instructions and output the analysis."
|
| 187 |
|
| 188 |
analysis = run_llm_system_and_user(PROMPT_TEMPLATE, user_payload)
|
| 189 |
-
# short validation
|
| 190 |
if analysis.startswith("ERROR"):
|
| 191 |
return analysis, []
|
| 192 |
|
| 193 |
-
#
|
| 194 |
-
|
| 195 |
-
|
| 196 |
-
|
|
|
|
| 197 |
return analysis, initial_chat
|
| 198 |
|
| 199 |
# -----------------------
|
| 200 |
# Chat interaction after analysis
|
| 201 |
# -----------------------
|
| 202 |
-
|
|
|
|
|
|
|
|
|
|
| 203 |
"""
|
| 204 |
-
|
| 205 |
-
user_message: new user message
|
| 206 |
-
analysis_text:
|
| 207 |
-
|
|
|
|
| 208 |
"""
|
| 209 |
-
|
| 210 |
-
|
| 211 |
-
return chat_history + [(user_message, "No analysis available. Please click Analyze first.")]
|
| 212 |
|
| 213 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 214 |
followup_system = (
|
| 215 |
"You are AURA, a helpful analyst. The conversation context includes a recently generated analysis "
|
| 216 |
-
"from scraped data. Use that analysis as ground truth context; answer follow-up questions "
|
| 217 |
-
"about the analysis, explain rationale, and provide clarifications. Be concise and actionable."
|
| 218 |
)
|
| 219 |
|
| 220 |
-
# Build the user text: include the analysis as a reference block plus the user's question
|
| 221 |
user_payload = f"REFERENCE ANALYSIS:\n\n{analysis_text}\n\nUSER QUESTION: {user_message}\n\nRespond concisely and reference lines from the analysis where appropriate."
|
| 222 |
|
| 223 |
-
# Call LLM
|
| 224 |
assistant_reply = run_llm_system_and_user(followup_system, user_payload)
|
| 225 |
if assistant_reply.startswith("ERROR"):
|
| 226 |
assistant_reply = assistant_reply
|
| 227 |
|
| 228 |
-
# Append
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
return chat_history
|
| 232 |
|
| 233 |
# -----------------------
|
| 234 |
# Gradio UI
|
|
@@ -306,9 +312,13 @@ def build_demo():
|
|
| 306 |
updated_history = continue_chat(chat_state_list or [], user_msg, analysis_text)
|
| 307 |
return updated_history, "" # second value clears the user_input box
|
| 308 |
|
| 309 |
-
def render_chat(
|
| 310 |
-
"""
|
| 311 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 312 |
|
| 313 |
# ---- Wire handlers to UI components ----
|
| 314 |
analyze_btn.click(
|
|
|
|
| 166 |
# -----------------------
|
| 167 |
# Pipeline: analyze -> produce analysis, seed chat
|
| 168 |
# -----------------------
|
| 169 |
+
# -----------------------
|
| 170 |
+
# PIPELINE: analyze -> produce analysis, seed chat (Gradio-friendly messages)
|
| 171 |
+
# -----------------------
|
| 172 |
def analyze_and_seed_chat(prompts_text: str):
|
| 173 |
"""
|
| 174 |
Called when user clicks Analyze.
|
| 175 |
+
Returns: (analysis_text, initial_chat_messages_list)
|
| 176 |
+
initial_chat_messages_list is a list of dicts: {"role": "user"|"assistant", "content": "..."}
|
|
|
|
| 177 |
"""
|
| 178 |
if not prompts_text.strip():
|
| 179 |
return "Please enter at least one prompt (query) describing what data to gather.", []
|
| 180 |
|
|
|
|
| 181 |
queries = [line.strip() for line in prompts_text.splitlines() if line.strip()]
|
| 182 |
scraped = multi_scrape(queries, delay=SCRAPE_DELAY)
|
| 183 |
if scraped.startswith("ERROR"):
|
|
|
|
| 187 |
user_payload = f"SCRAPED DATA:\n\n{scraped}\n\nPlease follow the system instructions and output the analysis."
|
| 188 |
|
| 189 |
analysis = run_llm_system_and_user(PROMPT_TEMPLATE, user_payload)
|
|
|
|
| 190 |
if analysis.startswith("ERROR"):
|
| 191 |
return analysis, []
|
| 192 |
|
| 193 |
+
# Seed chat with a user message (the user's original analyze request) and assistant reply (the analysis)
|
| 194 |
+
initial_chat = [
|
| 195 |
+
{"role": "user", "content": f"Analyze the data I provided (prompts: {', '.join(queries)})"},
|
| 196 |
+
{"role": "assistant", "content": analysis}
|
| 197 |
+
]
|
| 198 |
return analysis, initial_chat
|
| 199 |
|
| 200 |
# -----------------------
|
| 201 |
# Chat interaction after analysis
|
| 202 |
# -----------------------
|
| 203 |
+
# -----------------------
|
| 204 |
+
# Chat: handle follow-ups (append messages as dicts)
|
| 205 |
+
# -----------------------
|
| 206 |
+
def continue_chat(chat_messages, user_message, analysis_text):
    """
    Handle one follow-up turn of the post-analysis chat.

    Args:
        chat_messages: list of message dicts ({"role": "user"|"assistant", "content": str})
            or None; treated as empty history when falsy.
        user_message: the new user message string.
        analysis_text: the original analysis string, passed to the LLM as reference context.

    Returns:
        Updated list of message dicts (a new list; the input list is not mutated).
    """
    # Work on a copy so the caller's chat-state list is never mutated in place.
    messages = list(chat_messages) if chat_messages else []

    # Ignore empty / whitespace-only submissions.
    if not user_message or not user_message.strip():
        return messages

    # Record the user's new message.
    messages.append({"role": "user", "content": user_message})

    # Build LLM input using the analysis_text as reference context.
    followup_system = (
        "You are AURA, a helpful analyst. The conversation context includes a recently generated analysis "
        "from scraped data. Use that analysis as ground truth context; answer follow-up questions, explain rationale, and provide clarifications. Be concise and actionable."
    )
    user_payload = f"REFERENCE ANALYSIS:\n\n{analysis_text}\n\nUSER QUESTION: {user_message}\n\nRespond concisely and reference lines from the analysis where appropriate."

    assistant_reply = run_llm_system_and_user(followup_system, user_payload)
    # NOTE(review): a reply beginning with "ERROR" is surfaced to the user as-is.
    # The original `if assistant_reply.startswith("ERROR"): assistant_reply = assistant_reply`
    # was a no-op self-assignment (dead code) and has been removed.

    # Record the assistant's reply and return the updated history.
    messages.append({"role": "assistant", "content": assistant_reply})
    return messages
|
|
|
|
| 238 |
|
| 239 |
# -----------------------
|
| 240 |
# Gradio UI
|
|
|
|
| 312 |
updated_history = continue_chat(chat_state_list or [], user_msg, analysis_text)
|
| 313 |
return updated_history, "" # second value clears the user_input box
|
| 314 |
|
def render_chat(chat_messages):
    """
    Normalize a chat history for gr.Chatbot.

    This Gradio version accepts a list of role/content message dicts directly,
    so the list is passed through untouched; a missing/empty history becomes [].
    """
    if chat_messages:
        return chat_messages
    return []
|
| 321 |
+
|
| 322 |
|
| 323 |
# ---- Wire handlers to UI components ----
|
| 324 |
analyze_btn.click(
|