GabrielSalem committed on
Commit
6e526ac
·
verified ·
1 Parent(s): 6fafd23

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +243 -189
app.py CHANGED
@@ -1,30 +1,29 @@
1
- # app.py
2
  """
3
- AURA Chat — Gradio Space
4
- Single-file Gradio app that:
5
- - Accepts newline-separated prompts (data queries) from the user.
6
- - On "Analyze" scrapes those queries, sends the aggregated text to a locked LLM,
7
- and returns a polished analysis with a ranked list of best stocks and an
8
- "Investment Duration" (when to enter / when to exit) for each stock.
9
- - Seeds a chat component with the generated analysis; user can then chat about it.
10
- Notes:
11
- - Model, max tokens, and delay between scrapes are fixed and cannot be changed via UI.
12
- - Set OPENAI_API_KEY in environment (Space Secrets).
13
  """
14
 
15
  import os
16
- import time
17
  import sys
 
18
  import asyncio
19
  import requests
20
  import atexit
21
  import traceback
 
 
22
  from datetime import datetime
23
  from typing import List
24
 
25
  import gradio as gr
26
 
27
- # Defensive: ensure a fresh event loop early to avoid fd race on shutdown.
28
  if sys.platform != "win32":
29
  try:
30
  loop = asyncio.new_event_loop()
@@ -33,23 +32,27 @@ if sys.platform != "win32":
33
  traceback.print_exc()
34
 
35
  # -----------------------
36
- # Configuration (fixed)
37
  # -----------------------
38
  SCRAPER_API_URL = os.getenv("SCRAPER_API_URL", "https://deep-scraper-96.created.app/api/deep-scrape")
39
  SCRAPER_HEADERS = {"User-Agent": "Mozilla/5.0", "Content-Type": "application/json"}
40
 
41
- # FIXED model & tokens (cannot be changed from UI)
42
- LLM_MODEL = os.getenv("LLM_MODEL", "openai/gpt-oss-20b:free") # locked model id
43
- MAX_TOKENS = int(os.getenv("LLM_MAX_TOKENS", "3000")) # locked max tokens
44
- SCRAPE_DELAY = float(os.getenv("SCRAPE_DELAY", "1.0")) # locked delay between scrapes (seconds)
45
 
46
- # OpenAI key is read from env (set in HF Space Secrets). We create the client lazily per-call.
47
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
48
  OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://openrouter.ai/api/v1")
49
 
50
- # If OPENAI_API_KEY is missing, UI will show a clear error on Analyze click.
 
 
 
 
 
51
  # -----------------------
52
- # Prompt engineering (fixed) — instruct the model to produce consistent output.
53
  # -----------------------
54
  PROMPT_TEMPLATE = f"""
55
  You are AURA, a concise, professional hedge-fund research assistant.
@@ -57,24 +60,20 @@ You are AURA, a concise, professional hedge-fund research assistant.
57
  Task:
58
  - Given scraped data below, produce a clear, readable analysis that:
59
  1) Lists the top 5 stock picks (or fewer if not enough data).
60
- 2) For each stock provide: Ticker / Company name, short rationale (2-3 bullets),
61
- and an explicit **Investment Duration** entry: a one-line "When to Invest" and
62
- a one-line "When to Sell" instruction (these two lines are mandatory for each stock).
63
- 3) Keep each stock entry short and scannable. Use a bullet list or numbered list.
64
- 4) At the top, provide a 2-3 sentence summary conclusion (market context + highest conviction pick).
65
- 5) Output in plain text, clean formatting, easy for humans to read. No JSON.
66
- 6) After the list, include a concise "Assumptions & Risks" section (2-3 bullet points).
67
-
68
- Important: Be decisive. If data is insufficient, state that clearly and provide the best-available picks with lower confidence.
69
- Max tokens for the LLM response: {MAX_TOKENS}
70
  Model: {LLM_MODEL}
 
71
  """
72
 
73
  # -----------------------
74
- # Helper: scraping
75
  # -----------------------
76
  def deep_scrape(query: str, retries: int = 3, timeout: int = 40) -> str:
77
- """Post a query to SCRAPER_API_URL and return a readable aggregation (or an error string)."""
78
  payload = {"query": query}
79
  last_err = None
80
  for attempt in range(1, retries + 1):
@@ -82,14 +81,12 @@ def deep_scrape(query: str, retries: int = 3, timeout: int = 40) -> str:
82
  resp = requests.post(SCRAPER_API_URL, headers=SCRAPER_HEADERS, json=payload, timeout=timeout)
83
  resp.raise_for_status()
84
  data = resp.json()
85
- # Format into readable text
86
  if isinstance(data, dict):
87
- parts = []
88
  for k, v in data.items():
89
- parts.append(f"{k.upper()}:\n{v}\n")
90
- return "\n".join(parts)
91
- else:
92
- return str(data)
93
  except Exception as e:
94
  last_err = e
95
  if attempt < retries:
@@ -99,7 +96,6 @@ def deep_scrape(query: str, retries: int = 3, timeout: int = 40) -> str:
99
  return f"ERROR: {last_err}"
100
 
101
  def multi_scrape(queries: List[str], delay: float = SCRAPE_DELAY) -> str:
102
- """Scrape multiple queries and join results into one large string."""
103
  aggregated = []
104
  for q in queries:
105
  q = q.strip()
@@ -112,21 +108,13 @@ def multi_scrape(queries: List[str], delay: float = SCRAPE_DELAY) -> str:
112
  return "\n".join(aggregated)
113
 
114
  # -----------------------
115
- # LLM interaction (safe: create+close per call)
116
  # -----------------------
117
- # Using the 'openai' SDK style that provides OpenAI class available in some providers.
118
- # If your environment uses a different SDK, adjust accordingly.
119
- try:
120
- from openai import OpenAI # keep import local; if package missing, we'll error nicely at runtime
121
- except Exception:
122
- OpenAI = None
123
-
124
  def run_llm_system_and_user(system_prompt: str, user_text: str, model: str = LLM_MODEL, max_tokens: int = MAX_TOKENS) -> str:
125
- """Create the OpenAI client lazily, call the chat completions endpoint, then close."""
126
  if OpenAI is None:
127
- return "ERROR: `openai` package not installed or available. See requirements."
128
  if not OPENAI_API_KEY:
129
- return "ERROR: OPENAI_API_KEY not set in environment. Please add it to Space Secrets."
130
 
131
  client = None
132
  try:
@@ -139,7 +127,7 @@ def run_llm_system_and_user(system_prompt: str, user_text: str, model: str = LLM
139
  ],
140
  max_tokens=max_tokens,
141
  )
142
- # Extract content robustly
143
  if hasattr(completion, "choices") and len(completion.choices) > 0:
144
  try:
145
  return completion.choices[0].message.content
@@ -149,13 +137,11 @@ def run_llm_system_and_user(system_prompt: str, user_text: str, model: str = LLM
149
  except Exception as e:
150
  return f"ERROR: LLM call failed: {e}"
151
  finally:
152
- # try to close client transport
153
  try:
154
  if client is not None:
155
  try:
156
  client.close()
157
  except Exception:
158
- # try async close if available
159
  try:
160
  asyncio.get_event_loop().run_until_complete(client.aclose())
161
  except Exception:
@@ -164,18 +150,14 @@ def run_llm_system_and_user(system_prompt: str, user_text: str, model: str = LLM
164
  pass
165
 
166
  # -----------------------
167
- # Pipeline: analyze -> produce analysis, seed chat
168
- # -----------------------
169
- # -----------------------
170
- # PIPELINE: analyze -> produce analysis, seed chat (Gradio-friendly messages)
171
  # -----------------------
172
  def analyze_and_seed_chat(prompts_text: str):
173
  """
174
- Called when user clicks Analyze.
175
- Returns: (analysis_text, initial_chat_messages_list)
176
- initial_chat_messages_list is a list of dicts: {"role": "user"|"assistant", "content": "..."}
177
  """
178
- if not prompts_text.strip():
179
  return "Please enter at least one prompt (query) describing what data to gather.", []
180
 
181
  queries = [line.strip() for line in prompts_text.splitlines() if line.strip()]
@@ -183,33 +165,20 @@ def analyze_and_seed_chat(prompts_text: str):
183
  if scraped.startswith("ERROR"):
184
  return scraped, []
185
 
186
- # Compose user payload for LLM: scraped data + instruction to format picks only
187
- user_payload = f"SCRAPED DATA:\n\n{scraped}\n\nPlease follow the system instructions and output the analysis."
188
-
189
  analysis = run_llm_system_and_user(PROMPT_TEMPLATE, user_payload)
190
  if analysis.startswith("ERROR"):
191
  return analysis, []
192
 
193
- # Seed chat with a user message (the user's original analyze request) and assistant reply (the analysis)
194
  initial_chat = [
195
- {"role": "user", "content": f"Analyze the data I provided (prompts: {', '.join(queries)})"},
196
- {"role": "assistant", "content": analysis}
197
  ]
198
  return analysis, initial_chat
199
 
200
- # -----------------------
201
- # Chat interaction after analysis
202
- # -----------------------
203
- # -----------------------
204
- # Chat: handle follow-ups (append messages as dicts)
205
- # -----------------------
206
- def continue_chat(chat_messages, user_message, analysis_text):
207
  """
208
- chat_messages: list of message dicts (role/content)
209
- user_message: new user message string
210
- analysis_text: original analysis string (kept as reference context)
211
-
212
- Returns: updated list of message dicts
213
  """
214
  if chat_messages is None:
215
  chat_messages = []
@@ -217,163 +186,248 @@ def continue_chat(chat_messages, user_message, analysis_text):
217
  if not user_message or not user_message.strip():
218
  return chat_messages
219
 
220
- # Append user's new message
221
  chat_messages.append({"role": "user", "content": user_message})
222
 
223
- # Build LLM input using the analysis_text as reference
224
  followup_system = (
225
- "You are AURA, a helpful analyst. The conversation context includes a recently generated analysis "
226
- "from scraped data. Use that analysis as ground truth context; answer follow-up questions, explain rationale, and provide clarifications. Be concise and actionable."
227
  )
228
-
229
- user_payload = f"REFERENCE ANALYSIS:\n\n{analysis_text}\n\nUSER QUESTION: {user_message}\n\nRespond concisely and reference lines from the analysis where appropriate."
230
 
231
  assistant_reply = run_llm_system_and_user(followup_system, user_payload)
232
- if assistant_reply.startswith("ERROR"):
233
- assistant_reply = assistant_reply
234
-
235
- # Append assistant reply
236
  chat_messages.append({"role": "assistant", "content": assistant_reply})
237
  return chat_messages
238
 
239
  # -----------------------
240
- # Gradio UI
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
241
  # -----------------------
242
  def build_demo():
243
  with gr.Blocks(title="AURA Chat — Hedge Fund Picks") as demo:
244
-
245
- # Inject custom CSS safely
246
  gr.HTML("""
 
 
247
  <style>
248
- .gradio-container { max-width: 1100px; margin: 18px auto; }
249
- .header { text-align: left; margin-bottom: 6px; }
250
- .muted { color: #7d8590; font-size: 14px; }
251
- .analysis-box {
252
- background: #ffffff;
253
- border-radius: 8px;
254
- padding: 12px;
255
- box-shadow: 0 4px 14px rgba(0,0,0,0.06);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
256
  }
257
  </style>
258
  """)
259
 
260
- gr.Markdown("# AURA Chat Hedge Fund Picks")
261
- gr.Markdown("**Enter one or more data prompts (one per line)** — e.g. `SEC insider transactions october 2025 company XYZ`.\n\nOnly input prompts; model, tokens and timing are fixed. Press **Analyze** to fetch & generate the picks. After analysis you can chat with the assistant about the results.")
262
-
263
- with gr.Row():
264
  with gr.Column(scale=1):
265
- prompts = gr.Textbox(
266
- lines=6,
267
- label="Data Prompts (one per line)",
268
- placeholder="SEC insider transactions october 2025\n13F filings Q3 2025\ncompany: ACME corp insider buys"
 
 
 
 
 
 
 
 
 
 
 
269
  )
270
- analyze_btn = gr.Button("Analyze", variant="primary")
271
- error_box = gr.Markdown("", visible=False)
272
- gr.Markdown(f"**Fixed settings:** Model = `{LLM_MODEL}` • Max tokens = `{MAX_TOKENS}` • Scrape delay = `{SCRAPE_DELAY}s`")
273
- gr.Markdown("**Important:** Add your `OPENAI_API_KEY` to Space Secrets before running.")
274
- with gr.Column(scale=1):
275
- analysis_out = gr.Textbox(
276
- label="Generated Analysis (Top picks with Investment Duration)",
277
- lines=18,
278
- interactive=False
279
- )
280
- gr.Markdown("**Chat with AURA about this analysis**")
281
- chatbot = gr.Chatbot(label="AURA Chat", height=420)
282
- user_input = gr.Textbox(
283
- placeholder="Ask a follow-up question about the analysis...",
284
- label="Your question"
285
- )
286
- send_btn = gr.Button("Send")
287
 
288
- # States
289
- analysis_state = gr.State("") # holds the analysis text
290
- chat_state = gr.State([]) # holds list of (user, assistant) tuples
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
291
 
292
- # ---- Handler functions (must be defined before wiring) ----
293
- def on_analyze(prompts_text):
294
- """
295
- Called when user clicks Analyze.
296
- Returns: analysis_out (string), error_box (gr.Markdown value), analysis_state (string), chat_state (list)
297
- """
298
- analysis_text, initial_chat = analyze_and_seed_chat(prompts_text)
299
- if analysis_text.startswith("ERROR"):
300
- # Show error in the error_box; leave other outputs unchanged
301
- return "", f"**Error:** {analysis_text}", "", []
302
- # Success: fill analysis_out, hide error box (empty string), set analysis_state and seed chat_state
303
- return analysis_text, "", analysis_text, initial_chat
304
-
305
- def on_send(chat_state_list, user_msg, analysis_text):
306
- """
307
- Called when user sends a chat message.
308
- Returns: updated chat_state, cleared user_input
309
- """
 
 
 
 
 
 
 
 
 
 
310
  if not user_msg or not user_msg.strip():
311
- return chat_state_list or [], ""
312
- updated_history = continue_chat(chat_state_list or [], user_msg, analysis_text)
313
- return updated_history, "" # second value clears the user_input box
 
314
 
315
- def render_chat(chat_messages):
316
  """
317
- gr.Chatbot in this Gradio version accepts a list of message dicts.
318
- Return the message list as-is (or return [] if None).
 
319
  """
320
- return chat_messages or []
 
 
 
321
 
322
-
323
- # ---- Wire handlers to UI components ----
324
  analyze_btn.click(
325
  fn=on_analyze,
326
  inputs=[prompts],
327
- outputs=[analysis_out, error_box, analysis_state, chat_state]
 
 
 
 
 
 
328
  )
329
 
330
  send_btn.click(
331
  fn=on_send,
332
  inputs=[chat_state, user_input, analysis_state],
333
- outputs=[chat_state, user_input]
334
  )
335
-
336
- # Allow Enter-key submission from the textbox
337
  user_input.submit(
338
  fn=on_send,
339
  inputs=[chat_state, user_input, analysis_state],
340
- outputs=[chat_state, user_input]
341
  )
342
 
343
- # Keep Chatbot display updated whenever chat_state changes
344
- chat_state.change(
345
- fn=render_chat,
346
- inputs=[chat_state],
347
- outputs=[chatbot]
348
- )
349
 
350
  return demo
351
 
352
-
353
-
354
- # -----------------------
355
- # Clean shutdown helper
356
- # -----------------------
357
- def _cleanup_on_exit():
358
- try:
359
- loop = asyncio.get_event_loop()
360
- if loop and not loop.is_closed():
361
- try:
362
- loop.stop()
363
- except Exception:
364
- pass
365
- try:
366
- loop.close()
367
- except Exception:
368
- pass
369
- except Exception:
370
- pass
371
-
372
- atexit.register(_cleanup_on_exit)
373
-
374
  # -----------------------
375
  # Run
376
  # -----------------------
377
  if __name__ == "__main__":
378
  demo = build_demo()
379
  demo.launch(server_name="0.0.0.0", server_port=int(os.environ.get("PORT", 7860)))
 
 
1
+ #!/usr/bin/env python3
2
  """
3
+ AURA Chat — Gradio Space (single-file)
4
+ - Fixed model, tokens, and scrape delay (not editable in UI).
5
+ - User supplies data prompts (one per line) and presses Analyze.
6
+ - App scrapes via SCRAPER_API_URL, runs LLM analysis, returns a polished "Top picks" analysis
7
+ with Investment Duration (When to Invest / When to Sell) for each stock.
8
+ - The analysis seeds a chat conversation; the user can then ask follow-ups referencing the analysis.
9
+ - Robust lifecycle: creates/closes the OpenAI client per call and tries to avoid asyncio fd shutdown warnings.
 
 
 
10
  """
11
 
12
  import os
 
13
  import sys
14
+ import time
15
  import asyncio
16
  import requests
17
  import atexit
18
  import traceback
19
+ import gc
20
+ import socket
21
  from datetime import datetime
22
  from typing import List
23
 
24
  import gradio as gr
25
 
26
+ # Defensive: make a fresh event loop early to avoid fd race during interpreter shutdown
27
  if sys.platform != "win32":
28
  try:
29
  loop = asyncio.new_event_loop()
 
32
  traceback.print_exc()
33
 
34
  # -----------------------
35
+ # Fixed configuration (locked)
36
  # -----------------------
37
  SCRAPER_API_URL = os.getenv("SCRAPER_API_URL", "https://deep-scraper-96.created.app/api/deep-scrape")
38
  SCRAPER_HEADERS = {"User-Agent": "Mozilla/5.0", "Content-Type": "application/json"}
39
 
40
+ # Locked model & tokens & delay (not editable from UI)
41
+ LLM_MODEL = os.getenv("LLM_MODEL", "openai/gpt-oss-20b:free")
42
+ MAX_TOKENS = int(os.getenv("LLM_MAX_TOKENS", "3000"))
43
+ SCRAPE_DELAY = float(os.getenv("SCRAPE_DELAY", "1.0"))
44
 
 
45
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
46
  OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://openrouter.ai/api/v1")
47
 
48
+ # Attempt to import OpenAI client class (SDK must be installed)
49
+ try:
50
+ from openai import OpenAI
51
+ except Exception:
52
+ OpenAI = None
53
+
54
  # -----------------------
55
+ # System prompt (locked)
56
  # -----------------------
57
  PROMPT_TEMPLATE = f"""
58
  You are AURA, a concise, professional hedge-fund research assistant.
 
60
  Task:
61
  - Given scraped data below, produce a clear, readable analysis that:
62
  1) Lists the top 5 stock picks (or fewer if not enough data).
63
+ 2) For each stock provide: Ticker / Company name, 2 short rationale bullets,
64
+ and an explicit **Investment Duration** entry: one-line "When to Invest" and one-line "When to Sell".
65
+ 3) Provide a 2–3 sentence summary conclusion at the top.
66
+ 4) After the list, include a concise "Assumptions & Risks" section (2–3 bullets).
67
+ 5) Use clean, scannable formatting (numbered list, bold headers). No JSON. Human-readable.
68
+
 
 
 
 
69
  Model: {LLM_MODEL}
70
+ Max tokens: {MAX_TOKENS}
71
  """
72
 
73
  # -----------------------
74
+ # Scraping helpers
75
  # -----------------------
76
  def deep_scrape(query: str, retries: int = 3, timeout: int = 40) -> str:
 
77
  payload = {"query": query}
78
  last_err = None
79
  for attempt in range(1, retries + 1):
 
81
  resp = requests.post(SCRAPER_API_URL, headers=SCRAPER_HEADERS, json=payload, timeout=timeout)
82
  resp.raise_for_status()
83
  data = resp.json()
 
84
  if isinstance(data, dict):
85
+ pieces = []
86
  for k, v in data.items():
87
+ pieces.append(f"{k.upper()}:\n{v}\n")
88
+ return "\n".join(pieces)
89
+ return str(data)
 
90
  except Exception as e:
91
  last_err = e
92
  if attempt < retries:
 
96
  return f"ERROR: {last_err}"
97
 
98
  def multi_scrape(queries: List[str], delay: float = SCRAPE_DELAY) -> str:
 
99
  aggregated = []
100
  for q in queries:
101
  q = q.strip()
 
108
  return "\n".join(aggregated)
109
 
110
  # -----------------------
111
+ # LLM call (safe create/close per-call)
112
  # -----------------------
 
 
 
 
 
 
 
113
  def run_llm_system_and_user(system_prompt: str, user_text: str, model: str = LLM_MODEL, max_tokens: int = MAX_TOKENS) -> str:
 
114
  if OpenAI is None:
115
+ return "ERROR: `openai` package not installed (see requirements)."
116
  if not OPENAI_API_KEY:
117
+ return "ERROR: OPENAI_API_KEY not set in environment."
118
 
119
  client = None
120
  try:
 
127
  ],
128
  max_tokens=max_tokens,
129
  )
130
+ # Guarded extraction
131
  if hasattr(completion, "choices") and len(completion.choices) > 0:
132
  try:
133
  return completion.choices[0].message.content
 
137
  except Exception as e:
138
  return f"ERROR: LLM call failed: {e}"
139
  finally:
 
140
  try:
141
  if client is not None:
142
  try:
143
  client.close()
144
  except Exception:
 
145
  try:
146
  asyncio.get_event_loop().run_until_complete(client.aclose())
147
  except Exception:
 
150
  pass
151
 
152
  # -----------------------
153
+ # Pipeline functions (Gradio-friendly: use message dicts)
 
 
 
154
  # -----------------------
155
  def analyze_and_seed_chat(prompts_text: str):
156
  """
157
+ Returns: analysis_text (string), initial_chat (list of message dicts)
158
+ message dicts: {"role": "user"|"assistant", "content": "..."}
 
159
  """
160
+ if not prompts_text or not prompts_text.strip():
161
  return "Please enter at least one prompt (query) describing what data to gather.", []
162
 
163
  queries = [line.strip() for line in prompts_text.splitlines() if line.strip()]
 
165
  if scraped.startswith("ERROR"):
166
  return scraped, []
167
 
168
+ user_payload = f"SCRAPED DATA:\n\n{scraped}\n\nPlease produce the analysis as instructed in the system prompt."
 
 
169
  analysis = run_llm_system_and_user(PROMPT_TEMPLATE, user_payload)
170
  if analysis.startswith("ERROR"):
171
  return analysis, []
172
 
 
173
  initial_chat = [
174
+ {"role": "user", "content": f"Analyze the data provided (prompts: {', '.join(queries)})"},
175
+ {"role": "assistant", "content": analysis},
176
  ]
177
  return analysis, initial_chat
178
 
179
def continue_chat(chat_messages: List[dict], user_message: str, analysis_text: str) -> List[dict]:
    """Handle one follow-up chat turn.

    Records the user's question, queries the LLM with the previously generated
    analysis as reference context, and records the assistant's reply.
    Mutates and returns ``chat_messages`` (a list of ``{"role", "content"}``
    dicts). A missing or blank ``user_message`` leaves the history untouched.
    """
    history = [] if chat_messages is None else chat_messages
    if not (user_message and user_message.strip()):
        return history

    history.append({"role": "user", "content": user_message})

    # System prompt anchoring the model to the previously generated analysis.
    followup_system = (
        "You are AURA, a helpful analyst. Use the provided analysis as the authoritative context. "
        "Answer follow-up questions about the analysis, explain rationale, and be concise and actionable."
    )
    user_payload = f"REFERENCE ANALYSIS:\n\n{analysis_text}\n\nUSER QUESTION: {user_message}\n\nAnswer concisely."

    history.append({"role": "assistant", "content": run_llm_system_and_user(followup_system, user_payload)})
    return history
201
 
202
  # -----------------------
203
+ # Aggressive cleanup to reduce 'Invalid file descriptor: -1' noise at shutdown
204
+ # -----------------------
205
def _aggressive_cleanup():
    """Best-effort teardown intended to run at interpreter exit.

    Tries to quiet 'Invalid file descriptor: -1' noise at shutdown by:
      1. forcing a garbage-collection pass,
      2. stopping and closing the current asyncio event loop,
      3. closing any socket objects still reachable via the GC.
    Every step is individually guarded so cleanup can never raise while the
    interpreter is shutting down.
    """
    try:
        gc.collect()
    except Exception:
        pass

    try:
        loop = asyncio.get_event_loop()
        if loop.is_running():
            try:
                loop.stop()
            except Exception:
                pass
        if not loop.is_closed():
            try:
                loop.close()
            except Exception:
                pass
    except Exception:
        pass

    # Close any lingering sockets found via GC (best-effort).
    # NOTE(review): the committed version had an outer `try:` here with no
    # matching `except` (a SyntaxError); the handler below restores balanced
    # try/except nesting while keeping the same best-effort behavior.
    try:
        for obj in gc.get_objects():
            try:
                if isinstance(obj, socket.socket):
                    try:
                        obj.close()
                    except Exception:
                        pass
            except Exception:
                pass
    except Exception:
        pass
236
+
237
+ atexit.register(_aggressive_cleanup)
238
+
239
+ # -----------------------
240
+ # Beautiful responsive UI (single build function)
241
  # -----------------------
242
  def build_demo():
243
  with gr.Blocks(title="AURA Chat — Hedge Fund Picks") as demo:
244
+ # Inject responsive CSS & fonts
 
245
  gr.HTML("""
246
+ <link rel="preconnect" href="https://fonts.googleapis.com">
247
+ <link href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;600;700;800&display=swap" rel="stylesheet">
248
  <style>
249
+ :root{
250
+ --bg:#0f1724;
251
+ --card:#0b1220;
252
+ --muted:#9aa4b2;
253
+ --accent:#6ee7b7;
254
+ --glass: rgba(255,255,255,0.03);
255
+ }
256
+ body, .gradio-container { font-family: Inter, system-ui, -apple-system, "Segoe UI", Roboto, "Helvetica Neue", Arial; background: linear-gradient(180deg,#071028 0%, #071831 100%); color: #e6eef6; }
257
+ .container { max-width:1200px; margin:18px auto; padding:18px; }
258
+ .topbar { display:flex; gap:12px; align-items:center; justify-content:space-between; margin-bottom:12px; }
259
+ .brand { display:flex; gap:12px; align-items:center; }
260
+ .logo { width:48px; height:48px; border-radius:10px; background:linear-gradient(135deg,#10b981,#06b6d4); display:flex; align-items:center; justify-content:center; font-weight:700; color:#021028; font-size:18px; box-shadow:0 8px 30px rgba(2,16,40,0.6); }
261
+ .title { font-size:20px; font-weight:700; margin:0; }
262
+ .subtitle { color:var(--muted); font-size:13px; margin-top:2px; }
263
+ .panel { background: linear-gradient(180deg, rgba(255,255,255,0.02), rgba(255,255,255,0.01)); border-radius:12px; padding:14px; box-shadow: 0 6px 30px rgba(2,6,23,0.7); border:1px solid rgba(255,255,255,0.03); }
264
+ .left { min-width: 300px; max-width: 520px; }
265
+ .right { flex:1; }
266
+ .analysis-card { background: linear-gradient(180deg, rgba(255,255,255,0.02), rgba(255,255,255,0.01)); padding:14px; border-radius:10px; min-height:220px; overflow:auto; }
267
+ .muted { color:var(--muted); font-size:13px; }
268
+ .small { font-size:12px; color:var(--muted); }
269
+ .button-row { display:flex; gap:10px; margin-top:10px; }
270
+ .pill { display:inline-block; background: rgba(255,255,255,0.03); padding:6px 10px; border-radius:999px; color:var(--muted); font-size:13px; }
271
+ .chat-container { height:420px; overflow:auto; border-radius:10px; padding:8px; background: linear-gradient(180deg, rgba(255,255,255,0.01), rgba(255,255,255,0.005)); border:1px solid rgba(255,255,255,0.03); }
272
+ /* Responsive */
273
+ @media (max-width: 880px){
274
+ .topbar { flex-direction:column; align-items:flex-start; gap:6px; }
275
+ .layout-row { flex-direction:column; gap:12px; }
276
  }
277
  </style>
278
  """)
279
 
280
+ # Top bar / header
281
+ with gr.Row(elem_id="top-row"):
 
 
282
  with gr.Column(scale=1):
283
+ gr.HTML(
284
+ """
285
+ <div class="container">
286
+ <div class="topbar">
287
+ <div class="brand">
288
+ <div class="logo">A</div>
289
+ <div>
290
+ <div class="title">AURA — Hedge Fund Picks</div>
291
+ <div class="subtitle">Scrape • Synthesize • Serve concise investment durations</div>
292
+ </div>
293
+ </div>
294
+ <div class="small">Model locked • Max tokens locked • Delay locked</div>
295
+ </div>
296
+ </div>
297
+ """
298
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
299
 
300
+ # Main layout
301
+ with gr.Row(elem_classes="layout-row", visible=True):
302
+ # Left column: inputs
303
+ with gr.Column(scale=1, min_width=320, elem_classes="left"):
304
+ with gr.Group(elem_classes="panel"):
305
+ gr.Markdown("### Data prompts")
306
+ prompts = gr.Textbox(lines=6, placeholder="SEC insider transactions october 2025\n13F filings Q3 2025\ncompany: ACME corp insider buys", label=None)
307
+ gr.Markdown("**Only provide prompts**. Model, tokens and scrape delay are fixed.")
308
+ with gr.Row():
309
+ analyze_btn = gr.Button("Analyze", variant="primary")
310
+ clear_btn = gr.Button("Clear", variant="secondary")
311
+ gr.Markdown("**Status**")
312
+ status = gr.Markdown("Idle", elem_id="status-box")
313
+ gr.Markdown("**Settings**")
314
+ gr.HTML(f"<div class='pill'>Model: {LLM_MODEL}</div> <div class='pill'>Max tokens: {MAX_TOKENS}</div> <div class='pill'>Delay: {SCRAPE_DELAY}s</div>")
315
+
316
+ # Right column: analysis + chat
317
+ with gr.Column(scale=2, min_width=420, elem_classes="right"):
318
+ with gr.Group(elem_classes="panel"):
319
+ gr.Markdown("### Generated Analysis")
320
+ analysis_html = gr.HTML("<div class='analysis-card muted'>No analysis yet. Enter prompts and press <strong>Analyze</strong>.</div>")
321
+ gr.Markdown("### Chat (ask follow-ups about the analysis)")
322
+ chatbot = gr.Chatbot(elem_classes="chat-container", label=None)
323
+ with gr.Row():
324
+ user_input = gr.Textbox(placeholder="Ask a follow-up question about the analysis...", label=None)
325
+ send_btn = gr.Button("Send", variant="primary")
326
+
327
+ # Hidden states
328
+ analysis_state = gr.State("") # string
329
+ chat_state = gr.State([]) # list of message dicts
330
+
331
+ # ---- Handler functions (defined in scope) ----
332
+ def set_status(text: str):
333
+ # small helper to update status markdown box
334
+ return gr.update(value=f"**{text}**")
335
+
336
def on_clear():
    """Reset the UI: clear the prompts box, analysis preview, chat history, chatbot, and status."""
    placeholder = (
        "<div class='analysis-card muted'>No analysis yet. "
        "Enter prompts and press <strong>Analyze</strong>.</div>"
    )
    return "", gr.update(value=placeholder), [], gr.update(value=[]), set_status("Cleared")
338
+
339
def on_analyze(prompts_text: str):
    """Analyze-button handler: run the scrape -> LLM pipeline.

    Returns a 5-tuple matching the wired outputs
    (analysis_state, analysis_html, chat_state, chatbot, status):
    the raw analysis text, an HTML preview update, the seeded chat message
    list (used for both state and display), and a status update.
    On any failure the first element is "" and the preview shows the error.
    """
    import html  # stdlib; used to safely embed the LLM output in the HTML preview

    try:
        # Collect non-empty query lines; bail out early with a gentle hint.
        queries = [line.strip() for line in (prompts_text or "").splitlines() if line.strip()]
        if not queries:
            return "", gr.update(value="<div class='analysis-card muted'>Please provide at least one data prompt.</div>"), [], [], set_status("Idle")

        scraped = multi_scrape(queries, delay=SCRAPE_DELAY)
        if scraped.startswith("ERROR"):
            return "", gr.update(value=f"<div class='analysis-card muted'><strong>Error:</strong> {scraped}</div>"), [], [], set_status("Scrape error")

        user_payload = f"SCRAPED DATA:\n\n{scraped}\n\nPlease produce the analysis as instructed in the system prompt."
        analysis_text = run_llm_system_and_user(PROMPT_TEMPLATE, user_payload)
        if analysis_text.startswith("ERROR"):
            return "", gr.update(value=f"<div class='analysis-card muted'><strong>Error:</strong> {analysis_text}</div>"), [], [], set_status("LLM error")

        # Wrap the raw LLM text in <pre> for readability.
        # BUG FIX: `gr.escape` does not exist in the Gradio API and raised
        # AttributeError on every successful analysis; use stdlib html.escape.
        safe_html = (
            "<div class='analysis-card'><pre style='white-space:pre-wrap; font-family:Inter, monospace; font-size:14px; color:#dfeefc;'>"
            + html.escape(analysis_text)
            + "</pre></div>"
        )

        # Seed the chat with the implicit user request and the assistant's analysis.
        initial_chat = [
            {"role": "user", "content": f"Analyze the data provided (prompts: {', '.join(queries)})"},
            {"role": "assistant", "content": analysis_text},
        ]

        return analysis_text, gr.update(value=safe_html), initial_chat, initial_chat, set_status("Done")
    except Exception as e:
        # Surface the full trace in server logs instead of discarding it.
        traceback.print_exc()
        return "", gr.update(value=f"<div class='analysis-card muted'><strong>Unexpected error:</strong> {e}</div>"), [], [], set_status("Error")
379
+
380
def on_send(chat_messages: List[dict], user_msg: str, analysis_text: str):
    """Chat submit handler.

    Returns (updated message-dict history, "") — the empty string clears
    the input textbox. Blank input returns the history unchanged.
    """
    history = chat_messages or []
    if not (user_msg and user_msg.strip()):
        return history, ""
    return continue_chat(history, user_msg, analysis_text or ""), ""
386
 
387
def render_chat(chat_messages: List[dict]):
    """Adapter from chat state to the Chatbot component.

    Passes the list of {"role", "content"} message dicts through unchanged,
    substituting an empty list when the state is None or empty.
    """
    return chat_messages or []
397
 
398
+ # ---- Wire up events ----
 
399
  analyze_btn.click(
400
  fn=on_analyze,
401
  inputs=[prompts],
402
+ outputs=[analysis_state, analysis_html, chat_state, chatbot, status],
403
+ )
404
+
405
+ clear_btn.click(
406
+ fn=on_clear,
407
+ inputs=[],
408
+ outputs=[prompts, analysis_html, chat_state, chatbot, status],
409
  )
410
 
411
  send_btn.click(
412
  fn=on_send,
413
  inputs=[chat_state, user_input, analysis_state],
414
+ outputs=[chat_state, user_input],
415
  )
 
 
416
  user_input.submit(
417
  fn=on_send,
418
  inputs=[chat_state, user_input, analysis_state],
419
+ outputs=[chat_state, user_input],
420
  )
421
 
422
+ # Keep chatbot UI updated
423
+ chat_state.change(fn=render_chat, inputs=[chat_state], outputs=[chatbot])
 
 
 
 
424
 
425
  return demo
426
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
427
  # -----------------------
428
  # Run
429
  # -----------------------
430
  if __name__ == "__main__":
431
  demo = build_demo()
432
  demo.launch(server_name="0.0.0.0", server_port=int(os.environ.get("PORT", 7860)))
433
+