GabrielSalem committed
Commit ccf5a94 · verified · 1 Parent(s): 2bd3c6f

Update app.py

Files changed (1):
  1. app.py +306 -123

app.py CHANGED
@@ -1,32 +1,43 @@
 
  """
- AURA Chat — Gradio Space
- Single-file Gradio app that:
- - Accepts newline-separated prompts (data queries) from the user.
- - On "Analyze" scrapes those queries, sends the aggregated text to a locked LLM,
-   and returns a polished analysis with a ranked list of best stocks and an
-   "Investment Duration" for each stock.
- - Seeds a chat component with the generated analysis; user can then chat about it.
-
- Notes:
- - Model, max tokens, and delay between scrapes are fixed.
- - User only inputs prompts; everything else is predefined.
  """
 
  import os
  import time
- import requests
  import asyncio
  import atexit
  from typing import List
 
  import gradio as gr
 
  # -----------------------
- # Configuration (fixed)
  # -----------------------
  SCRAPER_API_URL = os.getenv("SCRAPER_API_URL", "https://deep-scraper-96.created.app/api/deep-scrape")
  SCRAPER_HEADERS = {"User-Agent": "Mozilla/5.0", "Content-Type": "application/json"}
 
  LLM_MODEL = os.getenv("LLM_MODEL", "openai/gpt-oss-20b:free")
  MAX_TOKENS = int(os.getenv("LLM_MAX_TOKENS", "3000"))
  SCRAPE_DELAY = float(os.getenv("SCRAPE_DELAY", "1.0"))
@@ -34,8 +45,14 @@ SCRAPE_DELAY = float(os.getenv("SCRAPE_DELAY", "1.0"))
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
  OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://openrouter.ai/api/v1")
 
  # -----------------------
- # Prompt engineering (fixed)
  # -----------------------
  PROMPT_TEMPLATE = f"""
  You are AURA, a concise, professional hedge-fund research assistant.
@@ -43,35 +60,40 @@ You are AURA, a concise, professional hedge-fund research assistant.
  Task:
  - Given scraped data below, produce a clear, readable analysis that:
    1) Lists the top 5 stock picks (or fewer if not enough data).
-   2) For each stock provide: Ticker / Company name, short rationale (2-3 bullets),
       and an explicit **Investment Duration** entry: one-line "When to Invest" and one-line "When to Sell".
-   3) Keep each stock entry short and scannable. Use a bullet list or numbered list.
-   4) At the top, provide a 2-3 sentence summary conclusion.
-   5) Output in plain text, clean formatting.
-   6) Include a concise "Assumptions & Risks" section (2-3 bullet points).
- Max tokens for the LLM response: {MAX_TOKENS}
  Model: {LLM_MODEL}
  """
 
  # -----------------------
- # Scraping
  # -----------------------
  def deep_scrape(query: str, retries: int = 3, timeout: int = 40) -> str:
      payload = {"query": query}
      last_err = None
-     for attempt in range(retries):
          try:
              resp = requests.post(SCRAPER_API_URL, headers=SCRAPER_HEADERS, json=payload, timeout=timeout)
              resp.raise_for_status()
              data = resp.json()
              if isinstance(data, dict):
-                 return "\n".join(f"{k.upper()}:\n{v}" for k, v in data.items())
-             else:
-                 return str(data)
          except Exception as e:
              last_err = e
-             time.sleep(1.0)
-     return f"ERROR: Scraper failed: {last_err}"
 
  def multi_scrape(queries: List[str], delay: float = SCRAPE_DELAY) -> str:
      aggregated = []
@@ -80,170 +102,331 @@ def multi_scrape(queries: List[str], delay: float = SCRAPE_DELAY) -> str:
          if not q:
              continue
          aggregated.append(f"\n=== QUERY: {q} ===\n")
-         aggregated.append(deep_scrape(q))
          time.sleep(delay)
      return "\n".join(aggregated)
 
  # -----------------------
- # LLM call
  # -----------------------
- try:
-     from openai import OpenAI
- except ImportError:
-     OpenAI = None
-
- def run_llm_system_and_user(system_prompt: str, user_text: str) -> str:
      if OpenAI is None:
-         return "ERROR: `openai` package not installed."
      if not OPENAI_API_KEY:
          return "ERROR: OPENAI_API_KEY not set in environment."
 
-     client = OpenAI(base_url=OPENAI_BASE_URL, api_key=OPENAI_API_KEY)
      try:
          completion = client.chat.completions.create(
-             model=LLM_MODEL,
-             messages=[{"role": "system", "content": system_prompt},
-                       {"role": "user", "content": user_text}],
-             max_tokens=MAX_TOKENS
          )
-         return completion.choices[0].message.content
      except Exception as e:
          return f"ERROR: LLM call failed: {e}"
      finally:
          try:
-             client.close()
          except Exception:
              pass
 
  # -----------------------
- # Analysis pipeline
  # -----------------------
  def analyze_and_seed_chat(prompts_text: str):
-     if not prompts_text.strip():
-         return "Please enter at least one prompt.", []
 
      queries = [line.strip() for line in prompts_text.splitlines() if line.strip()]
-     scraped = multi_scrape(queries)
      if scraped.startswith("ERROR"):
          return scraped, []
 
-     user_payload = f"SCRAPED DATA:\n\n{scraped}\n\nPlease follow the system instructions."
      analysis = run_llm_system_and_user(PROMPT_TEMPLATE, user_payload)
      if analysis.startswith("ERROR"):
          return analysis, []
 
-     # Seed chat as a list of dicts
      initial_chat = [
-         {"role": "user", "content": f"Analyze the data I provided (prompts: {', '.join(queries)})"},
-         {"role": "assistant", "content": analysis}
      ]
      return analysis, initial_chat
 
- def continue_chat(chat_messages, user_message, analysis_text):
-     if not user_message.strip():
          return chat_messages
      chat_messages.append({"role": "user", "content": user_message})
      followup_system = (
-         "You are AURA, a helpful analyst. The conversation context includes a recently generated analysis."
      )
-     user_payload = f"REFERENCE ANALYSIS:\n\n{analysis_text}\n\nUSER QUESTION: {user_message}\n\nRespond concisely."
      assistant_reply = run_llm_system_and_user(followup_system, user_payload)
      chat_messages.append({"role": "assistant", "content": assistant_reply})
      return chat_messages
 
- # Convert dict chat to Gradio format
- def convert_to_gradio_chat_format(chat_messages):
-     formatted = []
-     i = 0
-     while i < len(chat_messages):
-         if chat_messages[i]["role"] == "user":
-             user_msg = chat_messages[i]["content"]
-             assistant_msg = ""
-             if i + 1 < len(chat_messages) and chat_messages[i + 1]["role"] == "assistant":
-                 assistant_msg = chat_messages[i + 1]["content"]
-                 i += 1
-             formatted.append((user_msg, assistant_msg))
-         i += 1
-     return formatted
-
- # -----------------------
- # Gradio UI
- # -----------------------
- def build_ui():
      with gr.Blocks(title="AURA Chat — Hedge Fund Picks") as demo:
-         # Custom CSS
          gr.HTML("""
          <style>
-         .gradio-container { max-width: 1200px; margin: 20px auto; font-family: 'Arial', sans-serif; }
-         .analysis-box { background: #f9f9f9; border-radius: 10px; padding: 12px; box-shadow: 0 4px 12px rgba(0,0,0,0.08); }
-         .chat-box { background: #ffffff; border-radius: 10px; padding: 8px; box-shadow: 0 2px 10px rgba(0,0,0,0.05); }
          </style>
          """)
 
-         gr.Markdown("# AURA Chat Hedge Fund Picks")
-         gr.Markdown("Enter data prompts (one per line). Click **Analyze**. Then chat about the generated analysis.")
-
-         with gr.Row():
-             # Left container: input
              with gr.Column(scale=1):
-                 prompts = gr.Textbox(
-                     lines=6,
-                     label="Data Prompts (one per line)",
-                     placeholder="SEC insider transactions october 2025\n13F filings Q3 2025\ncompany: ACME corp insider buys"
                  )
-                 analyze_btn = gr.Button("Analyze", variant="primary")
-                 error_box = gr.Markdown("", visible=False)
-                 gr.Markdown(f"**Fixed settings:** Model = `{LLM_MODEL}`, Max tokens = `{MAX_TOKENS}`, Scrape delay = `{SCRAPE_DELAY}s`")
-                 gr.Markdown("Add your `OPENAI_API_KEY` to Space Secrets.")
-
-             # Right container: analysis + chat
-             with gr.Column(scale=2):
-                 analysis_out = gr.Textbox(label="Generated Analysis", lines=18, interactive=False, elem_classes="analysis-box")
-                 gr.Markdown("**Chat with AURA about this analysis**")
-                 chatbot = gr.Chatbot(label="AURA Chat", height=420, elem_classes="chat-box")
-                 user_input = gr.Textbox(placeholder="Ask follow-up questions...", label="Your question")
-                 send_btn = gr.Button("Send")
-
-         # States
-         analysis_state = gr.State("")
-         chat_state = gr.State([])
-
-         # Handlers
-         def on_analyze(prompts_text):
-             analysis_text, initial_chat = analyze_and_seed_chat(prompts_text)
-             if analysis_text.startswith("ERROR"):
-                 return "", f"**Error:** {analysis_text}", "", []
-             return analysis_text, "", analysis_text, initial_chat
-
-         def on_send(chat_state_list, user_msg, analysis_text):
-             updated_history = continue_chat(chat_state_list or [], user_msg, analysis_text)
-             return updated_history, ""
 
          analyze_btn.click(
              fn=on_analyze,
              inputs=[prompts],
-             outputs=[analysis_out, error_box, analysis_state, chat_state]
          )
          send_btn.click(
              fn=on_send,
              inputs=[chat_state, user_input, analysis_state],
-             outputs=[chat_state, user_input]
          )
          user_input.submit(
              fn=on_send,
              inputs=[chat_state, user_input, analysis_state],
-             outputs=[chat_state, user_input]
-         )
-         chat_state.change(
-             fn=convert_to_gradio_chat_format,
-             inputs=[chat_state],
-             outputs=[chatbot]
          )
      return demo
 
  # -----------------------
  # Run
  # -----------------------
  if __name__ == "__main__":
-     demo = build_ui()
      demo.launch(server_name="0.0.0.0", server_port=int(os.environ.get("PORT", 7860)))

+ #!/usr/bin/env python3
  """
+ AURA Chat — Gradio Space (single-file)
+ - Fixed model, tokens, and scrape delay (not editable in UI).
+ - User supplies data prompts (one per line) and presses Analyze.
+ - App scrapes via SCRAPER_API_URL, runs LLM analysis, and returns a polished "Top picks" analysis
+   with an Investment Duration (When to Invest / When to Sell) for each stock.
+ - The analysis seeds a chat conversation; the user can then ask follow-ups referencing the analysis.
+ - Robust lifecycle: creates/closes the OpenAI client per call and tries to avoid asyncio fd shutdown warnings.
  """
 
  import os
+ import sys
  import time
  import asyncio
+ import requests
  import atexit
+ import traceback
+ import gc
+ import html  # stdlib escape, used for the HTML preview in on_analyze
+ import socket
+ from datetime import datetime
  from typing import List
 
  import gradio as gr
 
+ # Defensive: make a fresh event loop early to avoid fd race during interpreter shutdown
+ if sys.platform != "win32":
+     try:
+         loop = asyncio.new_event_loop()
+         asyncio.set_event_loop(loop)
+     except Exception:
+         traceback.print_exc()
+
  # -----------------------
+ # Fixed configuration (locked)
  # -----------------------
  SCRAPER_API_URL = os.getenv("SCRAPER_API_URL", "https://deep-scraper-96.created.app/api/deep-scrape")
  SCRAPER_HEADERS = {"User-Agent": "Mozilla/5.0", "Content-Type": "application/json"}
 
+ # Locked model & tokens & delay (not editable from UI)
  LLM_MODEL = os.getenv("LLM_MODEL", "openai/gpt-oss-20b:free")
  MAX_TOKENS = int(os.getenv("LLM_MAX_TOKENS", "3000"))
  SCRAPE_DELAY = float(os.getenv("SCRAPE_DELAY", "1.0"))
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
  OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://openrouter.ai/api/v1")
 
+ # Attempt to import the OpenAI client class (SDK must be installed)
+ try:
+     from openai import OpenAI
+ except Exception:
+     OpenAI = None
+
  # -----------------------
+ # System prompt (locked)
  # -----------------------
  PROMPT_TEMPLATE = f"""
  You are AURA, a concise, professional hedge-fund research assistant.
  Task:
  - Given scraped data below, produce a clear, readable analysis that:
    1) Lists the top 5 stock picks (or fewer if not enough data).
+   2) For each stock provide: Ticker / Company name, 2 short rationale bullets,
       and an explicit **Investment Duration** entry: one-line "When to Invest" and one-line "When to Sell".
+   3) Provide a 2–3 sentence summary conclusion at the top.
+   4) After the list, include a concise "Assumptions & Risks" section (2–3 bullets).
+   5) Use clean, scannable formatting (numbered list, bold headers). No JSON. Human-readable.
+
  Model: {LLM_MODEL}
+ Max tokens: {MAX_TOKENS}
  """
 
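Note that `PROMPT_TEMPLATE` is an f-string, so `{LLM_MODEL}` and `{MAX_TOKENS}` are interpolated once at import time rather than per request. A minimal sketch of what its footer resolves to, assuming the default env values above with no overrides set:

```python
# Sketch only: default values from the config block above, no env overrides.
LLM_MODEL = "openai/gpt-oss-20b:free"
MAX_TOKENS = 3000
footer = f"Model: {LLM_MODEL}\nMax tokens: {MAX_TOKENS}"
print(footer)
# Model: openai/gpt-oss-20b:free
# Max tokens: 3000
```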
  # -----------------------
+ # Scraping helpers
  # -----------------------
  def deep_scrape(query: str, retries: int = 3, timeout: int = 40) -> str:
      payload = {"query": query}
      last_err = None
+     for attempt in range(1, retries + 1):
          try:
              resp = requests.post(SCRAPER_API_URL, headers=SCRAPER_HEADERS, json=payload, timeout=timeout)
              resp.raise_for_status()
              data = resp.json()
              if isinstance(data, dict):
+                 pieces = []
+                 for k, v in data.items():
+                     pieces.append(f"{k.upper()}:\n{v}\n")
+                 return "\n".join(pieces)
+             return str(data)
          except Exception as e:
              last_err = e
+             if attempt < retries:
+                 time.sleep(1.0)
+             else:
+                 return f"ERROR: Scraper failed: {e}"
+     return f"ERROR: {last_err}"
 
  def multi_scrape(queries: List[str], delay: float = SCRAPE_DELAY) -> str:
      aggregated = []
          if not q:
              continue
          aggregated.append(f"\n=== QUERY: {q} ===\n")
+         scraped = deep_scrape(q)
+         aggregated.append(scraped)
          time.sleep(delay)
      return "\n".join(aggregated)
 
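For a dict response, `deep_scrape` flattens each key/value pair into an upper-cased section separated by blank lines. A quick illustration with a made-up payload (the real response shape depends on the scraper API):

```python
# Hypothetical payload; keys and values are illustrative only.
data = {"summary": "ACME insiders bought 120k shares", "source": "sec.gov"}
pieces = [f"{k.upper()}:\n{v}\n" for k, v in data.items()]
print("\n".join(pieces))
# SUMMARY:
# ACME insiders bought 120k shares
#
# SOURCE:
# sec.gov
```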
  # -----------------------
+ # LLM call (safe create/close per-call)
  # -----------------------
+ def run_llm_system_and_user(system_prompt: str, user_text: str, model: str = LLM_MODEL, max_tokens: int = MAX_TOKENS) -> str:
      if OpenAI is None:
+         return "ERROR: `openai` package not installed (see requirements)."
      if not OPENAI_API_KEY:
          return "ERROR: OPENAI_API_KEY not set in environment."
 
+     client = None
      try:
+         client = OpenAI(base_url=OPENAI_BASE_URL, api_key=OPENAI_API_KEY)
          completion = client.chat.completions.create(
+             model=model,
+             messages=[
+                 {"role": "system", "content": system_prompt},
+                 {"role": "user", "content": user_text},
+             ],
+             max_tokens=max_tokens,
          )
+         # Guarded extraction
+         if hasattr(completion, "choices") and len(completion.choices) > 0:
+             try:
+                 return completion.choices[0].message.content
+             except Exception:
+                 return str(completion.choices[0])
+         return str(completion)
      except Exception as e:
          return f"ERROR: LLM call failed: {e}"
      finally:
          try:
+             if client is not None:
+                 try:
+                     client.close()
+                 except Exception:
+                     # The sync client only exposes close(); there is no aclose() fallback.
+                     pass
          except Exception:
              pass
 
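The manual `finally` block above could be avoided: openai-python v1 clients also support context-manager use, which closes the underlying HTTP client on exit. A hedged alternative sketch, assuming the v1 SDK (this is not the committed code, and `ask` is a hypothetical helper name):

```python
# Sketch under the assumption that openai>=1.0 is installed and OpenAI() supports `with`.
from openai import OpenAI

def ask(system_prompt: str, user_text: str) -> str:
    # The client and its HTTP resources are closed when the `with` block exits.
    with OpenAI(base_url=OPENAI_BASE_URL, api_key=OPENAI_API_KEY) as client:
        completion = client.chat.completions.create(
            model=LLM_MODEL,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_text},
            ],
            max_tokens=MAX_TOKENS,
        )
    return completion.choices[0].message.content
```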
  # -----------------------
153
+ # Pipeline functions (Gradio-friendly: use message dicts)
154
  # -----------------------
155
  def analyze_and_seed_chat(prompts_text: str):
156
+ """
157
+ Returns: analysis_text (string), initial_chat (list of message dicts)
158
+ message dicts: {"role": "user"|"assistant", "content": "..."}
159
+ """
160
+ if not prompts_text or not prompts_text.strip():
161
+ return "Please enter at least one prompt (query) describing what data to gather.", []
162
 
163
  queries = [line.strip() for line in prompts_text.splitlines() if line.strip()]
164
+ scraped = multi_scrape(queries, delay=SCRAPE_DELAY)
165
  if scraped.startswith("ERROR"):
166
  return scraped, []
167
 
168
+ user_payload = f"SCRAPED DATA:\n\n{scraped}\n\nPlease produce the analysis as instructed in the system prompt."
169
  analysis = run_llm_system_and_user(PROMPT_TEMPLATE, user_payload)
170
  if analysis.startswith("ERROR"):
171
  return analysis, []
172
 
 
173
  initial_chat = [
174
+ {"role": "user", "content": f"Analyze the data provided (prompts: {', '.join(queries)})"},
175
+ {"role": "assistant", "content": analysis},
176
  ]
177
  return analysis, initial_chat
178
 
179
+ def continue_chat(chat_messages: List[dict], user_message: str, analysis_text: str) -> List[dict]:
180
+ """
181
+ Appends user message and assistant response, returns updated list of message dicts.
182
+ """
183
+ if chat_messages is None:
184
+ chat_messages = []
185
+
186
+ if not user_message or not user_message.strip():
187
  return chat_messages
188
+
189
+ # Append user message
190
  chat_messages.append({"role": "user", "content": user_message})
191
+
192
  followup_system = (
193
+ "You are AURA, a helpful analyst. Use the provided analysis as the authoritative context. "
194
+ "Answer follow-up questions about the analysis, explain rationale, and be concise and actionable."
195
  )
196
+ user_payload = f"REFERENCE ANALYSIS:\n\n{analysis_text}\n\nUSER QUESTION: {user_message}\n\nAnswer concisely."
197
+
198
  assistant_reply = run_llm_system_and_user(followup_system, user_payload)
199
  chat_messages.append({"role": "assistant", "content": assistant_reply})
200
  return chat_messages
201
 
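The chat state these functions build is a plain list of role/content dicts, the same shape that `gr.Chatbot(type="messages")` renders directly (Gradio >= 4.44). After one follow-up exchange the state looks like this (contents illustrative):

```python
chat_state = [
    {"role": "user", "content": "Analyze the data provided (prompts: 13F filings Q3 2025)"},
    {"role": "assistant", "content": "Summary: ...\n1) XYZ\nWhen to Invest: ...\nWhen to Sell: ..."},
    {"role": "user", "content": "Why is XYZ ranked first?"},
    {"role": "assistant", "content": "XYZ leads on insider buying volume ..."},
]
```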
+ # -----------------------
+ # Aggressive cleanup to reduce 'Invalid file descriptor: -1' noise at shutdown
+ # -----------------------
+ def _aggressive_cleanup():
+     try:
+         gc.collect()
+     except Exception:
+         pass
+     try:
+         loop = asyncio.get_event_loop()
+         if loop.is_running():
+             try:
+                 loop.stop()
+             except Exception:
+                 pass
+         if not loop.is_closed():
+             try:
+                 loop.close()
+             except Exception:
+                 pass
+     except Exception:
+         pass
+
+     # Close any lingering sockets found via GC (best-effort)
+     try:
+         for obj in gc.get_objects():
+             try:
+                 if isinstance(obj, socket.socket):
+                     try:
+                         obj.close()
+                     except Exception:
+                         pass
+             except Exception:
+                 pass
+     except Exception:
+         pass
+
+ atexit.register(_aggressive_cleanup)
+
+ # -----------------------
+ # Beautiful responsive UI (single build function)
+ # -----------------------
+ def build_demo():
      with gr.Blocks(title="AURA Chat — Hedge Fund Picks") as demo:
+         # Inject responsive CSS & fonts
          gr.HTML("""
+         <link rel="preconnect" href="https://fonts.googleapis.com">
+         <link href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;600;700;800&display=swap" rel="stylesheet">
          <style>
+         :root{
+             --bg:#0f1724;
+             --card:#0b1220;
+             --muted:#9aa4b2;
+             --accent:#6ee7b7;
+             --glass: rgba(255,255,255,0.03);
+         }
+         body, .gradio-container { font-family: Inter, system-ui, -apple-system, "Segoe UI", Roboto, "Helvetica Neue", Arial; background: linear-gradient(180deg,#071028 0%, #071831 100%); color: #e6eef6; }
+         .container { max-width:1200px; margin:18px auto; padding:18px; }
+         .topbar { display:flex; gap:12px; align-items:center; justify-content:space-between; margin-bottom:12px; }
+         .brand { display:flex; gap:12px; align-items:center; }
+         .logo { width:48px; height:48px; border-radius:10px; background:linear-gradient(135deg,#10b981,#06b6d4); display:flex; align-items:center; justify-content:center; font-weight:700; color:#021028; font-size:18px; box-shadow:0 8px 30px rgba(2,16,40,0.6); }
+         .title { font-size:20px; font-weight:700; margin:0; }
+         .subtitle { color:var(--muted); font-size:13px; margin-top:2px; }
+         .panel { background: linear-gradient(180deg, rgba(255,255,255,0.02), rgba(255,255,255,0.01)); border-radius:12px; padding:14px; box-shadow: 0 6px 30px rgba(2,6,23,0.7); border:1px solid rgba(255,255,255,0.03); }
+         .left { min-width: 300px; max-width: 520px; }
+         .right { flex:1; }
+         .analysis-card { background: linear-gradient(180deg, rgba(255,255,255,0.02), rgba(255,255,255,0.01)); padding:14px; border-radius:10px; min-height:220px; overflow:auto; }
+         .muted { color:var(--muted); font-size:13px; }
+         .small { font-size:12px; color:var(--muted); }
+         .button-row { display:flex; gap:10px; margin-top:10px; }
+         .pill { display:inline-block; background: rgba(255,255,255,0.03); padding:6px 10px; border-radius:999px; color:var(--muted); font-size:13px; }
+         .chat-container { height:420px; overflow:auto; border-radius:10px; padding:8px; background: linear-gradient(180deg, rgba(255,255,255,0.01), rgba(255,255,255,0.005)); border:1px solid rgba(255,255,255,0.03); }
+         /* Responsive */
+         @media (max-width: 880px){
+             .topbar { flex-direction:column; align-items:flex-start; gap:6px; }
+             .layout-row { flex-direction:column; gap:12px; }
+         }
          </style>
          """)
 
+         # Top bar / header
+         with gr.Row(elem_id="top-row"):
              with gr.Column(scale=1):
+                 gr.HTML(
+                     """
+                     <div class="container">
+                       <div class="topbar">
+                         <div class="brand">
+                           <div class="logo">A</div>
+                           <div>
+                             <div class="title">AURA — Hedge Fund Picks</div>
+                             <div class="subtitle">Scrape • Synthesize • Serve concise investment durations</div>
+                           </div>
+                         </div>
+                         <div class="small">Model locked • Max tokens locked • Delay locked</div>
+                       </div>
+                     </div>
+                     """
                  )
 
+         # Main layout
+         with gr.Row(elem_classes="layout-row", visible=True):
+             # Left column: inputs
+             with gr.Column(scale=1, min_width=320, elem_classes="left"):
+                 with gr.Group(elem_classes="panel"):
+                     gr.Markdown("### Data prompts")
+                     prompts = gr.Textbox(lines=6, placeholder="SEC insider transactions october 2025\n13F filings Q3 2025\ncompany: ACME corp insider buys", label=None)
+                     gr.Markdown("**Only provide prompts**. Model, tokens and scrape delay are fixed.")
+                     with gr.Row():
+                         analyze_btn = gr.Button("Analyze", variant="primary")
+                         clear_btn = gr.Button("Clear", variant="secondary")
+                     gr.Markdown("**Status**")
+                     status = gr.Markdown("Idle", elem_id="status-box")
+                     gr.Markdown("**Settings**")
+                     gr.HTML(f"<div class='pill'>Model: {LLM_MODEL}</div> <div class='pill'>Max tokens: {MAX_TOKENS}</div> <div class='pill'>Delay: {SCRAPE_DELAY}s</div>")
+
+             # Right column: analysis + chat
+             with gr.Column(scale=2, min_width=420, elem_classes="right"):
+                 with gr.Group(elem_classes="panel"):
+                     gr.Markdown("### Generated Analysis")
+                     analysis_html = gr.HTML("<div class='analysis-card muted'>No analysis yet. Enter prompts and press <strong>Analyze</strong>.</div>")
+                     gr.Markdown("### Chat (ask follow-ups about the analysis)")
+                     chatbot = gr.Chatbot(type="messages", elem_classes="chat-container", label=None)  # type="messages" renders role/content dicts (Gradio >= 4.44)
+                     with gr.Row():
+                         user_input = gr.Textbox(placeholder="Ask a follow-up question about the analysis...", label=None)
+                         send_btn = gr.Button("Send", variant="primary")
+
+         # Hidden states
+         analysis_state = gr.State("")  # string
+         chat_state = gr.State([])      # list of message dicts
+
+         # ---- Handler functions (defined in scope) ----
+         def set_status(text: str):
+             # small helper to update status markdown box
+             return gr.update(value=f"**{text}**")
+
+         def on_clear():
+             return "", gr.update(value="<div class='analysis-card muted'>No analysis yet. Enter prompts and press <strong>Analyze</strong>.</div>"), [], gr.update(value=[]), set_status("Cleared")
+
+         def on_analyze(prompts_text: str):
+             # Gradio runs this handler synchronously; the status box updates once when we return.
+             try:
+                 # Collect queries
+                 queries = [line.strip() for line in (prompts_text or "").splitlines() if line.strip()]
+                 if not queries:
+                     return "", gr.update(value="<div class='analysis-card muted'>Please provide at least one data prompt.</div>"), [], [], set_status("Idle")
+
+                 scraped = multi_scrape(queries, delay=SCRAPE_DELAY)
+                 if scraped.startswith("ERROR"):
+                     return "", gr.update(value=f"<div class='analysis-card muted'><strong>Error:</strong> {scraped}</div>"), [], [], set_status("Scrape error")
+
+                 user_payload = f"SCRAPED DATA:\n\n{scraped}\n\nPlease produce the analysis as instructed in the system prompt."
+                 analysis_text = run_llm_system_and_user(PROMPT_TEMPLATE, user_payload)
+
+                 if analysis_text.startswith("ERROR"):
+                     return "", gr.update(value=f"<div class='analysis-card muted'><strong>Error:</strong> {analysis_text}</div>"), [], [], set_status("LLM error")
+
+                 # Build nicely formatted HTML preview (raw LLM text wrapped in <pre> for readability)
+                 safe_html = "<div class='analysis-card'><pre style='white-space:pre-wrap; font-family:Inter, monospace; font-size:14px; color:#dfeefc;'>" + \
+                     html.escape(analysis_text) + "</pre></div>"
+
+                 # Seed chat messages: user + assistant
+                 initial_chat = [
+                     {"role": "user", "content": f"Analyze the data provided (prompts: {', '.join(queries)})"},
+                     {"role": "assistant", "content": analysis_text},
+                 ]
+
+                 return analysis_text, gr.update(value=safe_html), initial_chat, initial_chat, set_status("Done")
+             except Exception as e:
+                 traceback.print_exc()  # full traceback goes to the Space logs
+                 return "", gr.update(value=f"<div class='analysis-card muted'><strong>Unexpected error:</strong> {e}</div>"), [], [], set_status("Error")
+
+         def on_send(chat_messages: List[dict], user_msg: str, analysis_text: str):
+             if not user_msg or not user_msg.strip():
+                 return chat_messages or [], ""
+             # Append and get updated messages
+             updated = continue_chat(chat_messages or [], user_msg, analysis_text or "")
+             return updated, ""
+
+         def render_chat(chat_messages: List[dict]):
+             """
+             Pass the role/content dicts straight through; the Chatbot above is
+             configured with type="messages", which renders this format directly.
+             """
+             if not chat_messages:
+                 return []
+             # Return as-is
+             return chat_messages
+
+         # ---- Wire up events ----
          analyze_btn.click(
              fn=on_analyze,
              inputs=[prompts],
+             outputs=[analysis_state, analysis_html, chat_state, chatbot, status],
          )
+
+         clear_btn.click(
+             fn=on_clear,
+             inputs=[],
+             outputs=[prompts, analysis_html, chat_state, chatbot, status],
+         )
+
          send_btn.click(
              fn=on_send,
              inputs=[chat_state, user_input, analysis_state],
+             outputs=[chat_state, user_input],
          )
          user_input.submit(
              fn=on_send,
              inputs=[chat_state, user_input, analysis_state],
+             outputs=[chat_state, user_input],
          )
+
+         # Keep chatbot UI updated
+         chat_state.change(fn=render_chat, inputs=[chat_state], outputs=[chatbot])
+
      return demo
 
  # -----------------------
  # Run
  # -----------------------
  if __name__ == "__main__":
+     demo = build_demo()
      demo.launch(server_name="0.0.0.0", server_port=int(os.environ.get("PORT", 7860)))
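
Since the file imports `gradio`, `requests`, and the `openai` SDK, the Space also needs a requirements.txt along these lines (the version pins are assumptions, not part of this commit; `type="messages"` on the Chatbot needs a recent Gradio):

```
gradio>=4.44
openai>=1.0
requests
```

`OPENAI_API_KEY` must be set as a Space secret; `SCRAPER_API_URL`, `OPENAI_BASE_URL`, `LLM_MODEL`, `LLM_MAX_TOKENS`, and `SCRAPE_DELAY` are optional env overrides.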