File size: 68,765 Bytes
5ab87e0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
import re
import os
import json
import requests
import time
from typing import List, Optional, Dict
from .prompts import DEEPRESEARCH_SYS_PROMPT, SUMMARY_SYS_PROMPT 
from functools import wraps
from together import Together          # pip install together
from datetime import datetime          # needed for retries / logging and date string (for giving current date and time to LLM)

#     return decorator
def retry(max: int = 10, sleep: int = 1, fallback=None):
    """
    Retry `max` times and, if still failing, return `fallback`
    instead of raising.  This keeps outer loops alive.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            for i in range(max):
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    print(f"[retry] attempt {i+1}/{max} failed: {e}")
                    if i == max - 1:                 # last try exhausted
                        print(f"[retry] giving up – returning {fallback!r}")
                        return fallback              # ← swallow the error
                    if sleep:
                        time.sleep(sleep)
        return wrapper
    return decorator

class ReCall():
    date_str = \
    f"""

    **Note**: Today's Date is {datetime.now().strftime("%Y-%m-%d")}, and time is {datetime.now().strftime("%H:%M:%S")}. This may be useful for answering questions about current events."""

    anti_chinese_str = \
    """
    
    **Note**: Do not respond in chinese, do not think in chinese, only think and respond/answer in English, unless explicitly instructed by the user to respond in some other language."""
    
    # proper_formatting_str = \
    # """
    # **Note**: Provide a well-structured answer first, then put only the final short answer in \\boxed{{}}.

    # **How to format your response**
    # - Write in clear English prose and use Markdown headings/bullets where helpful.
    # - Give a detailed, in-depth explanation of the steps or facts used.
    # - Use LaTeX only for short formulas/equations. For multi-line LaTeX, include line breaks (\\\\) or environments like \\begin{{align}} ... \\end{{align}} when genuinely helpful.
    # - Do **not** wrap the whole response in LaTeX. Only the final short answer goes in \\boxed{{...}} on its own line at the end.

    # **Examples**

    # 1) **Simple fact question**  
    # **Question:** What is the capital of India?  
    # **Brief rationale:** India’s seat of government and primary national institutions are located in New Delhi.  
    # **Final:** \\boxed{{New Delhi}}

    # 2) **Quick calculation**  
    # **Question:** Convert 68^\\circ F to Celsius.  
    # **Approach:** Use C = (F - 32) \\times \\tfrac{{5}}{{9}}.  
    # **Computation:** (68 - 32) \\times \\tfrac{{5}}{{9}} = 20.  
    # **Final:** \\boxed{{20^\\circ C}}

    # 3) **Search & synthesis (structured, detailed)**  
    # **Question:** When did the EU’s GDPR go into effect?
    
    # **Complete Final Response:**
    # '''**Key findings (evidence, concise):**  
    # - **European Commission overview** states GDPR β€œapplies from 25 May 2018.”  
    # - **EUR-Lex (Regulation (EU) 2016/679), Article 99**: entered into force 20 days after publication in the OJ (2016), and **applies from 25 May 2018**.  
    # - **EDPB FAQs/communications** reiterate that enforcement/application begins **25 May 2018**.

    # **Cross-check & validation:**  
    # - Independent primary sources (Commission portal and EUR-Lex) agree on the same application date. A supervisory body source (EDPB) corroborates.

    # **Common pitfalls addressed:**  
    # - Some secondary blogs list **24 May 2018**β€”this confuses the **last day before** applicability with the first day **of** applicability.  
    # - β€œEntered into force” in **2016** (post-publication) is not the same as β€œapplication/effective for obligations,” which is **2018**.

    # **Date normalization:**  
    # - Normalize to an unambiguous calendar date and present in a clear format (e.g., β€œMay 25, 2018”).

    # **Conclusion:**  
    # - The effective (application) date for GDPR obligations across the EU is the same in all Member States and is confirmed by multiple primary sources.

    # **Final:** \\boxed{{May\ 25,\ 2018}}'''
    # """
        
    # print(f"Date string:\n'{date_str}'")
    
    # proper_formatting_str = \
    # """
    # **DeepResearch Response Protocol**  
    # Provide a comprehensive, decision-grade report first, then put only the short final answer in \\boxed{{}} on its own line at the very end.

    # ---

    # ## Mandatory Sections (in order)

    # 1) **Executive Summary**  
    # - 5–10 bullets capturing the direct answer, key numbers/dates, and the top implications.  
    # - Include any material uncertainty (e.g., β€œmoderate confidence due to limited primary data”).

    # 2) **Problem Framing & Scope**  
    # - One short paragraph restating the question, goals, and audience.  
    # - Clarify interpretations, exclusions, and assumptions. Define key terms and acronyms.

    # 3) **Method (Search & Validation Plan)**  
    # - 5–8 bullets detailing how you searched and validated. Include:  
    #     - **Source priority:** primary/official (laws, filings, standards, regulator notices) β†’ reputable secondary (major outlets, respected orgs) β†’ tertiary/background.  
    #     - **Query strategy:** main queries and alternates (synonyms, regional spellings, technical names).  
    #     - **Freshness policy:** prefer the most recent authoritative updates; when dates matter, distinguish **event date**, **publication/update date**, and **effective date**.  
    #     - **Triangulation rule:** corroborate all key claims with β‰₯2 independent reputable sources (or 1 clear primary).  
    #     - **Inclusion/Exclusion:** note discarded sources (paywalled, low quality, self-published without review) and why.  
    #     - **Conflict resolution:** how disagreements will be weighed (mandate, jurisdiction, methodological rigor, recency).

    # 4) **Evidence Ledger (Cited Facts)**  
    # - 6–15 bullets. Each bullet is a **Fact Card**:  
    #     - **Claim:** one-sentence fact.  
    #     - **Evidence:** short quote/figure/line (paraphrase unless a short quote is essential).  
    #     - **Source:** Publisher/Title β€” (Event Date if applicable) β€” Publish/Update Date β€” Access Date.  
    #     - **Confidence:** High / Medium / Low.  
    # - Group with mini-subheadings where helpful (e.g., β€œOfficial notices”, β€œRegulatory filings”, β€œPress coverage”).  
    # - Explicitly flag contradictions.

    # 5) **Timeline of Key Events**  
    # - A compact, chronological list linking milestones to sources; include both event and publication dates where relevant.

    # 6) **Data Extraction & Normalization** (as needed)  
    # - Present important numbers in a small table (≀8 rows) with units, currency (ISO codes, e.g., **USD**), and rounding policy (state precision, e.g., β€œrounded to 2 decimals”).  
    # - Perform any conversions or calculations and show formulas succinctly (LaTeX inline for short formulas, e.g., \\( C = (F-32)\\times\\tfrac{{5}}{{9}} \\); use \\begin{{align}}…\\end{{align}} for multi-step math).  
    # - Specify timezones for dates/times when relevant.

    # 7) **Comparative & Sensitivity Analysis** (if applicable)  
    # - Contrast competing interpretations, options, or sources; note trade-offs.  
    # - Include a brief sensitivity or scenario check if a key parameter could materially change the conclusion.

    # 8) **Synthesis & Conclusion**  
    # - 2–4 tight paragraphs that integrate the evidence, resolve conflicts, and explain *why* the conclusion follows.  
    # - Be explicit about scope limits and residual uncertainties.

    # 9) **Risks, Caveats & Unknowns**  
    # - Bullet the major risks, data gaps, and what would most change the answer.  
    # - Note any ethical, legal, or safety considerations.

    # 10) **Recommendations / Next Steps** (if applicable)  
    #     - Actionable items tailored to the user’s likely goal (e.g., verify with regulator X, monitor source Y weekly, collect dataset Z).

    # 11) **Answer (one sentence)**  
    #     - State the direct answer clearly with units/timezone as needed.

    # 12) **Final**  
    #     - Repeat only the short final answer inside \\boxed{{...}} with no extra words.

    # 13) **Source Log (Audit Trail)**  
    #     - A compact, reproducible list: *Title β€” Publisher/Author β€” (Event Date, if any) β€” Publish/Update Date β€” Access Date β€” URL*.  
    #     - Prefer diverse, authoritative domains; avoid duplicates.

    # ---

    # ## Formatting & Quality Rules

    # - Use clear English with Markdown headings and bullets; favor short paragraphs.  
    # - Do **not** reveal inner monologue or hidden chain-of-thought; provide only public-facing rationale.  
    # - Use LaTeX sparingly for math; do **not** wrap the entire response in LaTeX. Only the final short answer goes in \\boxed{{...}}.  
    # - Always specify units, currency codes, and timezones when relevant.  
    # - When listing β‰₯3 items or comparing options, include a small, focused table rather than long prose.  
    # - If information is uncertain or contested, *quantify* the uncertainty (confidence labels or ranges) and state why.

    # ---

    # ## Depth & Completeness Expectations

    # - **Complex/high-stakes queries**: Populate all sections thoroughly; provide triangulated citations and explicit conflict resolution.  
    # - **Simple fact queries**: Keep Sections 3–9 concise (one to two lines each) but still cite at least one authoritative source.  
    # - Strive for neutrality, reproducibility, and decision usefulness over verbosity.

    # ---
    # """
    
    proper_formatting_str = """"""
    
    sys_prompt_non_search = """You are a helpful assistant. You will answer the user's question based on your knowledge and reasoning ability. You do not have access to the internet or any external tools. Do not use search. Answer all questions yourself.""" + date_str + anti_chinese_str
    
    sys_prompt_websailor_start = """
    You are a Web Information Seeking Master. Your task is to thoroughly seek the internet for information and provide accurate answers to questions. No matter how complex the query, you will not give up until you find the corresponding information.
    In this environment you have access to a set of tools you can use to assist with the user query. 
    You may perform multiple rounds of function calls. In each round, you can call one or more functions.

    As you proceed, adhere to the following principles:

    1. **Persistent Actions for Answers**: You will engage in many interactions, delving deeply into the topic to explore all possible aspects until a satisfactory answer is found.

    2. **Repeated Verification**: Before presenting a Final Answer, you will **cross-check** and **validate the information** you've gathered to confirm its accuracy and reliability.

    3. **Attention to Detail**: You will carefully analyze each information source to ensure that all data is current, relevant, and from credible origins."""
    
    sys_prompt_websailor = """
    You are a Web Information Seeking Master. Your task is to thoroughly seek the internet for information and provide accurate answers to questions. No matter how complex the query, you will not give up until you find the corresponding information.
    In this environment you have access to a set of tools you can use to assist with the user query. 
    You may perform multiple rounds of function calls. In each round, you can call one or more functions.

    As you proceed, adhere to the following principles:

    1. **Persistent Actions for Answers**: You will engage in many interactions, delving deeply into the topic to explore all possible aspects until a satisfactory answer is found.

    2. **Repeated Verification**: Before presenting a Final Answer, you will **cross-check** and **validate the information** you've gathered to confirm its accuracy and reliability.

    3. **Attention to Detail**: You will carefully analyze each information source to ensure that all data is current, relevant, and from credible origins.



    Here are available functions in JSONSchema format: \n```json\n{func_schemas}\n```

    In your response, you need to first think about the reasoning process in the mind and then conduct function calling to get the information or perform the actions if needed. \
    The reasoning process and function calling are enclosed within <think> </think> and <tool_call> </tool_call> tags. \
    The results of the function calls will be given back to you after execution, \
    and you can continue to call functions until you get the final answer for the user's question.

    For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:
    <tool_call>
    {{"name": <function-name>, "arguments": <args-json-object>}}
    </tool_call>
    """ + date_str + anti_chinese_str + proper_formatting_str
    
    sys_prompt_websailor_deepseek = """
    You are a Web Information Seeking Master. Your task is to thoroughly seek the internet for information and provide accurate answers to questions. No matter how complex the query, you will not give up until you find the corresponding information.
    In this environment you have access to a set of tools you can use to assist with the user query. 
    You may perform multiple rounds of function calls. In each round, you can call one or more functions.

    As you proceed, adhere to the following principles:

    1. **Persistent Actions for Answers**: You will engage in many interactions, delving deeply into the topic to explore all possible aspects until a satisfactory answer is found.

    2. **Repeated Verification**: Before presenting a Final Answer, you will **cross-check** and **validate the information** you've gathered to confirm its accuracy and reliability.

    3. **Attention to Detail**: You will carefully analyze each information source to ensure that all data is current, relevant, and from credible origins.



    Here are available functions in JSONSchema format: \n```json\n{func_schemas}\n```

    In your response, you need to first think about the reasoning process in the mind and then conduct function calling to get the information or perform the actions if needed. \
    The reasoning process and function calling are enclosed within <think> </think> and <tool_calls_begin> <tool_calls_end> tags. \
    The results of the function calls will be given back to you after execution, \
    and you can continue to call functions until you get the final answer for the user's question. \
    Finally, if you have got the answer, enclose it within \\boxed{{}} with latex format and do not continue to call functions, \
    i.e., <think> Based on the response from the function call, I get the weather information. </think> The weather in Beijing on 2025-04-01 is \\[ \\boxed{{20C}} \\].
    """ + date_str + anti_chinese_str + proper_formatting_str
    
    # sys_prompt_websailor_deepseek = """
    #         You are a Web Information Seeking Master. Seek the internet thoroughly and provide accurate answers. You may use tools multiple times.

    #         Principles:
    #         1) Persistent Actions for Answers: explore deeply until you find satisfactory information.
    #         2) Repeated Verification: cross-check and validate before the final answer.
    #         3) Attention to Detail: ensure sources are current, relevant, and credible.

    #         You have the following tools (JSONSchema):
    #         ```json
    #         {func_schemas}
    #         Follow this EXACT tool-call I/O protocol.

    #         TO CALL ONE OR MORE TOOLS:
    #         Respond only with this block (no extra text before/after):
    #         <|tool▁call▁begin|>function<|tool▁sep|>{tool_name}{args_json}
    #         <|tool▁call▁end|>
    #         ... (repeat <|tool▁call▁begin|>…<|tool▁call▁end|> for multiple tools)
    #         <|tool▁calls▁end|><|end▁of▁sentence|>

    #         HOW TOOL RESULTS ARRIVE:
    #         I will send tool outputs back embedded inside a single user message, each wrapped like:
    #         <tool_response>{one_tool_call_you_made}
    #         {tool_return_text_or_json}
    #         </tool_response>

    #         WHAT TO DO NEXT:

    #         If you still need info, emit another tool-calls block (same exact format).

    #         If you have the final answer, output:
    #         <answer> …your final answer… </answer>
    #         and DO NOT call any more tools.

    #         Important:

    #         Do not expose your internal reasoning; keep thoughts private.

    #         When emitting a tool-calls block, do not include any explanations, only the block specified above.

    #         Arguments must be valid JSON.

    #         Stop tokens to respect: <|end▁of▁sentence|>
    #         """

    system_prompt = """In this environment you have access to a set of tools you can use to assist with the user query. \
    You may perform multiple rounds of function calls. \
    In each round, you can call one or more functions. \

    Here are available functions in JSONSchema format: \n```json\n{func_schemas}\n```

    In your response, you need to first think about the reasoning process in the mind and then conduct function calling to get the information or perform the actions if needed. \
    The reasoning process and function calling are enclosed within <think> </think> and <tool_call> </tool_call> tags. \
    The results of the function calls will be given back to you after execution, \
    and you can continue to call functions until you get the final answer for the user's question. You are encouraged to utilize as many function calls as possible. \
    Finally, if you have got the answer, wrap it in <answer> </answer> **and do not call any more functions**, \
    e.g. <think> Based on the tool results … </think> <answer>20 Β°C</answer>.

    For each function call, return a JSON object with function name and arguments within <tool_call></tool_call> XML tags:
    <tool_call>
    {{"name": <function-name-1>, "arguments": <args-json-object>}}
    </tool_call>""" + date_str + anti_chinese_str + proper_formatting_str

    system_prompt_budget = """
        You are an autonomous reasoning agent with access to external tools.

        The conversation will retain only the *most-recent* <tool_response> block; older ones disappear.  
        As soon as you receive tool results, extract the *essential facts tables links etc* that might be needed for later and restate them inside your <think> section.  
         **Never copy large bodies of text** or raw JSON from tool output into your visible reply; summarise instead.

        β—Ž **Workflow**  
        1. In every round, start with <think> … </think> to lay out your short reasoning.  
        2. If you need external information or an action, emit one or more <tool_call> … </tool_call> blocks (JSON spec below).  
        3. When the environment returns <tool_response>, continue reasoning; you may call more tools.  
        4. Once you can answer the user, wrap the final result in <answer> … </answer> and STOP calling tools.

        β—Ž **Tool call format** (do **not** restate the schema or any explanations):  
        <tool_call>
        {{"name": <function-name-1>, "arguments": <args-json-object>}}
        </tool_call>

        Here are available functions in JSONSchema format: \n```json\n{func_schemas}\n```
    """ + date_str + anti_chinese_str + proper_formatting_str



    system_prompt_forcing_tool_call = """
    In this environment you have access to a set of tools you can use to assist with the user query.
    You may perform multiple rounds of function calls upto ten. In each round, you can call upto three functions.

    ──────────────────────── AVAILABLE TOOLS ────────────────────────
    ```json
    [
    {
        "type": "function",
        "function": {
        "name": "pubmed_search",
        "description": "Search PubMed for Medical related queries.",
        "parameters": {
            "type": "object",
            "properties": {
            "query":  { "type": "string",  "description": "Query to search for." },
            "top_n":  { "type": "integer", "description": "Number of hits", "default": 3 }
            },
            "required": ["query"]
        }
        }
    }
    ]
    ```

    ────────────────────────────── RULES ──────────────────────────────
    1. You MUST issue one pubmed_search tool call for each answer choice. Each query must relate the clinical context to that option.
    2. You MAY NOT skip any option or decide based only on internal reasoning. Evidence must be retrieved for all choices.
    3. You MAY issue follow-up tool calls if your reasoning leads you to need more evidence.
    4. You MUST wrap all reasoning in <think> </think> tags and all tool usage in <tool_call> </tool_call> tags. Number of <tool_call> and </tool_call> tokens in the entire trace MUST always match.
    5. Do NOT casually emit the  <tool_call> </tool_call> during reasoning unless explicitly calling a tool in the proper format. 
    5. Your final answer must be enclosed a single letter corresponding to the correct option enclosed in the <answer> </answer> tags. Do not output anything else inside these tags.
    6. DO NOT use any other confusing tags like <thiking> or </thinking>. 
    7. Each <think> </think> block MUST be followed by a <tool_call> </tool_call> or <answer> </answer> or else the program will break without an answer.

    ───────────────────── DUMMY EXAMPLE INTERLEAVED SKELETON ─────────────────────
    <think>
    We are presented with a 54-year-old woman with invasive ductal carcinoma of the breast and osteolytic lesions in the thoracic spine. This strongly suggests metastatic spread. Our task is to determine the most likely anatomical route of metastasis to the spine.

    Let’s examine the given options:
    A. Hemiazygos vein  
    B. Posterior intercostal veins  
    C. Batson’s vertebral venous plexus  
    D. Internal mammary lymphatics

    We'll evaluate each option in turn using available literature and known anatomical pathways.
    **Option A: Hemiazygos vein**  
    We begin by evaluating whether the hemiazygos vein could be involved in metastatic spread from breast cancer to the spine.
    </think>
    <tool_call>
    {"name": "pubmed_search", "arguments": {"query": "breast cancer metastasis hemiazygos vein", "top_n": 2}}
    </tool_call>
    <tool_response>
    ...
    </tool_response>
    <think>
    There is limited or no strong evidence suggesting the hemiazygos vein is a common or primary route for vertebral metastasis from breast cancer.
    Lets explore **Option B: Posterior intercostal veins**  and  **Option C: Batson’s vertebral venous plexus** and **Option D:Internal mammary lymphatics**
    </think>
    <tool_call>
    {"name": "pubmed_search", "arguments": {"query": "posterior intercostal veins breast cancer spinal metastasis", "top_n": 3}}
    </tool_call>
    <tool_call>
    {"name": "pubmed_search", "arguments": {"query": "Batson vertebral venous plexus breast cancer metastasis", "top_n": 3}}
    </tool_call>
    <tool_call>
    {"name": "pubmed_search", "arguments": {"query": "Internal mammary lymphatics breast cancer metastasis", "top_n": 3}}
    </tool_call>
    <tool_response>
    ...
    </tool_response>
    <think>
    While the posterior intercostal veins may be involved in venous drainage, there is insufficient evidence to support them as a primary route for metastasis to the vertebral column.
    where as Batson’s vertebral venous plexus β€” a valveless venous network that connects the thoracic and abdominal veins directly to the spine. I to find more specific information about option C.
    </think>
    <tool_call>
    {"name": "pubmed_search", "arguments": {"query": ""Batson vertebral venous plexus breast cancer metastasis in people over 50", "top_n": 1}}
    </tool_call>
     <think>
    After evaluating all four options, the most plausible route for breast cancer metastasis to the thoracic spine is clearly via  Batson’s vertebral venous plexus:
    </think>
    <answer>C</answer>
    """ + date_str + anti_chinese_str + proper_formatting_str
    # STOP_TOKENS =STOP_TOKENS = ["<|im_end|>", "<|endoftext|>"


    def __init__(self, executor_url):
        self.executor_url = executor_url

    def init_prompt(self, func_schemas, question, old_prompt: Optional[str] = None, search_on: bool = True) -> str:
        if old_prompt is None or len(old_prompt.strip()) == 0:
            if search_on:
                system_prompt = f"<|im_start|>system\n{self.sys_prompt_websailor.format(func_schemas=func_schemas)}<|im_end|>"
            else:
                system_prompt = f"<|im_start|>system\n{self.sys_prompt_non_search}<|im_end|>"
            user_prompt = f"<|im_start|>user\n{question}<|im_end|>"
            assistant_prefix = f"<|im_start|>assistant\n<think>"
            return system_prompt + "\n" + user_prompt + "\n" + assistant_prefix
        else:
            user_prompt = f"<|im_start|>user\n{question}<|im_end|>"
            assistant_prefix = f"<|im_start|>assistant\n<think>"
            return old_prompt + "\n" + user_prompt + "\n" + assistant_prefix
        
    def replace_sys_prompt(self, old_prompt: str, func_schemas: str, search_on: bool = True) -> str:
        if search_on:
            new_sys_prompt = f"<|im_start|>system\n{self.sys_prompt_websailor.format(func_schemas=func_schemas)}<|im_end|>"
            old_sys_prompt = f"<|im_start|>system\n{self.sys_prompt_non_search}<|im_end|>"
        else:
            new_sys_prompt = f"<|im_start|>system\n{self.sys_prompt_non_search}<|im_end|>"
            old_sys_prompt = f"<|im_start|>system\n{self.sys_prompt_websailor.format(func_schemas=func_schemas)}<|im_end|>"
            
        return old_prompt.replace(old_sys_prompt, new_sys_prompt)

    def _strip_old_tool_responses(self, prompt: str) -> str:
        TOOL_RESPONSE_RE = re.compile(r"<tool_response>.*?</tool_response>\s*", re.DOTALL)
        """Remove every existing <tool_response> … </tool_response> block."""
        return TOOL_RESPONSE_RE.sub("", prompt)

    def cat_assistant_response(self, curr_prompt, assistant_response):
        return curr_prompt + assistant_response + "<|im_end|>"
    
    def cat_tool_results(self, curr_prompt, tool_calls, results):
        tool_response_str = ""
        for tool_call, result in zip(tool_calls, results):
            tool_response_str += f"<tool_response>{tool_call}\n{result}\n</tool_response>\n"
        tool_response_str = f"<|im_start|>user\n{tool_response_str}<|im_end|>"
        assistant_prefix = f"<|im_start|>assistant\n<think>"
        return curr_prompt + "\n" + tool_response_str + "\n" + assistant_prefix

    def format_tool_call(self, tool_call_str: str):
        """Convert JSON function call description to Python executable code string."""
        try:
            call_json = json.loads(tool_call_str)
            func_name = call_json['name']
            arguments = call_json.get('arguments', {})
            
            args_str = ', '.join(f"{k}={repr(v)}" for k, v in arguments.items())
            return f"{func_name}({args_str})"
        except Exception as e:
            return f"Parse tool call failed: {e}"
    
    def execute_tool_calls(self, env: str, tool_calls: List[str]) -> List[str]:
        def exe_tool_call(env, call):
            url = self.executor_url + '/execute'

            call_str = self.format_tool_call(call)
            # print(call_str)
            if call_str.startswith("error: parse tool call failed"):
                return call_str

            try:
                data = {
                    'env': env,
                    'call': call_str
                }
                response = requests.post(url, json=data, timeout=60)
                if response.status_code != 200:
                    return f"error: {response.status_code}"
                response = response.json()
                ret_str = ''
                if response['result']:
                    ret_str += f'result: \n{response["result"]}\n'
                if response['output']:
                    ret_str += f'output: \n{response["output"]}\n'
                if response['error']:
                    ret_str += f'error: \n{response["error"]}\n'
                return ret_str.strip()
            except requests.exceptions.Timeout:
                return "error: execution timed out"
            except Exception as e:
                return str(e)
        
        results = []
        for tool_call in tool_calls:
            result = exe_tool_call(env, tool_call)
            results.append(result)
        return results
    
    def validate_tool_calls(self, output_str):
        start_tags = re.findall(r'<tool_call>', output_str)
        end_tags = re.findall(r'</tool_call>', output_str)
        
        if len(start_tags) != len(end_tags):
            return False
            
        start_positions = [m.start() for m in re.finditer(r'<tool_call>', output_str)]
        end_positions = [m.start() for m in re.finditer(r'</tool_call>', output_str)]
        
        for start, end in zip(start_positions, end_positions):
            if start >= end:
                return False
                
        return True

    def extract_tool_calls(self, output_str):
        if not self.validate_tool_calls(output_str):
            return []

        try:
            pattern = r'<tool_call>((?:(?!</tool_call>).)*)</tool_call>'
            matches = re.finditer(pattern, output_str, re.DOTALL)
            
            return [match.group(1).strip() for match in matches]
        except Exception as e:
            return []
        
    def extract_tool_calls_deepseek(self, output_str):
        if not self.validate_tool_calls(output_str):
            return []

        try:
            pattern = r'<tool_calls_begin>((?:(?!</tool_calls_end>).)*)<tool_calls_end>'
            matches = re.finditer(pattern, output_str, re.DOTALL)
            
            return [match.group(1).strip() for match in matches]
        except Exception as e:
            return []



    @retry(max=5, sleep=1, fallback={"score": 0}) 
    def run_ii_searcher(
        self, 
        env: str, 
        func_schemas: str,
        question: str,
        tokenizer,
        model_url="http://0.0.0.0:1214",
        temperature: float = 0.0,
        max_new_tokens: int = 40960,
        ):
        curr_prompt = self.init_prompt(func_schemas, question)
        all_tool_calls= []
    
        for _ in range(16):
            prompt_tokens = tokenizer(curr_prompt, return_tensors=None, add_special_tokens=False)["input_ids"]
            max_tokens_left = max_new_tokens - len(prompt_tokens) - 100
            # for oss model served via vllm
            # response = requests.post(
            #     f'{model_url}/v1/chat/completions', 
            #     json={
            #         "text": curr_prompt,
            #         # "reasoning": "medium"
            #         },
            # ).json()
            # for sglang served models hf models
            response = requests.post(
                f'{model_url}/generate', 
                json={
                    "text": curr_prompt,
                    "sampling_params": {
                        "temperature": temperature,
                        "max_new_tokens": max_tokens_left,
                        "repetition_penalty": 1.05
                    },

                }
            ).json()
            if "error" in response.keys():
                print("resp",response)
            curr_prompt = self.cat_assistant_response(curr_prompt, response['text'])

            tool_calls: List[str] = self.extract_tool_calls(response['text'])
            all_tool_calls += tool_calls

            if len(tool_calls) == 0:
                break
    
            else:
                results: List[str] = self.execute_tool_calls(env, tool_calls)
                curr_prompt = self.cat_tool_results(curr_prompt, tool_calls, results)

        return curr_prompt, all_tool_calls
    
    # @retry(max=5, sleep=1, fallback={"score": 0}) 
    # def run(
    #     self, 
    #     env: str, 
    #     func_schemas: str,
    #     question: str,
    #     tokenizer,
    #     model_url="http://0.0.0.0:1214",
    #     temperature: float = 0.0,
    #     max_new_tokens: int = 40960,
    #     ):
    #     curr_prompt = self.init_prompt(func_schemas, question)
    #     all_tool_calls= []
    
    #     for i in range(32):
    #         prompt_tokens = tokenizer(curr_prompt, return_tensors=None, add_special_tokens=False)["input_ids"]
    #         max_tokens_left = max_new_tokens - len(prompt_tokens) - 100
    #         # for oss model served via vllm
    #         # response = requests.post(
    #         #     f'{model_url}/v1/chat/completions', 
    #         #     json={
    #         #         "text": curr_prompt,
    #         #         # "reasoning": "medium"
    #         #         },
    #         # ).json()
    #         # for sglang served models hf models
    #         response = requests.post(
    #             f'{model_url}/generate', 
    #             json={
    #                 "text": curr_prompt,
    #                 "sampling_params": {
    #                     "temperature": temperature,
    #                     "max_new_tokens": max_tokens_left,
    #                     "repetition_penalty": 1.05
    #                 },

    #             }
    #         ).json()
    #         if "error" in response.keys():
    #             print("resp",response)
    #         curr_prompt = self.cat_assistant_response(curr_prompt, response['text'])

    #         tool_calls: List[str] = self.extract_tool_calls(response['text'])
    #         all_tool_calls += tool_calls

    #         if len(tool_calls) == 0:
    #             break
    
    #         else:
    #             # print(f"Step-{i+1}")
    #             results: List[str] = self.execute_tool_calls(env, tool_calls)
    #             curr_prompt = self.cat_tool_results(curr_prompt, tool_calls, results)

    #     return curr_prompt, all_tool_calls
    from typing import List, Dict, Any, Tuple
    import requests
    
    def build_summary_prompt(self, question: str, transcript: str, tool_calls: Any) -> str:
        """Assemble a compact but detailed prompt for summarization."""
        tool_str = ""
        if tool_calls is not None:
            try:
                tool_str = str(tool_calls)
            except Exception:
                tool_str = "<unprintable tool_calls>"
        return (
            "You are given a DeepSearch investigation trace.\n\n"
            f"Question:\n{question}\n\n"
            "Trace (model transcript):\n"
            f"{transcript}\n\n"
            "Tool Calls (as-recorded):\n"
            f"{tool_str}\n\n"
            "β€” End of trace β€”"
        )
        
    def reformat_trace(self, s: str) -> str:
        if not s:
            return s

        t = s

        # 1) Speaker tags: <|im_start|>assistant -> "ASSISTANT:\n"
        def _speaker(m: re.Match) -> str:
            role = (m.group(1) or "").strip().upper()
            return f"\n{role}:\n"
        t = re.sub(r"<\|im_start\|\>(\w+)", _speaker, t, flags=re.IGNORECASE)

        # 2) End-of-message tag: drop but keep spacing
        t = re.sub(r"<\|im_end\|\>", "\n", t, flags=re.IGNORECASE)

        # 3) THINK blocks: replace tags with label, keep content
        t = re.sub(r"<think\s*>", "", t, flags=re.IGNORECASE)
        t = re.sub(r"</think\s*>", "\n", t, flags=re.IGNORECASE)

        # 4) TOOL RESPONSE blocks: support both 'response' and the misspelt 'resonse'
        t = re.sub(r"<tool_respon[sc]e\s*>", "SEARCH RESULT\n", t, flags=re.IGNORECASE)
        t = re.sub(r"</tool_respon[sc]e\s*>", "\n", t, flags=re.IGNORECASE)

        # 5) TOOL CALL wrappers: drop tags, keep the JSON/content
        t = re.sub(r"</?tool_call\s*>", "", t, flags=re.IGNORECASE)

        # 6) Any remaining ChatML specials like <|eot_id|>, <|...|> -> remove
        t = re.sub(r"<\|[^>]+?\|>", "", t)

        # 7) Remove any other angle-bracket tags we didn’t explicitly keep
        #    (leaves inner text intact). This will strip e.g. <tool_response_extra>
        t = re.sub(r"</?[^>\n]+?>", "", t)

        # 8) Normalize whitespace (collapse 3+ newlines to 2)
        t = re.sub(r"\n{3,}", "\n\n", t).strip()

        return t
    
    def _openai_client(self):
        try:
            from openai import OpenAI  # type: ignore
        except Exception as e:
            raise RuntimeError("openai package not installed. `pip install openai`") from e
        return OpenAI()
    
    def init_summary_prompt(self, system_prompt: str, prompt: str) -> str:
        system_prompt = f"<|im_start|>system\n{system_prompt}<|im_end|>"
        user_prompt = f"<|im_start|>user\n{prompt}<|im_end|>"
        assistant_prefix = f"<|im_start|>assistant\n<think>"
        return system_prompt + "\n" + user_prompt + "\n" + assistant_prefix
    
    def _call_hf_endpoint(self, base_url: str, system_prompt: str, prompt: str, temperature: float, max_tokens: int, deepresearch_on: bool) -> str:
        curr_prompt = self.init_summary_prompt(system_prompt, prompt)
        
        hf_token= os.environ['HF_TOKEN']

        headers = {
            "Accept" : "application/json",
            "Authorization": f"Bearer {hf_token}",
            "Content-Type": "application/json" 
        }

        # print(f"User Prompt:\n{curr_prompt}\n\n")

        response_summary = requests.post(
            url=f"{base_url}",
            headers=headers,
            json={
                "inputs": curr_prompt,
                "parameters": {
                    "temperature": temperature,
                    "max_new_tokens": max_tokens,
                    "top_p": 0.95,
                    "repetition_penalty": 1.05,
                },
            },
            timeout=300,
        ).json()
        
        if isinstance(response_summary, list):
            response_summary = response_summary[0]

        if isinstance(response_summary, dict) and "error" in response_summary:
            # Log the error as assistant text for visibility and break
            err_msg = f"[model_error] {response_summary.get('error')}"
            print("Got error response from summarising model:", err_msg, end="\n\n")

        assistant_text = response_summary.get("generated_text", "")
        
        if curr_prompt == assistant_text[:len(curr_prompt)]:
                assistant_text = assistant_text[len(curr_prompt):]
                
        # print(assistant_text)
        
        report = re.split(r"</think\s*>", assistant_text, flags=re.IGNORECASE)[-1]
        # plan = re.split(r"</think\s*>", assistant_text, flags=re.IGNORECASE)[0]
        
        # print(report, "\n\n")
        
        if not deepresearch_on:
            report = report.strip()
            # report = report[::-1]
            # str_find = "Final Answer:"
            # pos = report.find(str_find[::-1])
            # pos += len(str_find)
            # report = report[pos:][::-1]
            # report = report.rstrip('# \n-').strip(' \n-')
            
            start_tag = "<answer>"
            end_tag = "</answer>"
            pos_start = report.find(start_tag)
            pos_end = report[pos_start:].find(end_tag) + pos_start
            answer = report
            if pos_start != -1 and pos_end != -1:
                answer = report[pos_start + len(start_tag):pos_end].strip()
            
            str_find = "Final Answer:"
            if str_find in answer:
                answer = answer[::-1]
                pos = answer.find(str_find[::-1])
                pos += len(str_find)
                answer = answer[pos:][::-1]
                answer = answer.rstrip('# \n-').strip(' \n-')
            
            # print("answer:")
            # print(answer, "\n\n")
            
            return answer
        
        report = report.strip()
        report = report[::-1]
        str_find = "Sources used"
        pos = report.find(str_find[::-1])
        pos += len(str_find)
        report = report[pos:][::-1]
        report = report.rstrip('# \n-').strip(' \n-')
        
        if not report.startswith("##") and report.startswith("#"):
            report = "#" + report
        elif not report.startswith("##") and not report.startswith("#"):
            report = "## " + report
        
        # report = '\n\n' + report.strip()
        
        # print(report.find('Executive Summary'), report.find('#'))
        # print(f"'{report[:20]}'")
        
        # print(report,"\n\n")
        
        urls = {}
        count = 1
        
        while "[http" in report:
            start_idx = report.find("[http")
            end_idx = report.find("]", start_idx)
            if end_idx != -1:
                url_string = report[start_idx + 1:end_idx]
                url_list = []
                while len(url_string) > 0:
                    pos1 = url_string.find(";")
                    pos2 = url_string.find(",")
                    pos3 = url_string.find(" ")
                    
                    if pos1 == -1:
                        pos1 = len(url_string) + 1
                    if pos2 == -1:
                        pos2 = len(url_string) + 1
                    if pos3 == -1:
                        pos3 = len(url_string) + 1
                    
                    pos = min(pos1, pos2, pos3)
                    
                    if pos == len(url_string) + 1:
                        url = url_string
                    else:
                        url = url_string[:pos]
                    
                    url_list.append(url)

                    if pos < len(url_string):
                        url_string = url_string[pos + 1:].lstrip(" ,;")
                    else:
                        break
                
                report_new = report[:start_idx] + '(**'
                for url in url_list:
                    if url not in urls:
                        urls[url] = count
                        count += 1
                    report_new += f'[{urls[url]}], '
                report_new = report_new[:-2]
                report_new += '**)' + report[end_idx+1:]
                report = report_new
            else:
                break
            
        if len(urls) > 0:
            report += "\n\n## Sources used:\n"
            sorted_urls = sorted(urls.items(), key=lambda x: x[1])
            for url, idx in sorted_urls:
                report += f"- **{idx}**: {url}\n"
            report += '\n'
            # adding references (auto-removed in markdown)
            for url, idx in sorted_urls:
                report += f"[{idx}]: {url}\n"
                
        # print(report,"\n\n")
        
        return report
    
    def _route_and_summarize(
        self,
        summary_llm: str,
        system_prompt: str,
        prompt: str,
        *,
        temperature: float,
        max_tokens: int,
        deepresearch_on: bool,
    ) -> str:
        """
        If `summary_llm` starts with 'http', treat as vLLM base_url; else treat as an OpenAI model id.
        For vLLM, prepend [SYSTEM]/[USER] tags; for OpenAI, pass messages with system+user.
        """
        if not summary_llm.strip().lower().startswith("gpt-"):
            # print(system_prompt)
            # print(prompt)
            return self._call_hf_endpoint(summary_llm, system_prompt, prompt, temperature=temperature, max_tokens=max_tokens, deepresearch_on=deepresearch_on)

        else:
            client = self._openai_client()
            rsp = client.chat.completions.create(
                model=summary_llm,
                temperature=temperature,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user",   "content": prompt},
                ],
                max_tokens=max_tokens,
            )
            
            return rsp.choices[0].message.content or ""

    @retry(max=5, sleep=1, fallback={"score": 0})
    def run(
        self, 
        env: str, 
        func_schemas: str,
        question: str,
        tokenizer,
        model_url: str = "http://0.0.0.0:1214",
        temperature: float = 0.0,
        max_new_tokens: int = 40960,
        top_p: float = 0.6,
        old_prompt: Optional[str] = None,
        deepresearch_on: bool = True,
        summary_llm: str = "gpt-4.1-mini"
    ):
        # ) -> Tuple[str, List[str], List[Dict[str, str]]]:
        """
        Returns:
            curr_prompt: the final prompt buffer (with assistant/tool traces you maintain internally)
            all_tool_calls: flat list of all tool call strings extracted across steps
            chat: a lightweight chat transcript list[{"role": "...", "content": "..."}]
                β€’ 'user' items = the original question + aggregated tool responses
                β€’ 'assistant' items = model responses (and a compact line-list of tool calls)
        """
        # off_str = "\n\n**User has TURNED OFF search**. **DO NOT use search**. **Answer all questions YOURSELF**. **DO NOT use any tools**.\n**YOUR FIRST-RESPONSE WILL BE CONSIDERED AS THE FINAL ANSWER**. **YOU WILL NOT GET TO CALL TOOLS AND WAIT FOR TOOL RESULTS AND THEN ANSWER**.\n**YOU WON'T BE ALLOWED TO CHAT AND CALL TOOLS, IN A MULTI-TURN FASHION**. **YOU WILL CHAT IN A SINGLE-TURN FORMAT**.\n**SO MAKE SURE YOUR FIRST RESPONSE IS THE FINAL ANSWER**.\n"
        
        # if not search_on and (old_prompt is not None and self.sys_prompt_websailor_start not in old_prompt):
        #     question += off_str
        
        search_on = True
        
        if old_prompt is not None:
            old_prompt = self.replace_sys_prompt(old_prompt, func_schemas, search_on)
        
        # Build runtime prompt and initialize accumulators
        curr_prompt = self.init_prompt(func_schemas, question, old_prompt, search_on)
        all_tool_calls: List[str] = []
        chat: List[Dict[str, str]] = []

        # Seed transcript with JUST the question (no system prompt)
        chat.append({"role": "user", "content": question})
        
        for i in range(64):
            # Budget tokens for this step
            prompt_tokens = tokenizer(curr_prompt, return_tensors=None, add_special_tokens=False)["input_ids"]
            max_tokens_left = max(1, max_new_tokens - len(prompt_tokens) - 100)

            # ---- Model call (sglang/vLLM-style JSON) ----
            # If you switch to /v1/chat/completions, adjust accordingly.
            hf_token= os.environ['HF_TOKEN']

            headers = {
                "Accept" : "application/json",
                "Authorization": f"Bearer {hf_token}",
                "Content-Type": "application/json" 
            }

            # print(f"User Prompt:\n{curr_prompt}\n\n")

            response = requests.post(
                url=f"{model_url}",
                headers=headers,
                json={
                    "inputs": curr_prompt,
                    "parameters": {
                        "temperature": temperature,
                        "max_new_tokens": max_tokens_left,
                        "top_p": top_p,
                        "repetition_penalty": 1.05,
                    },
                },
                timeout=300,
            ).json()
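            # HF Inference Endpoints typically return a list containing a single
            # generation dict; unwrap it before checking for errors below.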
            
            if isinstance(response, list):
                response = response[0]

            if isinstance(response, dict) and "error" in response:
                # Log the error as assistant text for visibility and break
                err_msg = f"[model_error] {response.get('error')}"
                print("Got error response from model:", err_msg, end="\n\n")
                chat.append({"role": "assistant", "content": err_msg})
                break

            assistant_text = response.get("generated_text", "")
            
            if assistant_text.startswith(curr_prompt):
                # Some endpoints echo the prompt; keep only the newly generated suffix.
                assistant_text = assistant_text[len(curr_prompt):]

            # print(f"Assistant Text:\n{assistant_text}\n\n")

            # Append assistant's raw text to chat
            chat.append({"role": "assistant", "content": assistant_text})

            # Update your running prompt with assistant text
            curr_prompt = self.cat_assistant_response(curr_prompt, assistant_text)

            # Extract tool calls from the assistant text (none expected when search is off)
            tool_calls: List[str] = self.extract_tool_calls(assistant_text) if search_on else []

            # yield "assistant_resp", (assistant_text, tool_calls)

            if tool_calls:
                yield "assistant_resp", (assistant_text, tool_calls)
                all_tool_calls.extend(tool_calls)

                # Log tool calls as an assistant message (newline-joined)
                chat.append({"role": "assistant", "content": "\n".join(tool_calls)})

                # Execute tools and collect results
                results: List[str] = self.execute_tool_calls(env, tool_calls)
                
                yield "tool_results", (results, )

                # Feed tool results back into prompt
                curr_prompt = self.cat_tool_results(curr_prompt, tool_calls, results)

                # Aggregate tool responses into a single user message
                tool_res_blocks = []
                for idx, (call, res) in enumerate(zip(tool_calls, results), 1):
                    tool_res_blocks.append(f"[Tool {idx}] Result:\n{res}")
                chat.append({"role": "user", "content": "\n\n".join(tool_res_blocks)})

            else:
                if search_on:
                    prompt = self.build_summary_prompt(question, self.reformat_trace(curr_prompt) or "", all_tool_calls)
                    system_prompt = DEEPRESEARCH_SYS_PROMPT if deepresearch_on else SUMMARY_SYS_PROMPT

                    summary_text = self._route_and_summarize(
                        summary_llm=summary_llm if deepresearch_on else model_url,
                        system_prompt=system_prompt,
                        prompt=prompt,
                        temperature=0.6,
                        max_tokens=16000,
                        deepresearch_on=deepresearch_on
                    )
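                    # Reasoning summarizers emit "<think>...</think>final answer";
                    # split on the closing tag so the reasoning prefix and the final
                    # answer can be streamed as separate events below.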
                    
                    summary_text_splits = summary_text.split("</think>")
                    summary_text_initial = summary_text_splits[0]
                    summary_text_initial = summary_text_initial.replace("<think>", "").strip()
                    summary_text_final = summary_text_splits[-1]
                    
                    if len(summary_text_initial) > 0 and "</think>" in summary_text:
                        yield "assistant_resp", (summary_text_initial, [])
                        yield "tool_results", ([], )
                    yield "assistant_resp", (summary_text_final, tool_calls)
                    # print(f"No tool calls found in assistant response.\nAssistant Response:\n{assistant_text}\n\n")
                else:
                    yield "assistant_resp", (assistant_text, tool_calls)
                    print(f"Search is off, so no tool calls expected and no tool calls called.\nAssistant Response:\n{assistant_text}\n\n")
                # No tool calls β†’ model produced a final answer; stop.
                break

        # The transcript events were already yielded above; the final prompt buffer
        # is delivered as the generator's return value (surfaced to callers as
        # StopIteration.value).
        return "end", (curr_prompt, )

    @retry(max=5, sleep=1, fallback={"score": 0})
    def run_deepseek(
        self,
        env: str,
        func_schemas: str,
        question: str,
        model_name: str,
        temperature: float = 0.0,
        top_p: float = 0.95,
        max_tokens: int = 32768,
    ):
        # print("AA"* 100)
        """
        Chat-based ReCall loop for DeepSeek-R1 on Together.
        """
        sys_content = self.sys_prompt_websailor_deepseek.format(func_schemas=func_schemas)
        # sys_content = self.init_prompt(func_schemas, question)

        messages = [
            {"role": "system", "content": sys_content},
            {"role": "user",   "content": question},
        ]
    
        # Read the Together API key from the environment rather than hard-coding it.
        client = Together(api_key=os.environ.get("TOGETHER_API_KEY", ""))
        all_tool_calls = []
        for turn in range(32):  # up to 32 reasoning turns
            resp = client.chat.completions.create(
                model=model_name,
                # model="Qwen/Qwen3-235B-A22B-fp8-tput",  # alternative backbone
                messages=messages,
                temperature=temperature,
                top_p=top_p,
                max_tokens=max_tokens,
                stop=["<|end▁of▁sentence|>", "<|im_end|>"],
            )

            assistant_text = resp.choices[0].message.content
            messages.append({"role": "assistant", "content": assistant_text})

            # Extract tool calls from the assistant turn
            tool_calls = self.extract_tool_calls_deepseek(assistant_text)
            all_tool_calls += tool_calls

            if "<answer>" in assistant_text:
                break
    
            if len(tool_calls) != 0:
                results = self.execute_tool_calls(env, tool_calls)
                tool_resp_block = "".join(
                    f"<tool_response>{c}\n{r}\n</tool_response>\n"
                    for c, r in zip(tool_calls, results)
                )
                messages.append({"role": "user", "content": tool_resp_block})
                # print(f"Tool Response {tool_resp_block}")
            else:
                print("no answer or tool call")
                break

        trajectory = "\n".join(
            f"<{m['role']}>\n{m['content']}" for m in messages
            if m["role"] != "system"
        )
        return trajectory, all_tool_calls
    
    
    # ────────────────────────────────────────────────────────────────
    # HF-endpoint version of “retrieve → inject → tool loop”
    # ────────────────────────────────────────────────────────────────
    @retry(max=5, sleep=1, fallback=None)
    def run_with_prompt_injection(
        self,
        env: str,
        func_schemas: str,
        question: str,
        model_url: str = "http://0.0.0.0:1214",
        temperature: float = 0.0,
        max_new_tokens: int = 512,
        top_n: int = 5,
    ):
        """
        0) call pubmed_search(question, top_n) once via the sandbox
        1) inject those snippets into the very first user message
        2) continue with the normal multi-turn ReCall loop against *model_url*
        """

        # 0️⃣ do a single retrieval tool call
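        # The one-off retrieval is encoded as a JSON string with "name" and
        # "arguments" keys, mirroring the tool-call format passed to
        # execute_tool_calls elsewhere in this class.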
        retrieve_call = json.dumps({
            "name": "pubmed_search",
            "arguments": {"query": question, "top_n": top_n}
        })
        retrieval_raw = self.execute_tool_calls(env, [retrieve_call])[0]
        try:
            snippets_block = retrieval_raw.split("result:", 1)[-1].strip()
        except Exception:
            snippets_block = ""

        # 1️⃣ build initial prompt with injected snippets
        user_msg = (
            f"Question: {question}\n\n"
            "Here are some relevant PubMed snippets:\n"
            f"{snippets_block}"
        ) if snippets_block else f"Question: {question}"

        sys_prompt = self.init_prompt(func_schemas, question)
        system_prompt = f"<|im_start|>system\n{sys_prompt}<|im_end|>"
        user_prompt   = f"<|im_start|>user\n{user_msg}<|im_end|>"
        assistant_pref= f"<|im_start|>assistant\n<think>"
        curr_prompt   = system_prompt + "\n" + user_prompt + "\n" + assistant_pref
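        # The raw prompt is assembled in ChatML form (<|im_start|>role ... <|im_end|>),
        # ending with an open assistant turn and a <think> tag so the endpoint
        # continues the assistant's reasoning directly.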

        # 2️⃣ normal ReCall loop hitting the HF inference endpoint
        for _ in range(10):
            resp = requests.post(
                f"{model_url}/generate",
                json={
                    "text": curr_prompt,
                    "sampling_params": {
                        "temperature": temperature,
                        "max_new_tokens": max_new_tokens,
                    }
                },
                timeout=120,
            ).json()
            if "error" in resp.keys():
                print("resp", resp)
            assistant_txt = resp["text"]
            curr_prompt = self.cat_assistant_response(curr_prompt, assistant_txt)

            tool_calls = self.extract_tool_calls(assistant_txt)
            if  len(tool_calls) != 0:
                # break  # model produced an answer β†’ done

                results = self.execute_tool_calls(env, tool_calls)
                curr_prompt = self.cat_tool_results(curr_prompt, tool_calls, results)

            else:
                continue
        return curr_prompt
    


    @retry(max=5, sleep=1, fallback={"score": 0}) 
    def run_budget(
        self,
        env: str,
        func_schemas: str,
        question: str,
        model_url: str = "http://0.0.0.0:1214",
        temperature: float = 0.0,
        max_new_tokens: int = 2048,
    ) -> str:
        """
        Execute an agentic dialogue with external tools while *pruning* previous
        <tool_response> blocks to prevent context-length explosion.
        """
        curr_prompt = self.init_prompt(func_schemas, question)

        for _ in range(16):  # hard loop-limit
            # ── 1. Call the model 
            rsp = requests.post(
                f"{model_url}/generate",
                json={
                    "text": curr_prompt,
                    "sampling_params": {
                        "temperature": temperature,
                        "max_new_tokens": max_new_tokens,
                        "stop": ["<|im_end|>", "</think>", "</think>\n" "</think>\n\n"],
                    },
                   
                },
                timeout=120,
            ).json()
            generated = rsp["text"]                       # what you have now
            matched   = rsp["meta_info"]["finish_reason"].get("matched")

            # β‡’Β append the tag back only if it was removed
            if matched and not generated.endswith(matched):
                generated += matched

            # Fail fast on server error
            if "error" in rsp:
                raise RuntimeError(rsp["error"])

            assistant_text: str = rsp["text"]
            curr_prompt = self.cat_assistant_response(curr_prompt, assistant_text)

            # ── 2. Check for final answer ────────────────────────────────────
            if "<answer>" in assistant_text:
                break

            # ── 3. Extract & execute tool calls ──────────────────────────────
            tool_calls: List[str] = self.extract_tool_calls(assistant_text)
            if not tool_calls:        # continue reasoning without calling a tool
                continue

            results: List[str] = self.execute_tool_calls(env, tool_calls)


            # ── 4. BEFORE appending new tool output, drop all old ones ───────
            curr_prompt = self._strip_old_tool_responses(curr_prompt)

            # ── 5. Append *only* the fresh tool_response block ───────────────
            curr_prompt = self.cat_tool_results(curr_prompt, tool_calls, results)

        return curr_prompt

 

  
    def _strip_old_tool_responses_msgs(self, messages: list[dict]) -> list[dict]:
        """
        Return a copy of `messages` with every *user* message that starts with
        <tool_response> removed.  Keeps assistant turns untouched.
        """
        return [
            m for m in messages
            if not (m["role"] == "user" and m["content"].lstrip().startswith("<tool_response>"))
        ]
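
    # Example (hypothetical transcript) of the pruning behaviour:
    #     msgs = [
    #         {"role": "user", "content": "What is X?"},
    #         {"role": "assistant", "content": "<tool_call>...</tool_call>"},
    #         {"role": "user", "content": "<tool_response>old result</tool_response>"},
    #     ]
    #     self._strip_old_tool_responses_msgs(msgs)
    #     # -> keeps the question and the assistant turn, drops the stale tool_response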
    # ────────── budget version ──────────
    @retry(max=5, sleep=1, fallback={"score": 0})
    def run_deepseek_budget(
        self,
        env: str,
        func_schemas: str,
        question: str,
        api_key: str,
        model_name: str,
        temperature: float = 0.0,
        top_p: float = 0.95,
        max_tokens: int = 32768,
        max_turns: int = 10,
    ):
        """
        Chat-based ReCall loop for DeepSeek-R1 **with context-budget pruning**.
        Keeps only the *latest* <tool_response> block to avoid prompt bloat.
        """
        sys_content = self.system_prompt_budget.format(func_schemas=func_schemas)

        messages = [
            {"role": "system", "content": sys_content},
            {"role": "user",   "content": question},
        ]

        client = Together(api_key=api_key)

        for turn in range(max_turns):
            # ── 1. model call ───────────────────────────────────────────────
            resp = client.chat.completions.create(
                model=model_name,
                messages=messages,
                temperature=temperature,
                top_p=top_p,
                max_tokens=max_tokens,
                stop=["</tool_call>", "<|end▁of▁sentence|>"],
            )
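            # The </tool_call> stop token is meant to pause generation as soon as a
            # tool call is emitted, so its result can be fed back before the model
            # continues reasoning.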
            assistant_text = resp.choices[0].message.content
            messages.append({"role": "assistant", "content": assistant_text})

            print(f"**assistant** \n {assistant_text}")

            # ── 2. finished? ────────────────────────────────────────────────
            if "<answer>" in assistant_text:
                break

            # ── 3. parse tool calls ────────────────────────────────────────
            tool_calls = self.extract_tool_calls(assistant_text)
            print(f"**tool_calls** \n {tool_calls}")
            if not tool_calls:
                continue  # keep reasoning without tools

            # ── 4. execute tools ───────────────────────────────────────────
            results = self.execute_tool_calls(env, tool_calls)
            print(f"**tool_response** \n {results}")

            # ── 5. prune & append fresh tool_response ──────────────────────
            messages = self._strip_old_tool_responses_msgs(messages)

            tool_resp_block = "".join(
                f"<tool_response>{c}\n{r}\n</tool_response>\n"
                for c, r in zip(tool_calls, results)
            )
            messages.append({"role": "user", "content": tool_resp_block})

        # ── 6. flatten & return trajectory (sans system for readability) ───
        trajectory = "\n".join(
            f"<{m['role']}>\n{m['content']}" for m in messages if m["role"] != "system"
        )
        return trajectory


    @retry(max=5, sleep=1, fallback=None)
    def run_deepseek_with_prompt_injection(
        self,
        env: str,
        func_schemas: str,
        question: str,
        api_key: str,
        model_name: str,
        temperature: float = 0.0,
        top_p: float = 0.95,
        max_tokens: int = 32768,
    ):
        """
        1) Call pubmed_search(question, top_n=5) as a tool to get snippets.
        2) Inject them into the first user message.
        3) Proceed with the usual DeepSeek-R1 tool‐based rollout.
        """

        # ── Step 0: prepare the single‐tool call for retrieval ───────────────
        retrieve_call = json.dumps({
            "name": "pubmed_search",
            "arguments": {
                "query": question,
                "top_n": 5
            }
        })

        # Execute it once via your helper
        # note: `env` must include whatever import / client‐setup
        #          your sandbox needs to run pubmed_search(...)
        raw_retrieval_results = self.execute_tool_calls(env, [retrieve_call])[0]
        try:
            # Strip the "result:" prefix from the tool output (same handling as in
            # run_with_prompt_injection above)
            snippets = raw_retrieval_results.split("result:", 1)[-1].strip()
        except Exception:
            snippets = ""

        # ── Step 1: build the injected user prompt ────────────────────────────
        if snippets:
            
            user_content = (
                f"Question: {question}\n\n"
                "Here are some relevant PubMed snippets:\n"
                f"{snippets}"
            )
        else:
            user_content = f"Question: {question}"

        # ── Step 2: start the chat history ────────────────────────────────────
        sys_content = self.system_prompt_forcing_tool_call
        messages = [
            {"role": "system",  "content": sys_content},
            {"role": "user",    "content": user_content},
        ]
        client = Together(api_key=api_key)

        # ── Step 3: your normal ReCall tool‐calling loop ─────────────────────
        for turn in range(10):
            resp = client.chat.completions.create(
                model       = model_name,
                messages    = messages,
                temperature = temperature,
                top_p       = top_p,
                max_tokens  = max_tokens,
                stop        = ["</tool_call>", "<|end▁of▁sentence|>"]
            )

            assistant_text = resp.choices[0].message.content
            messages.append({"role": "assistant", "content": assistant_text})

            tool_calls = self.extract_tool_calls(assistant_text)
            if not tool_calls:
                break

            # Execute all of the tool calls in one go
            results = self.execute_tool_calls(env, tool_calls)
            # and append them back in the required <tool_response> format
            tool_resp_block = "".join(
                f"<tool_response>{call}\n{out}\n</tool_response>\n"
                for call, out in zip(tool_calls, results)
            )
            messages.append({"role": "user", "content": tool_resp_block})

        # ── Step 4: flatten to a single trajectory ────────────────────────────
        trajectory = "\n".join(
            f"<{m['role']}>\n{m['content']}"
            for m in messages
            if m["role"] != "system"
        )
        return trajectory