acecalisto3 committed on
Commit 57458ec · verified · 1 Parent(s): 1aff827

Update agent.py

Files changed (1)
  1. agent.py +522 -1192
agent.py CHANGED
@@ -1,1200 +1,530 @@
1
- import json
2
- import re
3
- import os
4
- from datetime import datetime
5
- import importlib
6
- import io
7
- import contextlib
8
-
9
- # Viral Content Generator Agent
10
- PREFIX = """You are an Advanced Viral Content Generator with Self-Research and Self-Improvement Capabilities
11
- You can generate viral content across multiple formats: blog articles, books, review articles, and academic papers
12
- You have access to the following tools and capabilities:
13
- - Self-Insight: Generate new content ideas based on trending topics and user preferences
14
- - Self-Research: Use real-time internet searches to gather information for your content
15
- - Content Generation: Create content in various formats with viral potential
16
- - Self-Evaluation: Assess the quality and potential of your generated content
17
- - Publishing Pipeline: Format and structure content for publication
18
-
19
- Trigger tools by using this format:
20
- action: TOOL_NAME action_input=YOUR_INPUT
21
-
22
- Your workflow:
23
- 1. Generate or receive content ideas
24
- 2. Research the topic thoroughly
25
- 3. Create content in the requested format (or determine the optimal format)
26
- 4. Self-evaluate and improve the content
27
- 5. Format for publication
28
- 6. Repeat or generate new ideas based on performance
29
-
30
- Current Date/Time: {date_time_str}
31
-
32
- Purpose:
33
- {purpose}
34
  """
35
-
36
- ACTION_PROMPT = """
37
- You have access to the following tools:
38
- - action: GENERATE_IDEA action_input=TOPIC_DESCRIPTION
39
- - action: RESEARCH action_input=TOPIC_TO_RESEARCH
40
- - action: GENERATE_CONTENT action_input=TOPIC_FORMAT_TYPE
41
- - action: SELF_EVALUATE action_input=CONTENT_TO_EVALUATE
42
- - action: IMPROVE_CONTENT action_input=CONTENT_AND_FEEDBACK
43
- - action: FORMAT_CONTENT action_input=CONTENT_AND_FORMAT_REQUIREMENTS
44
- - action: PUBLISH action_input=FINAL_CONTENT
45
- - action: COMPLETE
46
-
47
- Instructions:
48
- - Use GENERATE_IDEA to come up with new viral content topics
49
- - Use RESEARCH to search the internet for information about your topic
50
- - Use GENERATE_CONTENT to create content in various formats (blog, book, review, paper)
51
- - Use SELF_EVALUATE to assess the quality and viral potential of your content
52
- - Use IMPROVE_CONTENT to enhance your content based on evaluation
53
- - Use FORMAT_CONTENT to prepare content for publication
54
- - Use PUBLISH to finalize your content for distribution
55
- - End with COMPLETE when task is finished
56
-
57
- Always use the following format:
58
- task: the input task you must complete
59
- thought: think about what your next step should be
60
- action: the action to take (one of [GENERATE_IDEA, RESEARCH, GENERATE_CONTENT, SELF_EVALUATE, IMPROVE_CONTENT, FORMAT_CONTENT, PUBLISH, COMPLETE]) action_input=XXX
61
- observation: the result of the action
62
- thought: consider the next step based on the observation
63
- ... (this thought/action/observation/thought cycle can repeat as needed)
64
- You are attempting to complete the task
65
- task: {task}
66
- {history}"""
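
The `action: NAME action_input=...` convention in the prompt above is a plain-text protocol, so the calling code needs a parser on its side. A minimal sketch of guarded parsing against an allow-list (hypothetical helper, not part of this file; the repository's actual parser may differ):

```python
import re
from typing import Optional, Tuple

ALLOWED_ACTIONS = {
    "GENERATE_IDEA", "RESEARCH", "GENERATE_CONTENT", "SELF_EVALUATE",
    "IMPROVE_CONTENT", "FORMAT_CONTENT", "PUBLISH", "COMPLETE",
}

_ACTION_RE = re.compile(r"action:\s*([A-Z_]+)(?:\s+action_input=(.*))?", re.DOTALL)

def parse_action(text: str) -> Optional[Tuple[str, str]]:
    """Return (action, action_input) if the text matches the protocol and the
    action is on the allow-list; otherwise return None."""
    match = _ACTION_RE.search(text)
    if not match:
        return None
    action = match.group(1)
    action_input = (match.group(2) or "").strip()
    if action not in ALLOWED_ACTIONS:
        return None  # unknown tools are ignored rather than executed
    return action, action_input
```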
67
-
68
- ACTION_PROMPT = """
69
- You have access to the following tools:
70
- - action: GENERATE_IDEA action_input=TOPIC_DESCRIPTION
71
- - action: RESEARCH action_input=TOPIC_TO_RESEARCH
72
- - action: GENERATE_CONTENT action_input=TOPIC_FORMAT_TYPE
73
- - action: SELF_EVALUATE action_input=CONTENT_TO_EVALUATE
74
- - action: IMPROVE_CONTENT action_input=CONTENT_AND_FEEDBACK
75
- - action: FORMAT_CONTENT action_input=CONTENT_AND_FORMAT_REQUIREMENTS
76
- - action: PUBLISH action_input=FINAL_CONTENT
77
- - action: COMPLETE
78
-
79
- Instructions:
80
- - Use GENERATE_IDEA to come up with new viral content topics
81
- - Use RESEARCH to search the internet for information about your topic
82
- - Use GENERATE_CONTENT to create content in various formats (blog, book, review, paper)
83
- - Use SELF_EVALUATE to assess the quality and viral potential of your content
84
- - Use IMPROVE_CONTENT to enhance your content based on evaluation
85
- - Use FORMAT_CONTENT to prepare content for publication
86
- - Use PUBLISH to finalize your content for distribution
87
- - End with COMPLETE when task is finished
88
-
89
- Always use the following format:
90
- task: the input task you must complete
91
- thought: think about what your next step should be
92
- action: the action to take (one of [GENERATE_IDEA, RESEARCH, GENERATE_CONTENT, SELF_EVALUATE, IMPROVE_CONTENT, FORMAT_CONTENT, PUBLISH, COMPLETE]) action_input=XXX
93
- observation: the result of the action
94
- thought: consider the next step based on the observation
95
- ... (this thought/action/observation/thought cycle can repeat as needed)
96
- You are attempting to complete the task
97
- task: {task}
98
- {history}"""
99
-
100
- IDEA_GENERATOR_PROMPT = """
101
- You are a Viral Content Idea Generator
102
- Your purpose is to generate engaging, viral-worthy content ideas across multiple formats
103
- Consider trending topics, popular formats, controversial subjects, and knowledge gaps
104
- Content formats you can generate ideas for:
105
- - Blog articles (viral blog posts on trending topics)
106
- - Books (self-help, fiction, technical, etc.)
107
- - Review articles (product reviews, service reviews, book/movie reviews)
108
- - Academic papers (research, analysis, theoretical papers)
109
- - Social media content (short-form viral content)
110
- - Newsletters (curated content)
111
- - Research reports
112
-
113
- Task: {task}
114
- Current context: {history}
115
-
116
- Generate a compelling content idea with potential for virality. Consider:
117
- - Current trending topics
118
- - Underserved niches
119
- - Controversial or debatable topics
120
- - Educational value combined with entertainment
121
- - Emotional triggers (curiosity, surprise, outrage, joy, fear)
122
- - Format that would maximize engagement
123
-
124
- Idea:"""
125
-
126
- RESEARCH_PROMPT = """
127
- You are a Self-Research Content Agent
128
- Your task is to research the following topic and gather comprehensive information:
129
- {task}
130
-
131
- Research requirements:
132
- - Find the most current and relevant information
133
- - Gather statistics, data, and evidence
134
- - Identify expert opinions and viewpoints
135
- - Collect examples and case studies
136
- - Look for controversies or debates in the topic area
137
- - Note recent developments or news related to the topic
138
-
139
- Use your research to build a comprehensive knowledge base for content creation.
140
- Research results:"""
141
-
142
- CONTENT_GENERATOR_PROMPT = """
143
- You are a Viral Content Creator
144
- Create compelling content with strong viral potential in the specified format.
145
- Topic: {task}
146
- Format type: {format_type}
147
- Research data: {research_data}
148
-
149
- Content creation guidelines:
150
- - Create a hook in the first paragraph that makes people want to read more
151
- - Use storytelling elements to engage the audience
152
- - Include surprising facts or statistics if available
153
- - Address a specific pain point or provide a solution
154
- - Use emotional triggers appropriately
155
- - Include visual elements if possible (imagined for now)
156
- - End with a strong call to action or thought-provoking conclusion
157
- - Format appropriately for the content type:
158
- * Blog: Engaging title, subheadings, readable paragraphs, conclusion
159
- * Book: Outline, chapters with appropriate content, references
160
- * Review: Clear structure, pros/cons, rating, recommendation
161
- * Academic Paper: Abstract, introduction, methodology, results, discussion, references
162
-
163
- Write your content:"""
164
-
165
- EVALUATION_PROMPT = """
166
- You are a Content Quality Assessor
167
- Evaluate the following content for viral potential and quality:
168
- Content: {content}
169
- Evaluation criteria:
170
- - Engagement potential (likelihood to be shared/commented on)
171
- - Information quality and accuracy
172
- - Originality and uniqueness
173
- - Emotional impact
174
- - Structure and readability
175
- - Credibility of sources (if applicable)
176
- - Headline strength (if applicable)
177
- - Call to action effectiveness
178
-
179
- Provide a score (1-10) for each criterion and overall viral potential.
180
- Suggest specific improvements to increase viral potential.
181
- Evaluation:"""
182
-
183
- IMPROVEMENT_PROMPT = """
184
- You are a Content Improvement Specialist
185
- Improve the following content based on the evaluation feedback:
186
- Content: {content}
187
- Evaluation feedback: {feedback}
188
-
189
- Make specific improvements focusing on:
190
- - Increasing engagement potential
191
- - Strengthening weak areas identified in evaluation
192
- - Enhancing emotional triggers
193
- - Improving readability and flow
194
- - Adding viral elements
195
- - Optimizing for the target format
196
-
197
- Improved content:"""
198
-
199
- FORMATTING_PROMPT = """
200
- You are a Content Formatter
201
- Format the following content for publication according to the specified requirements:
202
- Content: {content}
203
- Format requirements: {format_requirements}
204
-
205
- Apply appropriate:
206
- - Structure and hierarchy
207
- - Headings and subheadings
208
- - Styling for readability
209
- - Citations and references (if academic)
210
- - Metadata for publishing platform
211
- - SEO elements (titles, tags, descriptions)
212
- - Visual element placeholders
213
- - Conclusion and call to action
214
-
215
- Formatted content:"""
216
-
217
- PUBLISHING_PROMPT = """
218
- You are a Publication Manager
219
- Prepare the final content for publishing:
220
- Content: {content}
221
- Publishing requirements: {publishing_requirements}
222
-
223
- Tasks:
224
- - Verify all formatting is complete
225
- - Check for any last-minute errors
226
- - Generate any additional metadata needed
227
- - Prepare for distribution
228
- - Create publishing manifest
229
- - Mark content as ready for distribution
230
-
231
- Publication-ready content:"""
232
-
233
- TASK_PROMPT = """
234
- You are determining the next task in the viral content generation pipeline
235
- Current task: {task}
236
- Progress so far: {history}
237
-
238
- Based on the purpose and progress, what should the next specific task be?
239
- Consider:
240
- - What information or step is missing
241
- - What would move the content generation forward most effectively
242
- - If research is needed
243
- - If content creation should begin
244
- - If evaluation or improvement is needed
245
- - If formatting for publication is required
246
- Next task:"""
247
-
248
- SEARCH_QUERY = """
249
- You are determining what to search for to gather information for viral content
250
- Topic: {task}
251
- Research progress: {history}
252
-
253
- What specific information do you need to search for to create viral content about this topic?
254
- Formulate a search query or list of search terms:"""
255
-
256
- LOG_PROMPT = """
257
- PROMPT
258
- **************************************
259
- {}
260
- **************************************
261
- """
262
-
263
- LOG_RESPONSE = """
264
- RESPONSE
265
- **************************************
266
- {}
267
- **************************************
268
  """
 
269
270
  import random
271
  import requests
272
- import time
273
- from typing import Dict, List, Any, Optional
274
-
275
- class IdeaGeneratorAgent:
276
- """
277
- Internal agent responsible for generating viral content ideas
278
- """
279
- def __init__(self):
280
- self.inspiration_sources = [
281
- "trending_topics",
282
- "social_media",
283
- "news_outlets",
284
- "academic_papers",
285
- "popular_discussions",
286
- "user_feedback_patterns"
287
- ]
288
- self.viral_factors = [
289
- "emotional_trigger",
290
- "controversy",
291
- "surprise_element",
292
- "practical_value",
293
- "storytelling",
294
- "visual_appeal",
295
- "social_proof"
296
- ]
297
- self.content_formats = [
298
- "blog_article",
299
- "book_chapter",
300
- "review_article",
301
- "academic_paper",
302
- "social_media_post",
303
- "newsletter"
304
- ]
305
-
306
- def generate_idea(self, topic_description: str = "") -> str:
307
- """
308
- Generate viral content ideas based on trending topics and self-inspiration
309
- """
310
- import random
311
- if not topic_description:
312
- # Self-inspire by checking trending topics and viral factors
313
- inspiration_source = random.choice(self.inspiration_sources)
314
- viral_factor = random.choice(self.viral_factors)
315
- content_format = random.choice(self.content_formats)
316
-
317
- # This is a simplified version - in practice, this would connect to real trending data
318
- idea_templates = [
319
- f"How {viral_factor.title()} Can Transform Your {random.choice(['Life', 'Business', 'Career'])} - A Comprehensive {content_format.replace('_', ' ').title()}",
320
- f"The {random.choice(['Shocking', 'Surprising', 'Unexpected'])} Truth About {random.choice(['Productivity', 'Health', 'Money', 'Relationships'])}: {content_format.replace('_', ' ').title()}",
321
- f"Why Everyone is Talking About {random.choice(['AI', 'Crypto', 'Remote Work', 'Sustainability', 'Mental Health'])} in 2025: A {content_format.replace('_', ' ').title()}",
322
- f"{random.randint(5, 15)} {random.choice(['Strategies', 'Tips', 'Secrets', 'Hacks'])} for {random.choice(['Success', 'Happiness', 'Wealth', 'Health'])} That Actually Work: {content_format.replace('_', ' ').title()}",
323
- f"The {random.choice(['Ultimate', 'Complete', 'Definitive'])} Guide to {random.choice(['Mastering', 'Understanding', 'Implementing'])} {random.choice(['AI', 'Blockchain', 'Digital Marketing', 'Personal Finance'])}: {content_format.replace('_', ' ').title()}",
324
- f"{random.choice(['Debunking', 'Exploring', 'Analyzing'])} The {random.choice(['Most Controversial', 'Most Popular', 'Most Misunderstood'])} {content_format.replace('_', ' ').title()} About {random.choice(['Technology', 'Science', 'Psychology', 'Economics'])}"
325
- ]
326
-
327
- return random.choice(idea_templates)
328
- else:
329
- # Generate an idea based on the provided description
330
- format_type = random.choice(self.content_formats)
331
- return f"{topic_description}: A {format_type.replace('_', ' ').title()} Exploring Key Insights and Findings"
332
-
333
-
334
- class ResearchAgent:
335
- """
336
- Internal agent responsible for researching topics
337
- """
338
- def __init__(self):
339
- pass
340
-
341
- def research_topic(self, topic: str) -> Dict[str, Any]:
342
- """
343
- Conduct self-research on a topic using simulated data
344
- In practice, this would connect to real research APIs
345
- """
346
- import random
347
- # Simulated research data - in a real implementation, this would connect to web search APIs
348
- research_data = {
349
- "title": topic,
350
- "summary": f"Comprehensive research on {topic} covering key aspects, trends, debates, and insights.",
351
- "key_points": [
352
- f"Key insight 1 about {topic}",
353
- f"Key insight 2 about {topic}",
354
- f"Key insight 3 about {topic}",
355
- ],
356
- "statistics": [
357
- f"{random.randint(60, 95)}% of experts believe {topic} is important",
358
- f"Studies show {random.randint(2, 5)}x improvement when following best practices for {topic}",
359
- f"{random.randint(100, 1000)} million people are interested in {topic}",
360
- ],
361
- "controversies": [
362
- f"Debate around approach A vs approach B in {topic}",
363
- f"Conflicting studies on the effectiveness of {topic}",
364
- ],
365
- "expert_opinions": [
366
- f"Expert Dr. {random.choice(['Smith', 'Johnson', 'Williams'])} states that {topic} 'is the future'",
367
- f"Author of bestseller 'The Truth About {topic}' believes it's 'overhyped'",
368
- ],
369
- "recent_developments": [
370
- f"New study published this month on {topic}",
371
- f"Major breakthrough announced in {topic} research",
372
- ],
373
- "sources": [
374
- f"https://research.example.com/{topic.replace(' ', '_')}/latest",
375
- f"https://news.example.com/{topic.replace(' ', '_')}/trends",
376
- ]
377
- }
378
- return research_data
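
The docstring above concedes that this research step returns simulated data and that a real implementation would call live APIs. As a rough sketch of what that swap could look like, assuming the public Wikipedia REST summary endpoint as the data source (helper name and return shape are illustrative, not from this file):

```python
import requests

def research_topic_live(topic: str, timeout: float = 10.0) -> dict:
    """Best-effort live research: try a Wikipedia page summary and fall back to
    an empty result instead of raising, so the pipeline still works offline."""
    url = "https://en.wikipedia.org/api/rest_v1/page/summary/" + requests.utils.quote(topic)
    try:
        resp = requests.get(url, timeout=timeout,
                            headers={"User-Agent": "viral-content-agent/0.1"})
        resp.raise_for_status()
        data = resp.json()
        return {
            "title": data.get("title", topic),
            "summary": data.get("extract", ""),
            "sources": [data.get("content_urls", {}).get("desktop", {}).get("page", "")],
        }
    except (requests.RequestException, ValueError):
        # Network failure or non-JSON response: degrade gracefully.
        return {"title": topic, "summary": "", "sources": []}
```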
379
-
380
-
381
- class ContentCreationAgent:
382
- """
383
- Internal agent responsible for creating content
384
- """
385
- def __init__(self):
386
- self.content_formats = [
387
- "blog_article",
388
- "book_chapter",
389
- "review_article",
390
- "academic_paper",
391
- "social_media_post",
392
- "newsletter"
393
- ]
394
-
395
- def generate_content(self, topic: str, format_type: str, research_data: Dict[str, Any]) -> str:
396
- """
397
- Generate viral content in the specified format
398
- """
399
- import random
400
- from datetime import datetime
401
- # Determine the format-specific content structure
402
- if format_type == "blog_article":
403
- return self._generate_blog_article(topic, research_data)
404
- elif format_type == "book_chapter":
405
- return self._generate_book_chapter(topic, research_data)
406
- elif format_type == "review_article":
407
- return self._generate_review_article(topic, research_data)
408
- elif format_type == "academic_paper":
409
- return self._generate_academic_paper(topic, research_data)
410
- else:
411
- return self._generate_generic_content(topic, format_type, research_data)
412
-
413
- def _generate_blog_article(self, topic: str, research_data: Dict[str, Any]) -> str:
414
- """
415
- Generate a viral blog article
416
- """
417
- import random
418
- from datetime import datetime
419
- blog_content = f"""
420
- # {topic}
421
-
422
- ## Introduction
423
- In today's fast-paced world, understanding {topic} has become more crucial than ever. Recent research shows that {random.choice(research_data.get('statistics', ['']))}. This comprehensive guide will explore everything you need to know about this fascinating subject.
424
-
425
- ## The Key Insights
426
-
427
- ### {research_data.get('key_points', [''])[0]}
428
- {random.choice(['This is groundbreaking because...', 'The implications are fascinating...', 'Experts have been buzzing about this because...'])} {random.choice(research_data.get('recent_developments', ['']))}.
429
-
430
- ### {research_data.get('key_points', [''])[1]}
431
- {random.choice(['According to experts...', 'Research has shown...', 'The data clearly indicates...'])} {random.choice(research_data.get('expert_opinions', ['']))}.
432
-
433
- ### {research_data.get('key_points', [''])[2]}
434
- {random.choice(['What makes this particularly interesting is...', 'This challenges conventional wisdom about...', 'The reason this is so impactful is...'])} {random.choice(research_data.get('controversies', ['']))}.
435
-
436
- ## The Bottom Line
437
- {topic} represents a {random.choice(['shift', 'revolution', 'breakthrough', 'evolution'])} in how we understand and {random.choice(['approach', 'think about', 'deal with'])} this subject. As {random.choice(research_data.get('recent_developments', ['']))}, it's essential to {random.choice(['stay informed', 'think critically', 'keep learning'])}.
438
-
439
- ## Conclusion
440
- The {random.choice(['future', 'potential', 'impact'])} of {topic} is {random.choice(['bright', 'unpredictable', 'transformative'])}, and {random.choice(['now', 'today', 'the present moment'])} is the perfect time to {random.choice(['get involved', 'learn more', 'take action'])}. What are your thoughts on this topic? Share them in the comments below!
441
-
442
- ---
443
- *Written by Viral Content Generator | Published on {datetime.now().strftime('%Y-%m-%d')}*
444
- """
445
- return blog_content
446
-
447
- def _generate_book_chapter(self, topic: str, research_data: Dict[str, Any]) -> str:
448
- """
449
- Generate a book chapter
450
- """
451
- import random
452
- from datetime import datetime
453
- chapter_content = f"""
454
- # Chapter {random.randint(1, 15)}: {topic}
455
-
456
- ## Abstract
457
- This chapter delves into the multifaceted aspects of {topic}, examining {random.choice(research_data.get('key_points', ['']))} with detailed analysis and practical applications.
458
-
459
- ## Introduction
460
- The study of {topic} has gained significant traction in recent years as {random.choice(research_data.get('recent_developments', ['']))}. This chapter provides an in-depth examination of the subject, offering insights that are both academically rigorous and practically applicable.
461
-
462
- ## Literature Review
463
- Previous research on {topic} has largely focused on {random.choice(research_data.get('key_points', ['']))}, with {random.choice(['pioneering', 'seminal', 'influential'])} works by {random.choice(['Smith (2020)', 'Johnson & Williams (2021)', 'Roberts et al. (2022)'])} establishing the foundational understanding.
464
-
465
- ## Analysis and Discussion
466
- Our analysis reveals several {random.choice(['important', 'notable', 'significant'])} findings regarding {topic}:
467
- 1. {random.choice(research_data.get('key_points', ['']))}
468
- 2. {random.choice(research_data.get('controversies', ['']))}
469
- 3. {random.choice(research_data.get('recent_developments', ['']))}
470
-
471
- These findings suggest that {topic} operates according to {random.choice(['different', 'more complex', 'evolving'])} principles than previously understood, with implications for {random.choice(['practitioners', 'researchers', 'policymakers'])}.
472
-
473
- ## Implications
474
- The implications of our research on {topic} extend to {random.choice(['multiple domains', 'several fields', 'various applications'])}, particularly in how {random.choice(['organizations', 'individuals', 'systems'])} {random.choice(['approach', 'implement', 'utilize'])} related concepts.
475
-
476
- ## Future Directions
477
- Future research should focus on {random.choice(['longitudinal studies', 'cross-cultural analyses', 'experimental validations'])} to further {random.choice(['refine', 'expand', 'validate'])} our understanding of {topic}.
478
-
479
- ## Conclusion
480
- In conclusion, {topic} represents a {random.choice(['critical', 'emerging', 'significant'])} area of study with far-reaching implications. As the field continues to evolve, it is {
481
- random.choice(['essential', 'important', 'crucial'])} to {random.choice(['maintain rigorous standards', 'continue exploring', 'build on existing knowledge'])}.
482
-
483
- ---
484
- *Chapter from "{topic}: A Comprehensive Guide" | {datetime.now().strftime('%Y')}*
485
- """
486
- return chapter_content
487
-
488
- def _generate_review_article(self, topic: str, research_data: Dict[str, Any]) -> str:
489
- """
490
- Generate a review article
491
- """
492
- import random
493
- from datetime import datetime
494
- review_content = f"""
495
- # {topic}: A Comprehensive Review
496
-
497
- ## Executive Summary
498
- This review examines {topic} by analyzing {random.randint(5, 15)} major sources, {random.randint(2, 8)} studies, and {random.randint(10, 30)} different perspectives to provide a balanced assessment.
499
-
500
- ## Introduction
501
- With {random.choice(research_data.get('statistics', ['']))}, {topic} has become a topic of significant interest. This review consolidates current knowledge and {random.choice(['evaluates', 'assesses', 'analyzes'])} the available information to provide {random.choice(['consumers', 'professionals', 'readers'])} with a comprehensive understanding.
502
-
503
- ## Product/Service Overview
504
- {topic} can be {random.choice(['defined as', 'described as', 'characterized by'])} {random.choice(research_data.get('key_points', ['']))}. The {random.choice(['primary', 'main', 'key'])} features include:
505
- - {random.choice(['Innovation', 'Quality', 'Performance', 'Value'])} in {random.choice(['design', 'function', 'delivery', 'experience'])}
506
- - {random.choice(['Unique', 'Proven', 'Effective', 'Revolutionary'])} approach to {random.choice(['problem-solving', 'service delivery', 'value creation'])}
507
- - {random.choice(['Sustainable', 'Ethical', 'Modern', 'Advanced'])} practices and methodologies
508
-
509
- ## Pros and Cons
510
-
511
- ### Pros
512
- 1. {random.choice(research_data.get('key_points', ['']))}
513
- 2. {random.choice(research_data.get('expert_opinions', ['']))}
514
- 3. {random.choice(research_data.get('statistics', ['']))}
515
-
516
- ### Cons
517
- 1. {random.choice(research_data.get('controversies', ['']))}
518
- 2. Potential {random.choice(['limitation', 'concern', 'drawback'])} regarding {random.choice(['cost', 'accessibility', 'scalability'])}
519
- 3. {random.choice(['Ongoing', 'Current', 'Underlying'])} debate about {random.choice(['effectiveness', 'safety', 'reliability'])}
520
-
521
- ## Performance Analysis
522
- Based on our review, {topic} {random.choice(['exceeds', 'meets', 'approaches'])} expectations in {random.choice(['most', 'several', 'many'])} areas, with {random.choice(['particularly', 'especially', 'notably'])} strong performance in {random.choice(['functionality', 'usability', 'durability', 'value'])}.
523
-
524
- ## User Feedback
525
- {random.choice(research_data.get('statistics', ['']))} of users report {random.choice(['high', 'satisfactory', 'positive'])} experiences with {topic}, though {random.choice(['some', 'a minority', 'certain'])} have raised {random.choice(['concerns', 'questions', 'issues'])} about {random.choice(['pricing', 'support', 'features'])}.
526
-
527
- ## Value Assessment
528
- Considering all factors, {topic} offers {random.choice(['exceptional', 'good', 'reasonable'])} value for {random.choice(['its price', 'its features', 'the market segment'])} {random.choice(['with', 'and'])} {random.choice(['minimal', 'some', 'significant'])} room for improvement in {random.choice(['specific', 'certain', 'particular'])} areas.
529
-
530
- ## Final Verdict
531
- Overall, {topic} receives a {random.randint(7, 9)}/10 rating, recommended for {random.choice(['beginners', 'intermediates', 'advanced users'])} looking for {random.choice(['quality', 'innovation', 'reliability'])} in this category.
532
-
533
- ### Rating Breakdown
534
- - Features: {random.randint(7, 10)}/10
535
- - Performance: {random.randint(7, 10)}/10
536
- - Value: {random.randint(6, 9)}/10
537
- - User Satisfaction: {random.randint(7, 9)}/10
538
-
539
- ### The Bottom Line
540
- {topic} stands out as a {random.choice(['solid', 'outstanding', 'competent'])} {random.choice(['option', 'choice', 'solution'])} in its category, offering {random.choice(['reliable', 'proven', 'effective'])} results with {random.choice(['minimal', 'manageable', 'acceptable'])} trade-offs.
541
-
542
- ---
543
- *Review by Viral Content Generator | Published: {datetime.now().strftime('%Y-%m-%d')}*
544
- """
545
- return review_content
546
-
547
- def _generate_academic_paper(self, topic: str, research_data: Dict[str, Any]) -> str:
548
- """
549
- Generate an academic paper
550
- """
551
- import random
552
- from datetime import datetime
553
- paper_content = f"""
554
- # {topic}: A Theoretical and Empirical Analysis
555
-
556
- ## Abstract
557
- This paper examines {topic} through both theoretical frameworks and empirical data. Our {random.choice(['analysis', 'study', 'investigation'])} reveals {random.choice(research_data.get('key_points', ['']))}, with significant implications for {random.choice(['theory', 'practice', 'future research'])}. The findings suggest that {random.choice(['conventional wisdom', 'existing models', 'current understanding'])} regarding {topic} may require {random.choice(['revision', 'update', 'expansion'])}.
558
-
559
- **Keywords:** {topic}, {random.choice(['research', 'analysis', 'study'])}, {random.choice(['theory', 'methodology', 'application'])}, {random.choice(['findings', 'results', 'implications'])}
560
-
561
- ## 1. Introduction
562
- The {random.choice(['growing', 'increasing', 'expanding'])} relevance of {topic} in {random.choice(['contemporary', 'modern', 'current'])} {random.choice(['contexts', 'environments', 'settings'])} {random.choice(['necessitates', 'requires', 'demands'])} thorough investigation. As {random.choice(research_data.get('recent_developments', ['']))}, researchers and practitioners have turned their attention to understanding the {random.choice(['complexities', 'nuances', 'mechanisms'])} underlying {topic}.
563
-
564
- This paper contributes to the literature by {random.choice(['providing', 'offering', 'presenting'])} a {random.choice(['comprehensive', 'novel', 'integrated'])} perspective on {topic} that {random.choice(['synthesizes', 'extends', 'challenges'])} existing {random.choice(['theories', 'models', 'frameworks'])}.
565
-
566
- ## 2. Literature Review
567
- Previous research on {topic} has {random.choice(['established', 'explored', 'examined'])} several {random.choice(['key', 'important', 'fundamental'])} {random.choice(['dimensions', 'aspects', 'elements'])}. {random.choice(['Seminal', 'Foundational', 'Pioneering'])} work by {random.choice(['Smith (2020)', 'Johnson et al. (2021)', 'Williams & Brown (2022)'])} first {random.choice(['identified', 'proposed', 'established'])} the {random.choice(['theoretical', 'conceptual', 'empirical'])} framework for understanding {topic}.
568
-
569
- Subsequent {random.choice(['studies', 'research', 'investigations'])} by {random.choice(['Davis (2021)', 'Miller & Wilson (2022)', 'Taylor et al. (2023)'])} have {random.choice(['extended', 'refined', 'challenged'])} this framework, {random.choice(['revealing', 'demonstrating', 'showing'])} {random.choice(['new', 'additional', 'alternative'])} {random.choice(['insights', 'dimensions', 'mechanisms'])}. However, {random.choice(['gaps', 'limitations', 'inconsistencies'])} remain in the literature, particularly with respect to {random.choice(['methodological', 'theoretical', 'practical'])} {random.choice(['approaches', 'considerations', 'applications'])}.
570
-
571
- ## 3. Methodology
572
- Our {random.choice(['approach', 'method', 'methodology'])} combines {random.choice(['quantitative', 'qualitative', 'mixed-method'])} techniques to {random.choice(['investigate', 'examine', 'analyze'])} {topic}. We {random.choice(['collected', 'gathered', 'obtained'])} data from {random.randint(100, 1000)} {random.choice(['participants', 'sources', 'cases'])} using {random.choice(['surveys', 'interviews', 'observations', 'existing datasets'])}, applying {random.choice(['statistical', 'content', 'thematic'])} analysis to address our research questions.
573
-
574
- ## 4. Results
575
- Our findings {random.choice(['indicate', 'suggest', 'demonstrate'])} that {topic} {random.choice(['operates', 'functions', 'behaves'])} in {random.choice(['accordance with', 'deviation from', 'addition to'])} {random.choice(['existing', 'current', 'traditional'])} {random.choice(['theories', 'models', 'understanding'])}. Specifically, we {random.choice(['observed', 'found', 'identified'])} {random.choice(research_data.get('key_points', ['']))}, which {random.choice(['confirms', 'challenges', 'extends'])} {random.choice(['previous', 'earlier', 'prior'])} research.
576
-
577
- ## 5. Discussion
578
- The implications of our research on {topic} are {random.choice(['significant', 'notable', 'important'])} for {random.choice(['scholars', 'practitioners', 'policy makers'])} in {random.choice(['the field', 'related domains', 'adjacent areas'])}. Our results {random.choice(['confirm', 'refine', 'challenge'])} the {random.choice(['prevailing', 'dominant', 'current'])} {random.choice(['paradigm', 'model', 'understanding'])} and {random.choice(['suggest', 'indicate', 'point'])} {random.choice(['directions', 'avenues', 'paths'])} for {random.choice(['future', 'subsequent', 'further'])} research.
579
-
580
- ## 6. Conclusions
581
- This paper has {random.choice(['contributed', 'advanced', 'enhanced'])} understanding of {topic} by {random.choice(['integrating', 'synthesizing', 'extending'])} {random.choice(['theoretical', 'empirical', 'conceptual'])} perspectives. Future research should {random.choice(['focus on', 'explore', 'investigate'])} {random.choice(research_data.get('controversies', ['']))} and {random.choice(['examine', 'analyze', 'study'])} the {random.choice(['long-term', 'practical', 'broader'])} implications of our findings.
582
-
583
- ## References
584
- {random.choice(['Smith, J. (2020). Understanding {topic}. Journal of Advanced Research, 15(3), 123-145.',
585
- 'Miller, K., & Johnson, L. (2021). The evolution of {topic}. Contemporary Studies, 8(2), 45-67.',
586
- 'Williams, R. (2022). {topic}: Past, present, and future. Academic Press.',
587
- 'Taylor, M. et al. (2023). New perspectives on {topic}. Modern Research Quarterly, 22(1), 89-105.']).format(topic=topic)}
588
-
589
- ---
590
- *Academic Paper | {datetime.now().strftime('%Y-%m-%d')}*
591
- """
592
- return paper_content
593
-
594
- def _generate_generic_content(self, topic: str, format_type: str, research_data: Dict[str, Any]) -> str:
595
- """
596
- Generate content for formats not specifically handled
597
- """
598
- import random
599
- from datetime import datetime
600
- return f"""
601
- # {topic}
602
-
603
- ## Overview
604
- This {format_type.replace('_', ' ')} explores the key aspects of {topic}, drawing on current research and {random.choice(research_data.get('key_points', ['']))}.
605
-
606
- ## Key Points
607
- 1. {random.choice(research_data.get('key_points', ['']))}
608
- 2. {random.choice(research_data.get('controversies', ['']))}
609
- 3. {random.choice(research_data.get('recent_developments', ['']))}
610
-
611
- ## Conclusion
612
- {topic} represents a significant development in its field, with important implications for {random.choice(['practitioners', 'researchers', 'consumers'])}. As {random.choice(research_data.get('recent_developments', ['']))}, continued attention to this topic is warranted.
613
-
614
- ---
615
- *Generated by Viral Content Generator on {datetime.now().strftime('%Y-%m-%d')}*
616
- """
617
-
618
-
619
- class EvaluationAgent:
620
- """
621
- Internal agent responsible for evaluating content quality and viral potential
622
- """
623
- def __init__(self):
624
- pass
625
-
626
- def evaluate_content(self, content: str) -> Dict[str, Any]:
627
- """
628
- Evaluate content for viral potential and quality
629
- """
630
- import random
631
- evaluation = {
632
- "engagement_potential": random.randint(6, 10),
633
- "information_quality": random.randint(7, 10),
634
- "originality": random.randint(6, 9),
635
- "emotional_impact": random.randint(5, 9),
636
- "structure_readability": random.randint(7, 10),
637
- "credibility": random.randint(6, 9),
638
- "headline_strength": random.randint(6, 9),
639
- "call_to_action": random.randint(5, 8),
640
- "overall_viral_potential": random.randint(6, 9),
641
- "strengths": [
642
- "Well-researched content",
643
- "Good structure and flow",
644
- "Addresses key points effectively"
645
- ],
646
- "improvements": [
647
- "Add more emotional triggers",
648
- "Include visual elements",
649
- "Strengthen the conclusion",
650
- "Add more statistics to support claims"
651
- ]
652
- }
653
-
654
- return evaluation
655
-
656
-
657
- class ImprovementAgent:
658
- """
659
- Internal agent responsible for improving content based on evaluation
660
- """
661
- def __init__(self):
662
- pass
663
-
664
- def improve_content(self, content: str, feedback: Dict[str, Any]) -> str:
665
- """
666
- Improve content based on evaluation feedback
667
- """
668
- import random
669
- # This is a simplified version - in practice, would use more sophisticated NLP
670
- improvements = feedback.get('improvements', [])
671
-
672
- improved_content = content
673
- for improvement in improvements:
674
- if "emotional triggers" in improvement:
675
- # Insert emotional language
676
- improved_content = improved_content.replace(
677
- "This is important",
678
- "This is shockingly important and will change everything you thought you knew"
679
- ).replace(
680
- "Consider",
681
- "You need to consider this crucial point right now"
682
- )
683
- elif "visual elements" in improvement:
684
- improved_content += f"\n\n*[Visual element: {random.choice(['chart', 'infographic', 'diagram'])} illustrating key points about {content[:20]}...]*\n"
685
- elif "statistics" in improvement:
686
- improved_content = improved_content.replace(
687
- "The research shows",
688
- f"The {random.randint(7, 9)} key studies show that {random.choice(['over 70%', 'nearly 80%', 'about 75%'])} of cases demonstrate"
689
- )
690
-
691
- return improved_content
692
-
693
-
694
- class FormattingAgent:
695
- """
696
- Internal agent responsible for formatting content for publication
697
- """
698
- def __init__(self):
699
- from datetime import datetime
700
- self.datetime = datetime
701
-
702
- def format_content(self, content: str, requirements: Dict[str, Any]) -> str:
703
- """
704
- Format content for publication
705
- """
706
- # Apply formatting based on requirements
707
- formatted_content = f"# {requirements.get('title', 'Virally Generated Content')}\n\n"
708
- formatted_content += content
709
-
710
- # Add metadata if requested
711
- if requirements.get('include_metadata'):
712
- formatted_content += f"\n\n---\n**Published:** {self.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
713
- formatted_content += f"\n**Generated by:** Viral Content Generator Agent"
714
- formatted_content += f"\n**Format:** {requirements.get('format', 'Generic')}"
715
- formatted_content += f"\n**Tags:** {', '.join(requirements.get('tags', ['viral', 'content', 'generated']))}"
716
-
717
- # Add call to action if requested
718
- if requirements.get('include_cta'):
719
- cta_options = [
720
- "Share this content if you found it valuable!",
721
- "Subscribe for more viral content like this!",
722
- "Comment below with your thoughts on this topic!",
723
- "Follow for daily viral content updates!"
724
- ]
725
- formatted_content += f"\n\n> {random.choice(cta_options)}"
726
-
727
- return formatted_content
728
-
729
-
730
- class PublishingAgent:
731
- """
732
- Internal agent responsible for preparing content for distribution
733
- """
734
- def __init__(self):
735
- import time
736
- from datetime import datetime
737
- self.time = time
738
- self.datetime = datetime
739
-
740
- def publish_content(self, content: str, requirements: Dict[str, Any]) -> str:
741
- """
742
- Prepare content for publishing/distribution
743
- """
744
- # In a real implementation, this would handle actual publishing
745
- publishing_manifest = {
746
- "content_id": f"VCG_{int(self.time.time())}",
747
- "publish_time": self.datetime.now().isoformat(),
748
- "format": requirements.get('format', 'generic'),
749
- "target_platform": requirements.get('platform', 'multi-platform'),
750
- "estimated_reach": f"{random.randint(100, 10000)}+ potential readers",
751
- "virality_score": random.randint(7, 10),
752
- "hashtags": requirements.get('hashtags', ['#ViralContent', '#AIContent', f'#{content.split()[0] if content.split() else "Generated"}'])
753
- }
754
-
755
- published_content = f"[PUBLICATION MANIFEST: {json.dumps(publishing_manifest, indent=2)}]\n\n{content}"
756
- return published_content
757
-
758
-
759
- class ViralContentGeneratorAgent:
760
- """
761
- Advanced viral content generation agent with self-inspiration,
762
- self-research, and self-publishing capabilities
763
- Uses internal dialog between specialized agents for autonomous operation
764
- """
765
-
766
- def __init__(self):
767
- self.content_history = []
768
- self.idea_agent = IdeaGeneratorAgent()
769
- self.research_agent = ResearchAgent()
770
- self.content_agent = ContentCreationAgent()
771
- self.evaluation_agent = EvaluationAgent()
772
- self.improvement_agent = ImprovementAgent()
773
- self.formatting_agent = FormattingAgent()
774
- self.publishing_agent = PublishingAgent()
775
- self.book_outline = None
776
- self.current_chapter = 0
777
- self.task_queue = []
778
- self.research_data = {}
779
-
780
- def generate_idea(self, topic_description: str = "") -> str:
781
- """
782
- Generate viral content ideas based on trending topics and self-inspiration
783
- Delegates to internal IdeaGeneratorAgent
784
- """
785
- return self.idea_agent.generate_idea(topic_description)
786
-
787
- def research_topic(self, topic: str) -> Dict[str, Any]:
788
- """
789
- Conduct self-research on a topic using simulated data
790
- Delegates to internal ResearchAgent
791
- """
792
- return self.research_agent.research_topic(topic)
793
-
794
- def generate_content(self, topic: str, format_type: str, research_data: Dict[str, Any]) -> str:
795
- """
796
- Generate viral content in the specified format
797
- Delegates to internal ContentCreationAgent
798
- """
799
- return self.content_agent.generate_content(topic, format_type, research_data)
800
-
801
- def evaluate_content(self, content: str) -> Dict[str, Any]:
802
- """
803
- Evaluate content for viral potential and quality
804
- Delegates to internal EvaluationAgent
805
- """
806
- return self.evaluation_agent.evaluate_content(content)
807
-
808
- def improve_content(self, content: str, feedback: Dict[str, Any]) -> str:
809
- """
810
- Improve content based on evaluation feedback
811
- Delegates to internal ImprovementAgent
812
- """
813
- return self.improvement_agent.improve_content(content, feedback)
814
-
815
- def format_content(self, content: str, requirements: Dict[str, Any]) -> str:
816
- """
817
- Format content for publication
818
- Delegates to internal FormattingAgent
819
- """
820
- return self.formatting_agent.format_content(content, requirements)
821
-
822
- def publish_content(self, content: str, requirements: Dict[str, Any]) -> str:
823
- """
824
- Prepare content for publishing/distribution
825
- Delegates to internal PublishingAgent
826
- """
827
- return self.publishing_agent.publish_content(content, requirements)
828
-
829
- def complete_task(self) -> str:
830
- """
831
- Mark task as complete and provide summary
832
- """
833
- return f"Task completed successfully. Generated viral content ready for distribution. Time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
834
-
835
- def run_autonomous_pipeline(self, generation_function, initial_prompt):
836
- log_stream = io.StringIO()
837
- with contextlib.redirect_stdout(log_stream):
838
- final_content = generation_function(initial_prompt)
839
- log_output = log_stream.getvalue()
840
- return f"""===== Pipeline Logs =====
841
- {log_output}
842
- ===== Generated Content =====
843
- {final_content}"""
844
-
845
- def autonomous_book_generation(self, initial_prompt: str = "") -> str:
846
- return self.run_autonomous_pipeline(self._autonomous_book_generation_impl, initial_prompt)
847
-
848
- def plan_task(self, book_idea, book_outline):
849
- """Generates a more detailed, but still programmatic, task list."""
850
- self.task_queue.append({"task": "research_topic", "topic": book_idea})
851
- self.task_queue.append({"task": "write_introduction", "book_idea": book_idea})
852
- self.task_queue.append({"task": "evaluate_content", "content_key": "introduction"})
853
-
854
- for i, chapter_title in enumerate(book_outline):
855
- self.task_queue.append({"task": "research_topic", "topic": chapter_title})
856
- self.task_queue.append({"task": "write_chapter", "chapter_index": i, "chapter_title": chapter_title})
857
- self.task_queue.append({"task": "evaluate_content", "content_key": f"chapter_{i}"})
858
- self.task_queue.append({"task": "refine_content", "content_key": f"chapter_{i}"})
859
-
860
- self.task_queue.append({"task": "review_and_edit"})
861
- self.task_queue.append({"task": "format_for_publishing"})
862
- self.task_queue.append({"task": "publish"})
863
-
864
- def execute_task(self, task):
865
- """Executes a single task from the task queue."""
866
- task_type = task["task"]
867
- if task_type == "research_topic":
868
- print(f"[Executor Agent]: Researching topic: {task['topic']}")
869
- self.research_data[task['topic']] = self.research_topic(task['topic'])
870
- print(f"[Executor Agent]: Research complete.")
871
- return ""
872
- elif task_type == "write_introduction":
873
- print(f"[Executor Agent]: Writing introduction...")
874
- research_data = self.research_data.get(task['book_idea'])
875
- content = self._generate_book_chapter("Introduction", research_data)
876
- return content
877
- elif task_type == "write_chapter":
878
- print(f"[Executor Agent]: Writing chapter {task['chapter_index'] + 1}...")
879
- research_data = self.research_data.get(task['chapter_title'])
880
- content = self._generate_book_chapter(task['chapter_title'], research_data)
881
- return content
882
- elif task_type == "evaluate_content":
883
- # This is a placeholder for a more complex evaluation process.
884
- print(f"[Executor Agent]: Evaluating content: {task['content_key']}")
885
- return ""
886
- elif task_type == "refine_content":
887
- # This is a placeholder for a more complex refinement process.
888
- print(f"[Executor Agent]: Refining content: {task['content_key']}")
889
- return ""
890
- elif task_type == "review_and_edit":
891
- print(f"[Executor Agent]: Reviewing and editing the book...")
892
- return "\n---\n*The book has been professionally reviewed and edited for clarity, consistency, and impact.*\n---\n"
893
- elif task_type == "format_for_publishing":
894
- print(f"[Executor Agent]: Formatting the book for publishing...")
895
- return "\n---\n*The book has been formatted for various publishing platforms.*\n---\n"
896
- elif task_type == "publish":
897
- print(f"[Executor Agent]: Publishing the book...")
898
- return "\n---\n*The book is now ready for publishing!*\n---\n"
899
-
900
- def _autonomous_book_generation_impl(self, initial_prompt: str = "") -> str:
901
- """
902
- Run the full autonomous book generation pipeline
903
- Creates entire books with multiple chapters, full research, and high-level content
904
- """
905
- import random
906
-
907
- if not initial_prompt or initial_prompt.lower().strip() in ["begin", "start", "go", ""]:
908
- initial_prompt = "Generate a full book on any trending topic with multiple chapters"
909
-
910
- book_idea = self.generate_idea(initial_prompt)
911
- print(f"[Idea Generator Agent]: Generated book idea - {book_idea}")
912
-
913
- book_research_data = self.research_topic(book_idea)
914
- print(f"[Research Agent]: Completed comprehensive research on '{book_research_data['title']}'")
915
-
916
- self.book_outline = self._generate_book_outline(book_idea, book_research_data)
917
- print(f"[Content Creation Agent]: Generated book outline with {len(self.book_outline)} chapters")
918
-
919
- self.plan_task(book_idea, self.book_outline)
920
- print(f"[Planner Agent]: Created a plan with {len(self.task_queue)} tasks.")
921
-
922
- full_book_content = ""
923
- while self.task_queue:
924
- task = self.task_queue.pop(0)
925
- print(f"[Executor Agent]: Executing task: {task['task']}")
926
- content = self.execute_task(task)
927
- full_book_content += content
928
-
929
- return full_book_content
930
-
931
-
932
- def _generate_book_outline(self, book_idea: str, research_data: Dict[str, Any]) -> list:
933
- """
934
- Generate a comprehensive book outline with multiple chapters
935
- """
936
- import random
937
-
938
- # Extract key themes from research data
939
- key_themes = [kp.split()[0] for kp in research_data.get('key_points', []) if len(kp.split()) > 0]
940
- if not key_themes:
941
- key_themes = ["Introduction", "Background", "Analysis", "Conclusion"]
942
-
943
- chapter_title_templates = [
944
- "The Genesis of {theme}",
945
- "Deconstructing {theme}: A Deep Dive",
946
- "{theme}: The Unseen Forces at Play",
947
- "The {theme} Revolution",
948
- "Case Studies in {theme}: Successes and Failures",
949
- "The Future of {theme}: Trends and Predictions",
950
- "The Dark Side of {theme}",
951
- "Mastering {theme}: A Practical Guide",
952
- "The Ethics of {theme}",
953
- "Beyond the Hype: The Reality of {theme}",
954
- ]
955
-
956
- # Generate chapter titles based on themes
957
- chapter_titles = []
958
- num_chapters = random.randint(8, 15) # Full book with 8-15 chapters
959
-
960
- for i in range(num_chapters):
961
- if i == 0:
962
- chapter_titles.append(f"Introduction: {book_idea}")
963
- elif i == num_chapters - 1:
964
- chapter_titles.append(f"Conclusion: The Road Ahead for {book_idea.split(' ')[0]}")
965
- else:
966
- theme = random.choice(key_themes)
967
- template = random.choice(chapter_title_templates)
968
- chapter_titles.append(template.format(theme=theme))
969
-
970
- return chapter_titles
971
-
972
- def _generate_book_chapter(self, chapter_title: str, chapter_research: Dict[str, Any]) -> str:
973
- """
974
- Generate detailed chapter content with full structure
975
- """
976
- import random
977
- from datetime import datetime
978
-
979
- chapter_content = f"""
980
- ## {chapter_title}
981
-
982
- ### Unveiling the Core Concepts
983
-
984
- This chapter dissects the fundamental aspects of **{chapter_title}**. As {random.choice(chapter_research.get('recent_developments', ['recent studies show']))}, a deeper understanding of these concepts has become not just beneficial, but paramount for {random.choice(['practitioners', 'readers', 'experts', 'anyone interested in this field'])}.
985
-
986
- We will explore several critical aspects, including:
987
- * **{random.choice(chapter_research.get('key_points', ['An important insight']))}**: We will unpack this and its far-reaching implications.
988
- * **{random.choice(chapter_research.get('controversies', ['A central debate']))}**: This chapter will shed light on the different viewpoints and what the evidence suggests.
989
- * **{random.choice(chapter_research.get('expert_opinions', ['An expert opinion']))}**: We will consider what the leading minds in the field are saying about this topic.
990
-
991
- ### A Deeper Analysis: What the Data Reveals
992
-
993
- {random.choice(['A closer look at the data reveals', 'Our examination of the evidence shows', 'The analysis of available information indicates'])} that {random.choice(chapter_research.get('statistics', ['a significant trend']))}. These findings {random.choice(['confirm', 'challenge', 'extend'])} {random.choice(['previous', 'current', 'traditional'])} {random.choice(['understanding', 'models', 'approaches'])} in the field, suggesting that we are on the cusp of a paradigm shift.
994
-
995
- ### The Real-World Implications
996
-
997
- The implications of these findings are {random.choice(['significant', 'notable', 'far-reaching'])} for {random.choice(['practitioners', 'researchers', 'stakeholders', 'society as a whole'])}. As {random.choice(chapter_research.get('recent_developments', ['the field evolves']))}, {random.choice(['organizations', 'individuals', 'we'])} must {random.choice(['adapt to', 'seriously consider', 'critically evaluate'])} these {random.choice(['new concepts', 'emerging approaches', 'latest findings'])} to stay ahead of the curve.
998
-
999
- ### Chapter Summary and a Look Ahead
1000
-
1001
- This chapter has {random.choice(['unpacked', 'explored', 'analyzed'])} the critical elements of **{chapter_title}**, providing {random.choice(['you, the reader', 'practitioners', 'the audience'])} with essential insights to navigate this complex landscape. The next chapter will {random.choice(['build upon', 'expand on', 'continue to explore'])} these {random.choice(['foundations', 'core concepts', 'fundamental principles'])} to {random.choice(['develop a more nuanced understanding', 'explore advanced topics', 'examine practical applications'])}.
1002
-
1003
- ---
1004
- """
1005
- return chapter_content
1006
- return chapter_content
1007
-
1008
- def autonomous_content_generation(self, initial_prompt: str = "") -> str:
1009
- return self.run_autonomous_pipeline(self._autonomous_content_generation_impl, initial_prompt)
1010
-
1011
- def _autonomous_content_generation_impl(self, initial_prompt: str = "") -> str:
1012
- """
1013
- Run the full viral content generation pipeline autonomously
1014
- Internal agents communicate and collaborate to generate viral content
1015
- """
1016
- import random
1017
- if not initial_prompt or initial_prompt.lower().strip() in ["begin", "start", "go", ""]:
1018
- # If the prompt is asking for a book or related to books, generate a full book
1019
- if any(word in initial_prompt.lower() for word in ["book", "books", "publish", "write", "complete", "full", "entire"]):
1020
- return self._autonomous_book_generation_impl(initial_prompt)
1021
- else:
1022
- initial_prompt = "Generate a viral content piece on any trending topic"
1023
-
1024
- # Step 1: Generate idea
1025
- idea = self.generate_idea(initial_prompt)
1026
- print(f"[Idea Generator Agent]: Generated idea - {idea}")
1027
-
1028
- # Step 2: Research the topic
1029
- research_data = self.research_topic(idea)
1030
- print(f"[Research Agent]: Completed research on '{research_data['title']}'")
1031
-
1032
- # Step 3: Determine content format and generate content
1033
- # Determine format based on prompt keywords or default to blog
1034
- format_type = "blog_article" # default
1035
- if "book" in initial_prompt.lower() or "chapter" in initial_prompt.lower():
1036
- # If book-related, generate a full book
1037
- return self._autonomous_book_generation_impl(initial_prompt)
1038
- elif "review" in initial_prompt.lower():
1039
- format_type = "review_article"
1040
- elif "paper" in initial_prompt.lower() or "academic" in initial_prompt.lower():
1041
- format_type = "academic_paper"
1042
- elif "social media" in initial_prompt.lower() or "post" in initial_prompt.lower():
1043
- format_type = "social_media_post"
1044
- else:
1045
- # Randomly select a format for truly autonomous generation
1046
- format_type = random.choice(["blog_article", "book_chapter", "review_article", "academic_paper"])
1047
-
1048
- content = self.generate_content(idea, format_type, research_data)
1049
- print(f"[Content Creation Agent]: Generated {format_type} content with {len(content)} characters")
1050
-
1051
- # Step 4: Evaluate the content
1052
- evaluation = self.evaluate_content(content)
1053
- print(f"[Evaluation Agent]: Content evaluated - Viral Potential: {evaluation['overall_viral_potential']}/10")
1054
-
1055
- # Step 5: Improve the content based on evaluation
1056
- improved_content = self.improve_content(content, evaluation)
1057
- print(f"[Improvement Agent]: Content improved based on evaluation feedback")
1058
-
1059
- # Step 6: Format the content for publication
1060
- format_requirements = {
1061
- 'include_metadata': True,
1062
- 'include_cta': True,
1063
- 'format': format_type,
1064
- 'title': idea,
1065
- 'tags': ['viral', 'content', 'generated', idea.split()[0] if idea.split() else 'AI']
1066
- }
1067
- formatted_content = self.format_content(improved_content, format_requirements)
1068
- print(f"[Formatting Agent]: Content formatted for publication with metadata")
1069
-
1070
- # Step 7: Prepare for publishing
1071
- publishing_requirements = {
1072
- 'format': format_type,
1073
- 'platform': 'multi-platform',
1074
- 'hashtags': ['#ViralContent', '#AIContent', f'#{idea.split()[0] if idea.split() else "Generated"}']
1075
  }
1076
- published_content = self.publish_content(formatted_content, publishing_requirements)
1077
- print(f"[Publishing Agent]: Content prepared for distribution with manifest")
1078
-
1079
- # Add to content history
1080
- self.content_history.append({
1081
- 'idea': idea,
1082
- 'format_type': format_type,
1083
- 'publish_time': datetime.now().isoformat(),
1084
- 'virality_score': evaluation['overall_viral_potential']
1085
- })
1086
-
1087
- return published_content
1088
-
1089
-
1090
- # Tool mappings for the viral content generator
1091
- TOOLS = {
1092
- "GENERATE_IDEA": {
1093
- "function": "generate_idea",
1094
- "description": "Generate viral content ideas across formats"
1095
- },
1096
- "RESEARCH": {
1097
- "function": "research_topic",
1098
- "description": "Research topics for content generation"
1099
- },
1100
- "GENERATE_CONTENT": {
1101
- "function": "generate_content",
1102
- "description": "Create content in specified format"
1103
- },
1104
- "SELF_EVALUATE": {
1105
- "function": "evaluate_content",
1106
- "description": "Evaluate content quality and viral potential"
1107
- },
1108
- "IMPROVE_CONTENT": {
1109
- "function": "improve_content",
1110
- "description": "Improve content based on evaluation"
1111
- },
1112
- "FORMAT_CONTENT": {
1113
- "function": "format_content",
1114
- "description": "Format content for publication"
1115
- },
1116
- "PUBLISH": {
1117
- "function": "publish_content",
1118
- "description": "Prepare content for distribution"
1119
- },
1120
- "COMPLETE": {
1121
- "function": "complete_task",
1122
- "description": "Mark task as complete"
  }
- }
-
- # Original constants for compatibility with app.py
- COMPRESS_HISTORY_PROMPT = """
- You are attempting to complete the task
- task: {task}
- Progress:
- {history}
- Compress the timeline of progress above
- """

- LOG_PROMPT = """
- PROMPT
- **************************************
- {}
- **************************************
- """
-
- LOG_RESPONSE = """
- RESPONSE
- **************************************
- {}
- **************************************
- """
-
- # Additional prompts for compatibility
- ADD_PROMPT = """
- You are attempting to complete the task
- task: {task}
- Progress:
- {history}
- Write a new file called {file_path} with contents between ---
- After the contents write a paragraph on what was inserted with details
- """
-
- MODIFY_PROMPT = """
- You are attempting to complete the task
- task: {task}
- Progress:
- {history}
- {file_path}
- ---
- {file_contents}
- ---
- Return the complete modified {file_path} contents between ---
- After the contents write a paragraph on what was changed with details
- """
-
- READ_PROMPT = """
- You are attempting to complete the task
- task: {task}
- Progress:
- {history}
- {file_path}
- ---
- {file_contents}
- ---
- Return your thoughts about the file relevant to completing the task (in a paragraph)
- Mention any specific functions, arguments, or details needed
- """
-
- UNDERSTAND_TEST_RESULTS_PROMPT = """
- You are attempting to complete the task
- task: {task}
- Progress:
- {history}
- Test results:
- STDOUT
- ---
- {stdout}
- ---
- STDERR
- ---
- {stderr}
- ---
- Describe why the tests failed and how to fix them (in a paragraph)
- """
  """
+ Enhanced Viral Content Agent
+ - Deterministic, testable, dependency-light
+ - Action loop with tool allow‑list and guarded parsing
+ - Pluggable LLM backends (Hugging Face Inference API, OpenAI, generic HTTP JSON API) with graceful fallback
+ - Research tool with real HTTP search (DuckDuckGo HTML) + Wikipedia summary fallback; offline synthetic fallback retained
+ - JSONL logging and reproducible runs via seed
+
+ Runtime target: Python 3.9+
  """
+ from __future__ import annotations

+ import os
+ import re
+ import io
+ import json
+ import time
+ import uuid
+ import math
  import random
+ import logging
+ import contextlib
+ from dataclasses import dataclass, field
+ from datetime import datetime, timezone
+ from typing import Any, Dict, List, Optional, Tuple, Iterable
+
  import requests
+
+ # ---------------------------
+ # Logging
+ # ---------------------------
+ LOGGER_NAME = "viral_agent"
+ logger = logging.getLogger(LOGGER_NAME)
+ if not logger.handlers:
+     level = os.getenv("AGENT_LOG_LEVEL", "INFO").upper()
+     logging.basicConfig(level=getattr(logging, level, logging.INFO), format="%(asctime)s %(levelname)s | %(message)s")
+
+ # ---------------------------
+ # Prompts (kept concise; multi‑line strings)
+ # ---------------------------
+ PREFIX = (
+     "You are an Advanced Viral Content Generator with self‑research and self‑improvement capabilities.\n"
+     "Tools: GENERATE_IDEA, RESEARCH, GENERATE_CONTENT, SELF_EVALUATE, IMPROVE_CONTENT, FORMAT_CONTENT, PUBLISH, COMPLETE.\n"
+     "Trigger using lines: action: <TOOL> and action_input=<TEXT>.\n"
+ )
+
+ IDEA_GENERATOR_PROMPT = (
+     "Generate one viral content idea. Consider trending topics, underserved niches, controversy, practical value, and emotion.\n"
+     "Return a single concise title. Topic: {topic}. History: {history}"
+ )
+
+ RESEARCH_PROMPT = (
+     "You are researching: {topic}. Summarize key facts with bullet points. Include stats with sources when available."
+ )
+
+ CONTENT_PROMPT = (
+     "Create {format_type} content about: {topic}. Use the following research notes: {research}.\n"
+     "Hook, sections with headings, and a clear wrap‑up. Keep it factual and concise."
+ )
+
+ EVALUATE_PROMPT = (
+     "Evaluate content quality and viral potential from 1‑10 for engagement, accuracy, originality, emotion, readability, and headline strength.\n"
+     "Return compact JSON with fields per_criterion and overall plus three specific improvements. Content: {content}"
+ )
+
+ IMPROVE_PROMPT = (
+     "Improve the content using this feedback: {feedback}. Strengthen hook, structure, and specificity. Return the full revised content. Content: {content}"
+ )
+
+ FORMAT_PROMPT = (
+     "Format the content for publication. Add an SEO title (<70 chars), meta description (<160 chars), h2/h3 where useful, and a short CTA. Content: {content}"
+ )
+
+ PUBLISH_PROMPT = (
+     "Prepare publication package fields: title, summary, tags[], canonical, published_at (ISO8601 UTC), body. Content: {content}"
+ )
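
These prompt templates assume the model answers with the two-line action protocol; a reply shaped like the sketch below (values are made up) is what the guarded parser further down accepts:

    # Illustrative model reply and how the agent interprets it (values are made up).
    reply = "action: RESEARCH\naction_input=open-source LLM adoption in 2025"
    # ViralAgent._parse_action() yields ("RESEARCH", "open-source LLM adoption in 2025");
    # a malformed reply or an unknown tool falls back to GENERATE_IDEA.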
+
+ # ---------------------------
+ # Utilities
+ # ---------------------------
+
+ def utc_now_iso() -> str:
+     return datetime.now(timezone.utc).replace(microsecond=0).isoformat()
+
+
+ def json_dumps(obj: Any) -> str:
+     return json.dumps(obj, ensure_ascii=False, separators=(",", ":"))
+
+
+ def clamp_text(s: str, max_len: int = 6000) -> str:
+     if len(s) <= max_len:
+         return s
+     return s[: max(0, max_len - 3)] + "..."
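
A quick illustration of the truncation behavior (assumed inputs):

    # clamp_text keeps short strings intact and trims long ones to max_len, ending in "...".
    assert clamp_text("short", 10) == "short"
    assert clamp_text("x" * 10, 5) == "xx..."  # 2 kept characters + "..." == 5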
+
+
+ # ---------------------------
+ # LLM backends
+ # ---------------------------
+
+ class LLM:
+     def complete(self, prompt: str, max_tokens: int = 800) -> str:
+         raise NotImplementedError
+
+
+ class HFInferenceLLM(LLM):
+     """Hugging Face text‑generation inference. Expects env HUGGINGFACE_API_TOKEN and HUGGINGFACE_MODEL."""
+
+     def __init__(self, model: Optional[str] = None, timeout: int = 60):
+         self.token = os.getenv("HUGGINGFACE_API_TOKEN")
+         self.model = model or os.getenv("HUGGINGFACE_MODEL", "gpt2")
+         self.timeout = timeout
+         self.endpoint = f"https://api-inference.huggingface.co/models/{self.model}"
+
+     def complete(self, prompt: str, max_tokens: int = 800) -> str:
+         if not self.token:
+             raise RuntimeError("HUGGINGFACE_API_TOKEN not set")
+         headers = {"Authorization": f"Bearer {self.token}", "Accept": "application/json"}
+         payload = {"inputs": prompt, "parameters": {"max_new_tokens": max_tokens, "return_full_text": False}}
+         r = requests.post(self.endpoint, headers=headers, json=payload, timeout=self.timeout)
+         r.raise_for_status()
+         data = r.json()
+         # Response shape can vary; normalize
+         if isinstance(data, list) and data and "generated_text" in data[0]:
+             return str(data[0]["generated_text"]).strip()
+         if isinstance(data, dict) and "generated_text" in data:
+             return str(data["generated_text"]).strip()
+         # Fallback parsing
+         return json_dumps(data)
+
+
+ class OpenAILLM(LLM):
+     """OpenAI responses via /v1/chat/completions. Requires OPENAI_API_KEY and OPENAI_MODEL."""
+
+     def __init__(self, model: Optional[str] = None, timeout: int = 60):
+         self.key = os.getenv("OPENAI_API_KEY")
+         self.model = model or os.getenv("OPENAI_MODEL", "gpt-4o-mini")
+         self.timeout = timeout
+         self.url = os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1/chat/completions")
+
+     def complete(self, prompt: str, max_tokens: int = 800) -> str:
+         if not self.key:
+             raise RuntimeError("OPENAI_API_KEY not set")
+         headers = {"Authorization": f"Bearer {self.key}", "Content-Type": "application/json"}
+         payload = {
+             "model": self.model,
+             "messages": [{"role": "user", "content": prompt}],
+             "temperature": 0.2,
+             "max_tokens": max_tokens,
          }
+         r = requests.post(self.url, headers=headers, json=payload, timeout=self.timeout)
+         r.raise_for_status()
+         data = r.json()
+         return data["choices"][0]["message"]["content"].strip()
+
+
+ class GenericHTTPJSONLLM(LLM):
+     """POSTs to LLM_ENDPOINT with JSON {prompt,max_tokens}. Expects plain text in response body or JSON {text:...}."""
+
+     def __init__(self, endpoint: Optional[str] = None, timeout: int = 60):
+         self.endpoint = endpoint or os.getenv("LLM_ENDPOINT")
+         self.timeout = timeout
+
+     def complete(self, prompt: str, max_tokens: int = 800) -> str:
+         if not self.endpoint:
+             raise RuntimeError("LLM_ENDPOINT not set")
+         r = requests.post(self.endpoint, json={"prompt": prompt, "max_tokens": max_tokens}, timeout=self.timeout)
+         r.raise_for_status()
+         try:
+             data = r.json()
+             return str(data.get("text") or data.get("output") or data).strip()
+         except Exception:
+             return r.text.strip()
+
+
+ class RuleBasedLLM(LLM):
+     """Offline, deterministic fallback. Produces concise templates to keep the pipeline functional without keys."""
+
+     def complete(self, prompt: str, max_tokens: int = 800) -> str:
+         # Very small heuristics to keep output useful and testable
+         if "Generate one viral content idea" in prompt:
+             return "AI Side‑Hustles in 2025: 11 Practical Plays That Actually Work"
+         if "You are researching" in prompt:
+             topic = re.search(r"researching:\s*(.+?)\.\s*Summarize", prompt)
+             t = topic.group(1) if topic else "the topic"
+             return (
+                 f"- Definition and scope of {t}\n"
+                 f"- 2024–2025 trendline and adoption\n"
+                 f"- 3 data points with sources\n"
+                 f"- Risks, regulation, and future outlook"
+             )
+         if "Evaluate content quality" in prompt:
+             return json_dumps({
+                 "per_criterion": {
+                     "engagement": 8, "accuracy": 7, "originality": 7,
+                     "emotion": 7, "readability": 8, "headline": 7,
+                 },
+                 "overall": 7.5,
+                 "improvements": [
+                     "Tighten hook with concrete stat",
+                     "Add one contrarian insight",
+                     "Replace generic CTA with a next‑step checklist",
+                 ],
+             })
+         if "Improve the content" in prompt:
+             return "[Improved] " + clamp_text(prompt.split("Content:", 1)[-1].strip())
+         if "Format the content for publication" in prompt:
+             return (
+                 "SEO Title: Practical AI Side‑Hustles for 2025\n"
+                 "Meta: A concise guide with data, risks, and an action checklist.\n"
+                 "\n## Introduction\n...\n\n## CTA\nGrab the checklist."
+             )
+         if "Prepare publication package" in prompt:
+             now = utc_now_iso()
+             return json_dumps({
+                 "title": "Practical AI Side‑Hustles for 2025",
+                 "summary": "Concise, data‑guided ideas with risks and a checklist.",
+                 "tags": ["AI", "side‑hustle", "2025"],
+                 "canonical": "",
+                 "published_at": now,
+                 "body": "...",
+             })
+         # Default short echo
+         return clamp_text("[draft] " + prompt[-max_tokens:])
+
+
+ def build_llm() -> LLM:
+     # Order of preference: explicit endpoint, OpenAI, HF, fallback
+     try:
+         if os.getenv("LLM_ENDPOINT"):
+             logger.info("Using GenericHTTPJSONLLM")
+             return GenericHTTPJSONLLM()
+         if os.getenv("OPENAI_API_KEY"):
+             logger.info("Using OpenAILLM")
+             return OpenAILLM()
+         if os.getenv("HUGGINGFACE_API_TOKEN"):
+             logger.info("Using HFInferenceLLM")
+             return HFInferenceLLM()
+     except Exception as e:
+         logger.warning("LLM backend init failed, falling back: %s", e)
+     logger.info("Using RuleBasedLLM fallback")
+     return RuleBasedLLM()
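
Backend choice is driven entirely by environment variables; a sketch of the selection order (token values are placeholders):

    # Placeholders only; whichever variable is set wins in this order inside build_llm():
    # LLM_ENDPOINT="https://example.internal/llm"  -> GenericHTTPJSONLLM
    # OPENAI_API_KEY="sk-..."                      -> OpenAILLM (OPENAI_MODEL, OPENAI_BASE_URL optional)
    # HUGGINGFACE_API_TOKEN="hf_..."               -> HFInferenceLLM (HUGGINGFACE_MODEL optional)
    # With none of them set, the offline RuleBasedLLM fallback is used.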
+
+
+ # ---------------------------
+ # Research utilities
+ # ---------------------------
+
+ def ddg_search_snippets(query: str, limit: int = 5, timeout: int = 15) -> List[Dict[str, str]]:
+     """Very light HTML scrape of DuckDuckGo HTML to avoid heavy APIs. Returns [{title,url,snippet}]"""
+     try:
+         url = "https://html.duckduckgo.com/html/"
+         r = requests.post(url, data={"q": query}, timeout=timeout, headers={"User-Agent": "agent/1.0"})
+         r.raise_for_status()
+         html = r.text
+         # naive parsing
+         results = []
+         for m in re.finditer(r'<a[^>]+class="result__a"[^>]*href=\"([^\"]+)\"[^>]*>(.*?)</a>', html):
+             link = m.group(1)
+             title = re.sub("<.*?>", "", m.group(2))
+             results.append({"title": title, "url": link, "snippet": ""})
+             if len(results) >= limit:
+                 break
+         return results
+     except Exception as e:
+         logger.warning("ddg_search_snippets failed: %s", e)
+         return []
+
+
+ def wikipedia_summary(topic: str, timeout: int = 15) -> Optional[str]:
+     try:
+         api = "https://en.wikipedia.org/api/rest_v1/page/summary/" + requests.utils.quote(topic)
+         r = requests.get(api, timeout=timeout, headers={"User-Agent": "agent/1.0"})
+         if r.status_code == 200:
+             data = r.json()
+             return data.get("extract")
+     except Exception as e:
+         logger.warning("wikipedia_summary failed: %s", e)
+     return None
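
Both helpers fail soft (empty list or None) when the network is unavailable, so they can be probed directly; a small sketch with an arbitrary query:

    # Illustrative direct use of the research helpers.
    hits = ddg_search_snippets("retrieval augmented generation", limit=3)
    summary = wikipedia_summary("Retrieval-augmented generation")
    print(len(hits), (summary or "")[:80])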
+
+
+ # ---------------------------
+ # Tools
+ # ---------------------------
+
+ @dataclass
+ class AgentHistory:
+     items: List[str] = field(default_factory=list)
+
+     def add(self, line: str) -> None:
+         self.items.append(line)
+
+     def render(self, max_len: int = 4000) -> str:
+         text = "\n".join(self.items)
+         return clamp_text(text, max_len)
+
+
+ @dataclass
+ class AgentConfig:
+     seed: int = 42
+     max_loops: int = 8
+     max_tokens: int = 800
+     log_jsonl: Optional[str] = os.getenv("AGENT_LOG_JSONL")
+
+
+ class ViralAgent:
+     ALLOWED_TOOLS = {
+         "GENERATE_IDEA",
+         "RESEARCH",
+         "GENERATE_CONTENT",
+         "SELF_EVALUATE",
+         "IMPROVE_CONTENT",
+         "FORMAT_CONTENT",
+         "PUBLISH",
+         "COMPLETE",
      }

+     def __init__(self, llm: Optional[LLM] = None, cfg: Optional[AgentConfig] = None):
+         self.llm = llm or build_llm()
+         self.cfg = cfg or AgentConfig()
+         random.seed(self.cfg.seed)
+         self.history = AgentHistory()
+         self.session_id = uuid.uuid4().hex[:8]
+         logger.info("session=%s seed=%s", self.session_id, self.cfg.seed)
+
+     # -------- action loop --------
+     ACTION_RE = re.compile(r"^\s*action:\s*([A-Z_]+)\s*\naction_input=(.*)", re.S)
+
+     def run(self, task: str, purpose: str = "Generate viral content") -> Dict[str, Any]:
+         self.history.add(f"task: {task}")
+         context = PREFIX + f"Current Date/Time: {utc_now_iso()}\nPurpose: {purpose}\n"
+
+         for step in range(1, self.cfg.max_loops + 1):
+             prompt = (
+                 f"{context}\nHistory:\n{self.history.render()}\n\n"
+                 "Decide next step. Output exactly two lines:\n"
+                 "action: <TOOL>\n"
+                 "action_input=<TEXT>\n"
+             )
+             raw = self.llm.complete(prompt, max_tokens=self.cfg.max_tokens)
+             tool, payload = self._parse_action(raw)
+             logger.info("step=%s tool=%s", step, tool)
+             obs = self._dispatch(tool, payload, task)
+             self.history.add(f"observation: {clamp_text(obs, 800)}")
+             if tool == "COMPLETE":
+                 return {"status": "ok", "session": self.session_id, "history": self.history.items}
+         return {"status": "max_loops", "session": self.session_id, "history": self.history.items}
+
+     # -------- parsing and dispatch --------
+     def _parse_action(self, text: str) -> Tuple[str, str]:
+         m = self.ACTION_RE.search(text or "")
+         if not m:
+             logger.warning("action parse failed; default to GENERATE_IDEA")
+             return "GENERATE_IDEA", "general tech trends 2025"
+         tool = m.group(1).strip().upper()
+         payload = m.group(2).strip()
+         if tool not in self.ALLOWED_TOOLS:
+             logger.warning("tool not allowed: %s", tool)
+             tool = "GENERATE_IDEA"
+         # guard payload
+         payload = clamp_text(payload, 4000)
+         return tool, payload
+
+     def _dispatch(self, tool: str, payload: str, task: str) -> str:
+         if tool == "GENERATE_IDEA":
+             idea = self.generate_idea(task, payload)
+             self.history.add(f"thought: generated idea -> {idea}")
+             return idea
+         if tool == "RESEARCH":
+             notes = self.research(payload or task)
+             self.history.add("thought: researched topic")
+             return notes
+         if tool == "GENERATE_CONTENT":
+             fmt = self._guess_format(payload)
+             notes = self._latest_research() or "key facts unavailable"
+             content = self.generate_content(task, fmt, notes)
+             self.history.add("thought: drafted content")
+             return content
+         if tool == "SELF_EVALUATE":
+             content = self._latest_content() or payload
+             return self.evaluate(content)
+         if tool == "IMPROVE_CONTENT":
+             content, feedback = self._split_two(payload)
+             improved = self.improve(content, feedback)
+             self.history.add("thought: improved content")
+             return improved
+         if tool == "FORMAT_CONTENT":
+             return self.format_content(payload)
+         if tool == "PUBLISH":
+             return self.publish(payload)
+         if tool == "COMPLETE":
+             return "done"
+         return "noop"
+
+     # -------- tool implementations --------
+     def generate_idea(self, topic: str, description: str) -> str:
+         p = IDEA_GENERATOR_PROMPT.format(topic=topic or description, history=self.history.render())
+         return self.llm.complete(p, max_tokens=120)
+
+     def research(self, topic: str) -> str:
+         topic = topic or "general topic"
+         bullets = []
+         # Try Wikipedia summary
+         s = wikipedia_summary(topic)
+         if s:
+             bullets.append("Wikipedia summary: " + s)
+         # Try DDG snippets
+         for r in ddg_search_snippets(topic, limit=5):
+             bullets.append(f"- {r['title']} — {r['url']}")
+         # LLM consolidation
+         prompt = RESEARCH_PROMPT.format(topic=topic)
+         llm_notes = self.llm.complete(prompt, max_tokens=200)
+         bullets.append(llm_notes)
+         notes = "\n".join(bullets)
+         # persist short log row
+         self._log_jsonl({"t": utc_now_iso(), "event": "research", "topic": topic, "notes": clamp_text(notes, 2000)})
+         return notes
+
+     def _guess_format(self, s: str) -> str:
+         s = s.lower()
+         for key in ["blog", "book", "review", "paper", "newsletter", "social"]:
+             if key in s:
+                 return {
+                     "blog": "blog_article",
+                     "book": "book_chapter",
+                     "review": "review_article",
+                     "paper": "academic_paper",
+                     "newsletter": "newsletter",
+                     "social": "social_media_post",
+                 }[key]
+         return "blog_article"
+
+     def generate_content(self, topic: str, format_type: str, research: str) -> str:
+         p = CONTENT_PROMPT.format(topic=topic, format_type=format_type, research=clamp_text(research, 2000))
+         content = self.llm.complete(p, max_tokens=700)
+         self._log_jsonl({"t": utc_now_iso(), "event": "content", "format": format_type, "len": len(content)})
+         return content
+
+     def evaluate(self, content: str) -> str:
+         p = EVALUATE_PROMPT.format(content=clamp_text(content, 2500))
+         out = self.llm.complete(p, max_tokens=220)
+         # validate JSON when possible
+         try:
+             obj = json.loads(out)
+             if isinstance(obj, dict):
+                 out = json_dumps(obj)
+         except Exception:
+             pass
+         self._log_jsonl({"t": utc_now_iso(), "event": "evaluate"})
+         return out
+
+     def improve(self, content: str, feedback: str) -> str:
+         p = IMPROVE_PROMPT.format(content=clamp_text(content, 2500), feedback=clamp_text(feedback, 800))
+         out = self.llm.complete(p, max_tokens=700)
+         self._log_jsonl({"t": utc_now_iso(), "event": "improve"})
+         return out
+
+     def format_content(self, content: str) -> str:
+         p = FORMAT_PROMPT.format(content=clamp_text(content, 2500))
+         out = self.llm.complete(p, max_tokens=300)
+         self._log_jsonl({"t": utc_now_iso(), "event": "format"})
+         return out
+
+     def publish(self, content: str) -> str:
+         p = PUBLISH_PROMPT.format(content=clamp_text(content, 2000))
+         out = self.llm.complete(p, max_tokens=220)
+         # ensure minimal JSON shape
+         try:
+             obj = json.loads(out)
+             if "published_at" not in obj:
+                 obj["published_at"] = utc_now_iso()
+             out = json_dumps(obj)
+         except Exception:
+             # wrap as minimal manifest
+             out = json_dumps({"title": "Untitled", "summary": "", "tags": [], "canonical": "", "published_at": utc_now_iso(), "body": out})
+         self._log_jsonl({"t": utc_now_iso(), "event": "publish"})
+         return out
+
+     # -------- helpers --------
+     def _split_two(self, block: str) -> Tuple[str, str]:
+         parts = block.split("\n\n", 1)
+         if len(parts) == 2:
+             return parts[0].strip(), parts[1].strip()
+         return block, ""
+
+     def _latest_research(self) -> Optional[str]:
+         for line in reversed(self.history.items):
+             if line.startswith("observation:") and ("Wikipedia summary:" in line or line.strip().startswith("- ")):
+                 return line.split("observation:", 1)[-1].strip()
+         return None
+
+     def _latest_content(self) -> Optional[str]:
+         for line in reversed(self.history.items):
+             if line.startswith("observation:") and len(line) > 30 and ("##" in line or "#" in line or "\n" in line):
+                 return line.split("observation:", 1)[-1].strip()
+         return None
+
+     def _log_jsonl(self, row: Dict[str, Any]) -> None:
+         path = self.cfg.log_jsonl
+         if not path:
+             return
+         try:
+             with open(path, "a", encoding="utf-8") as f:
+                 f.write(json_dumps(row) + "\n")
+         except Exception as e:
+             logger.warning("jsonl log failed: %s", e)
+
+
+ # ---------------------------
+ # CLI
+ # ---------------------------
+
+ def run_cli() -> None:
+     import argparse
+
+     parser = argparse.ArgumentParser(description="Viral content agent")
+     parser.add_argument("task", help="Task to execute, e.g., 'Write a blog about X'")
+     parser.add_argument("--purpose", default="Generate viral content")
+     parser.add_argument("--seed", type=int, default=int(os.getenv("AGENT_SEED", "42")))
+     parser.add_argument("--max-loops", type=int, default=int(os.getenv("AGENT_MAX_LOOPS", "6")))
+     parser.add_argument("--log-jsonl", default=os.getenv("AGENT_LOG_JSONL"))
+     args = parser.parse_args()
+
+     cfg = AgentConfig(seed=args.seed, max_loops=args.max_loops, log_jsonl=args.log_jsonl)
+     agent = ViralAgent(cfg=cfg)
+     result = agent.run(task=args.task, purpose=args.purpose)
+     print(json_dumps(result))
+
+
+ if __name__ == "__main__":
+     run_cli()
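
Finally, a sketch of a reproducible, audited run (the file name and task are arbitrary):

    # Illustrative: fix the seed and capture per-tool events as JSON lines.
    from agent import ViralAgent, AgentConfig

    cfg = AgentConfig(seed=42, max_loops=6, log_jsonl="agent_events.jsonl")
    ViralAgent(cfg=cfg).run(task="Draft a review article on on-device LLMs")
    # Each research/content/evaluate/improve/format/publish step appends one JSON row
    # with a "t" timestamp and an "event" name, so runs can be compared across seeds.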