import os
import subprocess
from datetime import datetime

# Viral Content Generator Agent

PREFIX = """You are an Advanced Viral Content Generator with Self-Research and Self-Improvement Capabilities.

You can generate viral content across multiple formats: blog articles, books, review articles, and academic papers.

You have access to the following tools and capabilities:
- Self-Insight: Generate new content ideas based on trending topics and user preferences
- Self-Research: Use real-time internet searches to gather information for your content
- Content Generation: Create content in various formats with viral potential
- Self-Evaluation: Assess the quality and potential of your generated content
- Publishing Pipeline: Format and structure content for publication

Trigger tools by using this format:
action: TOOL_NAME
action_input=YOUR_INPUT

Your workflow:
1. Generate or receive content ideas
2. Research the topic thoroughly
3. Create content in the requested format (or determine the optimal format)
4. Self-evaluate and improve the content
5. Format for publication
6. Repeat or generate new ideas based on performance

Current Date/Time: {date_time_str}

Purpose: {purpose}
"""

# Template used when logging the full prompt for debugging.
LOG_PROMPT = """
PROMPT
**************************************
{}
**************************************
"""


def run_gpt(
    prompt_template,
    stop_tokens,
    max_tokens,
    purpose,
    **prompt_kwargs,
):
    """Generate a response using a local Ollama model (gpt-oss by default)."""
    content = PREFIX.format(
        date_time_str=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        purpose=purpose,
    ) + prompt_template.format(**prompt_kwargs)

    models = _preferred_models()
    last_error = None
    for model in models:
        try:
            raw_response = _call_ollama(model, content, max_tokens)
            response = _strip_prompt(content, raw_response)
            # Truncate at the first stop token found in the response.
            if stop_tokens:
                for token in stop_tokens:
                    if token in response:
                        response = response.split(token)[0]
                        break
            return response.strip()
        except Exception as exc:  # pylint: disable=broad-except
            last_error = exc
            print(f"[LLM]: Failed with model '{model}': {exc}")
    raise RuntimeError(f"All Ollama model attempts failed: {last_error}")


def _preferred_models():
    """Return candidate model names, preferring the OLLAMA_MODEL override."""
    env_model = os.getenv("OLLAMA_MODEL")
    candidates = [env_model, "gpt-oss", "qwen3"]
    return [model for model in candidates if model]


def _call_ollama(model: str, prompt: str, max_tokens: int) -> str:
    """Invoke the local Ollama runtime with the provided prompt."""
    # Floor the budget at 64 tokens so a zero/None max_tokens still yields output.
    max_predict = max(64, int(max_tokens or 0))
    cmd = [
        "ollama",
        "run",
        # NOTE: not every Ollama CLI release accepts `--num-predict`; if your
        # install rejects the flag, see the HTTP fallback sketch below.
        "--num-predict",
        str(max_predict),
        model,
    ]
    try:
        # The prompt is piped via stdin; `ollama run` reads stdin as the prompt
        # when no prompt argument is given.
        completed = subprocess.run(
            cmd,
            input=prompt.encode("utf-8"),
            capture_output=True,
            check=False,
        )
    except FileNotFoundError as exc:
        raise RuntimeError(
            "Ollama executable not found. Please install Ollama or adjust PATH."
        ) from exc
    if completed.returncode != 0:
        stderr = completed.stderr.decode("utf-8", errors="ignore")
        raise RuntimeError(
            stderr.strip() or f"ollama run {model} failed with code {completed.returncode}"
        )
    return completed.stdout.decode("utf-8", errors="ignore")


def _strip_prompt(prompt: str, raw_response: str) -> str:
    """Some models echo the prompt; remove it if present."""
    if raw_response.startswith(prompt):
        return raw_response[len(prompt):]
    return raw_response
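

# Optional fallback (a sketch, not part of the original pipeline): some Ollama
# CLI releases do not accept `--num-predict`. Ollama's HTTP API
# (`POST /api/generate`) does accept `num_predict` under `options`, so a helper
# like this could stand in for `_call_ollama` on such installs. The URL assumes
# the default local Ollama port (11434).
def _call_ollama_http(model: str, prompt: str, max_tokens: int) -> str:
    """Invoke Ollama over its local HTTP API instead of the CLI."""
    import json
    import urllib.request

    payload = json.dumps({
        "model": model,
        "prompt": prompt,
        "stream": False,  # return a single JSON object instead of a token stream
        "options": {"num_predict": max(64, int(max_tokens or 0))},
    }).encode("utf-8")
    request = urllib.request.Request(
        "http://localhost:11434/api/generate",
        data=payload,
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(request) as response:
        body = json.loads(response.read().decode("utf-8"))
    return body.get("response", "")


# Minimal usage sketch. The template, purpose, and topic below are illustrative
# placeholders, not values taken from the project.
if __name__ == "__main__":
    print(run_gpt(
        prompt_template="Write a three-sentence blog intro about {topic}.",
        stop_tokens=None,
        max_tokens=256,
        purpose="demo run",
        topic="local LLM tooling",
    ))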