import os
import random
import subprocess
from datetime import datetime

import torch
import gradio as gr
from transformers import pipeline

from safe_search import safe_search
from i_search import google, i_search as i_s
from agent import (
    ACTION_PROMPT,
    ADD_PROMPT,
    COMPRESS_HISTORY_PROMPT,
    LOG_PROMPT,
    LOG_RESPONSE,
    MODIFY_PROMPT,
    PREFIX,
    SEARCH_QUERY,
    READ_PROMPT,
    TASK_PROMPT,
    UNDERSTAND_TEST_RESULTS_PROMPT,
)
from utils import parse_action, parse_file_content, read_python_module_structure
import prompts  # assumed local module providing the WEB_DEV, AI_SYSTEM_PROMPT, and PYTHON_CODE_DEV agent prompts used below

def format_prompt(message, history):
    """Formats the prompt by concatenating prior user prompts and bot responses."""
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

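# Illustrative example: with history = [("hi", "hello")] and message = "how are you?",
# format_prompt returns the Mistral-style chat string
#   "<s>[INST] hi [/INST] hello</s> [INST] how are you? [/INST]"
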
def run_gpt(
    prompt_template,
    stop_tokens,
    max_tokens,
    purpose,
    **prompt_kwargs,
):
    """Runs the text-generation model on the filled-in prompt template and returns the response."""
    seed = random.randint(1, 1111111111111111)
    print(seed)
    # generate_kwargs is currently not consumed by the local pipeline call below;
    # it is kept here for reference.
    generate_kwargs = dict(
        temperature=1.0,
        max_new_tokens=2096,
        top_p=0.99,
        repetition_penalty=1.0,
        do_sample=True,
        seed=seed,
    )
    content = PREFIX.format(
        date_time_str=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        purpose=purpose,
        safe_search=safe_search,
    ) + prompt_template.format(**prompt_kwargs)
    print(LOG_PROMPT.format(content))
    # Build the pipeline first, then read the eos token from its tokenizer
    # (the original referenced `model` before it was defined).
    model = pipeline("text-generation", model="microsoft/DialoGPT-small")
    response = model(
        content,
        max_length=max_tokens,
        temperature=1.0,
        truncation=True,
        pad_token_id=model.tokenizer.eos_token_id,
    )
    resp = response[0]["generated_text"]
    print(LOG_RESPONSE.format(resp))
    return resp

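# Minimal usage sketch (placeholder values; assumes ACTION_PROMPT expects the
# purpose/task/history fields, as in call_main below):
#   resp = run_gpt(
#       ACTION_PROMPT,
#       stop_tokens=["observation:", "task:", "action:", "thought:"],
#       max_tokens=256,
#       purpose="demo",
#       task="write a haiku",
#       history="",
#   )
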
def compress_history(purpose, task, history, directory):
    """Compresses the history using the GPT model."""
    resp = run_gpt(
        COMPRESS_HISTORY_PROMPT,
        stop_tokens=["observation:", "task:", "action:", "thought:"],
        max_tokens=512,
        purpose=purpose,
        task=task,
        history=history,
    )
    return resp

def call_search(purpose, task, history, directory, action_input):
    """Runs a web search for action_input and records the result in history."""
    print("CALLING SEARCH")
    try:
        if "http" in action_input:
            action_input = action_input.replace("<", "").replace(">", "")
            response = i_s(action_input)
            # response = google(search_return)
            print(response)
            history += "observation: search result is: {}\n".format(response)
        else:
            history += "observation: I need to provide a valid URL to 'action: SEARCH'\n"
    except Exception as e:
        history += "observation: {}\n".format(e)
    return "MAIN", None, history, task

def call_main(purpose, task, history, directory, action_input):
    """Asks the model for the next thought/action and updates history accordingly."""
    resp = run_gpt(
        ACTION_PROMPT,
        stop_tokens=["observation:", "task:", "action:", "thought:"],
        max_tokens=2096,
        purpose=purpose,
        task=task,
        history=history,
    )
    lines = resp.strip().strip("\n").split("\n")
    for line in lines:
        if line == "":
            continue
        if line.startswith("thought: "):
            history += "{}\n".format(line)
        elif line.startswith("action: "):
            # parse_action (imported from utils above) is assumed to return an
            # (action_name, action_input) pair for an "action: ..." line.
            action_name, action_input = parse_action(line)
            print(f"ACTION_NAME :: {action_name}")
            print(f"ACTION_INPUT :: {action_input}")
            history += "{}\n".format(line)
            if "COMPLETE" in action_name or (action_input and "COMPLETE" in action_input):
                task = "COMPLETE"
            return action_name, action_input, history, task
        else:
            history += "{}\n".format(line)
            # history += "observation: the following command did not produce any useful output: '{}', I need to check the command's syntax, or use a different command\n".format(line)
            # return action_name, action_input, history, task
            # assert False, "unknown action: {}".format(line)
    return "MAIN", None, history, task

def call_set_task(purpose, task, history, directory, action_input):
    """Asks the model for an updated task description and records it in history."""
    task = run_gpt(
        TASK_PROMPT,
        stop_tokens=[],
        max_tokens=64,
        purpose=purpose,
        task=task,
        history=history,
    ).strip("\n")
    history += "observation: task has been updated to: {}\n".format(task)
    return "MAIN", None, history, task

def end_fn(purpose, task, history, directory, action_input):
    """Marks the task as complete."""
    task = "COMPLETE"
    return "COMPLETE", "COMPLETE", history, task

NAME_TO_FUNC = {
    "MAIN": call_main,
    "UPDATE-TASK": call_set_task,
    "SEARCH": call_search,
    "COMPLETE": end_fn,
}

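# Every handler in NAME_TO_FUNC shares the same signature:
# (purpose, task, history, directory, action_input) ->
# (next_action_name, next_action_input, history, task),
# which is what lets run_action below dispatch on the action name alone.
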
def run_action(purpose, task, history, directory, action_name, action_input):
    """Dispatches the named action to its handler."""
    print(f"action_name::{action_name}")
    try:
        if "RESPONSE" in action_name or "COMPLETE" in action_name:
            action_name = "COMPLETE"
            task = "COMPLETE"
            return action_name, "COMPLETE", history, task
        # compress the history when it is long
        if len(history.split("\n")) > 5:
            print("COMPRESSING HISTORY")
            history = compress_history(purpose, task, history, directory)
        if action_name not in NAME_TO_FUNC:
            action_name = "MAIN"
        assert action_name in NAME_TO_FUNC
        print("RUN: ", action_name, action_input)
        return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
    except Exception as e:
        print(e)
        history += "observation: the previous command did not produce any useful output, I need to check the command's syntax, or use a different command\n"
        return "MAIN", None, history, task

def run(purpose, history):
    """Runs the main agent loop, yielding the growing history after each action."""
    # print(purpose)
    # print(hist)
    task = ""  # handlers set this to "COMPLETE" when the run should stop
    directory = "directory"
    if not history:
        history = ""
    elif isinstance(history, list):
        # gr.ChatInterface passes history as (user, bot) pairs; the agent tracks
        # history as a flat string, so flatten it here.
        history = "\n".join(f"{user or ''} {bot or ''}" for user, bot in history)
    action_name = "MAIN"
    action_input = "input"
    while True:
        print("")
        print("")
        print("---")
        print("purpose:", purpose)
        print("task:", task)
        print("---")
        print(history)
        print("---")
        action_name, action_input, history, task = run_action(
            purpose,
            task,
            history,
            directory,
            action_name,
            action_input,
        )
        yield history
        # yield ("", [(purpose, history)])
        if task == "COMPLETE":
            return history
            # return ("", [(purpose, history)])

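# Usage sketch outside Gradio (hypothetical): run() is a generator that yields
# the growing history string after each action, e.g.
#   for state in run("build a small web scraper", ""):
#       print(state)
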
agents = [
    "WEB_DEV",
    "AI_SYSTEM_PROMPT",
    "PYTHON_CODE_DEV",
]

def generate(
    prompt,
    history,
    agent_name=agents[0],
    sys_prompt="",
    temperature=0.9,
    max_new_tokens=256,
    top_p=0.95,
    repetition_penalty=1.0,
):
    """Generates a single completion for the given prompt and agent."""
    seed = random.randint(1, 1111111111111111)
    torch.manual_seed(seed)  # make the sampled output reproducible for this call
    agent = prompts.WEB_DEV
    if agent_name == "WEB_DEV":
        agent = prompts.WEB_DEV
    if agent_name == "AI_SYSTEM_PROMPT":
        agent = prompts.AI_SYSTEM_PROMPT
    if agent_name == "PYTHON_CODE_DEV":
        agent = prompts.PYTHON_CODE_DEV
    system_prompt = agent
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
    )
    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
    model = pipeline("text-generation", model="microsoft/DialoGPT-small")
    response = model(
        formatted_prompt,
        truncation=True,
        pad_token_id=model.tokenizer.eos_token_id,
        **generate_kwargs,
    )
    output = response[0]["generated_text"]
    return output

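# Usage sketch (hypothetical values): generate() returns one completion string, e.g.
#   text = generate("Summarize Bitcoin's origins", history=[], agent_name="PYTHON_CODE_DEV")
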
additional_inputs = [
    gr.Dropdown(
        label="Agents",
        choices=[s for s in agents],
        value=agents[0],
        interactive=True,
    ),
    gr.Textbox(
        label="System Prompt",
        max_lines=1,
        interactive=True,
    ),
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=1048*10,
        minimum=0,
        maximum=1048*10,
        step=64,
        interactive=True,
        info="The maximum number of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
]

examples = [
    [
        "Write an intro for a Bitcoin book, covering origins, core principles, and disruption potential. Use these links: [https://bitcoin.org/bitcoin.pdf, https://en.wikipedia.org/wiki/Satoshi_Nakamoto, https://www.investopedia.com/terms/b/bitcoin.asp]",
        "Introduction",
    ],
    [
        "Write a chapter titled 'The Genesis of Bitcoin' using these links: [https://bitcoin.org/bitcoin.pdf, https://en.wikipedia.org/wiki/Satoshi_Nakamoto, https://www.investopedia.com/terms/b/bitcoin.asp]",
        "The Genesis of Bitcoin",
    ],
    [
        "Write a chapter titled 'The Rise of the Decentralized' using these links: [https://en.wikipedia.org/wiki/Bitcoin_exchange, https://en.wikipedia.org/wiki/Bitcoin_mining, https://www.investopedia.com/terms/d/decentralized-finance-defi.asp]",
        "The Rise of the Decentralized",
    ],
    [
        "Write a chapter titled 'The Bitcoin Revolution' using these links: [https://www.investopedia.com/terms/b/bitcoin-volatility.asp, https://www.investopedia.com/terms/s/scalability.asp, https://www.investopedia.com/terms/r/regulation.asp]",
        "The Bitcoin Revolution",
    ],
    [
        "Write a chapter titled 'Beyond Bitcoin: The Blockchain Revolution' using these links: [https://www.investopedia.com/terms/b/blockchain.asp, https://www.ibm.com/topics/blockchain]",
        "Beyond Bitcoin: The Blockchain Revolution",
    ],
    [
        "Write a chapter titled 'Reshaping Finance: Bitcoin's Impact on the Global System' using these links: [https://www.investopedia.com/terms/c/central-bank-digital-currency-cbdc.asp, https://www.investopedia.com/terms/i/international-trade.asp]",
        "Reshaping Finance: Bitcoin's Impact on the Global System",
    ],
    [
        "Write a chapter titled 'Empowering Individuals: Bitcoin's Social Impact' using these links: [https://en.wikipedia.org/wiki/Financial_inclusion, https://www.investopedia.com/terms/w/wealth-distribution.asp]",
        "Empowering Individuals: Bitcoin's Social Impact",
    ],
    [
        "Write a chapter titled 'A New World Order: Bitcoin's Potential for Change' using these links: [https://en.wikipedia.org/wiki/Sustainable_development, https://en.wikipedia.org/wiki/Global_trade, https://www.un.org/en/development/desa/policy/wssd/]",
        "A New World Order: Bitcoin's Potential for Change",
    ],
    [
        "Write a chapter titled 'The Regulatory Landscape' using these links: [https://www.investopedia.com/terms/r/regulation.asp, https://www.coindesk.com/regulation]",
        "The Regulatory Landscape",
    ],
    [
        "Write a chapter titled 'The Environmental Impact' using these links: [https://www.investopedia.com/terms/b/bitcoin-mining.asp, https://digiconomist.net/bitcoin-energy-consumption]",
        "The Environmental Impact",
    ],
]

def launch_interface():
    """Launches the interface."""
    gr.ChatInterface(
        fn=run,
        chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, layout="panel"),
        title="Mixtral 46.7B\nMicro-Agent\nInternet Search <br> development test",
        examples=examples,
        concurrency_limit=20,
    ).launch(show_api=False)


launch_interface()