import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
# Load the model and tokenizer from the Hugging Face Hub
model_path = "Canstralian/pentest_ai"  # Replace with your model path if needed
model = AutoModelForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)

# Confirm successful loading
print(f"Model and tokenizer loaded from {model_path}")
# Generate a model response for a user-supplied instruction
def generate_text(instruction):
    # Tokenize the prompt; the returned dict includes input_ids and attention_mask
    inputs = tokenizer(instruction, return_tensors="pt", truncation=True, max_length=512)
    inputs = {k: v.to(model.device) for k, v in inputs.items()}

    # Generate up to 150 new tokens. max_new_tokens (rather than max_length)
    # keeps the output budget independent of the prompt length; sampling is
    # used on its own because mixing num_beams with do_sample is discouraged.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=150,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode the generated token IDs back into text
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
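
# Quick local sanity check before wiring up the UI (the prompt below is
# illustrative only; uncomment to try it):
# print(generate_text("Briefly explain what a SYN scan is."))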
# Gradio interface exposing the text generation function
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter your question or prompt here..."),
    outputs="text",
    title="Pentest AI Text Generator",
    description="Generate text using a fine-tuned model for pentesting-related queries.",
)
# Launch the interface
iface.launch()
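
# Note: Hugging Face Spaces serves the app automatically; when running locally,
# iface.launch(share=True) exposes a temporary public URL instead.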