import base64
from io import BytesIO
from typing import Dict

import torch
from diffusers import FluxPipeline


class EndpointHandler:
    def __init__(self, path: str = ""):
        print(f"Initializing model from: {path}")

        # FLUX.1-dev is published in bfloat16; loading it in float16 can
        # overflow and produce black images, so use the model's native dtype.
        self.pipe = FluxPipeline.from_pretrained(
            "black-forest-labs/FLUX.1-dev",
            torch_dtype=torch.bfloat16,
        )

        # Load the LoRA and fuse it into the base weights so no adapter
        # bookkeeping is needed at inference time.
        print("Loading LoRA weights from: Texttra/Cityscape_Studio")
        self.pipe.load_lora_weights(
            "Texttra/Cityscape_Studio", weight_name="c1t3_v1.safetensors"
        )
        self.pipe.fuse_lora(lora_scale=0.9)

        self.pipe.to("cuda" if torch.cuda.is_available() else "cpu")
        print("Model initialized successfully.")

    def __call__(self, data: Dict) -> Dict:
        print("Received data:", data)

        # Inference Endpoints payloads may carry the prompt directly under
        # "inputs" or nested as {"inputs": {"prompt": ...}}; accept both.
        inputs = data.get("inputs", {})
        prompt = inputs if isinstance(inputs, str) else inputs.get("prompt", "")
        print("Extracted prompt:", prompt)

        if not prompt:
            return {"error": "No prompt provided."}

        image = self.pipe(
            prompt,
            num_inference_steps=50,
            guidance_scale=4.5,
        ).images[0]
        print("Image generated.")

        # Serialize the PIL image to a base64-encoded PNG for the JSON response.
        buffer = BytesIO()
        image.save(buffer, format="PNG")
        base64_image = base64.b64encode(buffer.getvalue()).decode("utf-8")
        print("Returning image.")

        return {"image": base64_image}
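
# Example client call against a deployed endpoint (sketch; the URL and token
# are placeholders, but the payload and response keys match __call__ above):
#
#   import base64, requests
#   resp = requests.post(
#       "https://<your-endpoint-url>",
#       headers={"Authorization": "Bearer <hf_token>"},
#       json={"inputs": {"prompt": "a foggy harbor city at dawn"}},
#   )
#   with open("city.png", "wb") as f:
#       f.write(base64.b64decode(resp.json()["image"]))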