Update app_quant_latent.py
app_quant_latent.py CHANGED (+80 -1)
@@ -555,7 +555,86 @@ def safe_get_latents(pipe, height, width, generator, device, LOGS):
 # Main generation function (kept exactly as your logic)
 # --------------------------
 @spaces.GPU
-def generate_image(prompt, height, width, steps, seed, guidance_scale=0.0):
+def generate_image(prompt, height, width, steps, seed, guidance_scale=0.0):
+    LOGS = []
+    device = "cuda"
+    generator = torch.Generator(device).manual_seed(int(seed))
+
+    # placeholders
+    placeholder = Image.new("RGB", (width, height), color=(255, 255, 255))
+    latent_gallery = []
+    final_gallery = []
+
+    try:
+        # --- Try advanced latent mode ---
+        try:
+            latents = safe_get_latents(pipe, height, width, generator, device, LOGS)
+
+            for i, t in enumerate(pipe.scheduler.timesteps):
+                # Step-wise denoising
+                with torch.no_grad():
+                    noise_pred = pipe.unet(
+                        latents,
+                        t,
+                        encoder_hidden_states=pipe.get_text_embeddings(prompt)
+                    )["sample"]
+                latents = pipe.scheduler.step(noise_pred, t, latents)["prev_sample"]
+
+                # Convert latent to preview image
+                try:
+                    latent_img = latent_to_image(latents, pipe.vae)[0]
+                except Exception:
+                    latent_img = placeholder
+
+                latent_gallery.append(latent_img)
+
+                # Yield intermediate update: latents updated, final gallery empty
+                yield None, latent_gallery, final_gallery, LOGS
+
+            # Decode final image
+            final_img = pipe.decode_latents(latents)[0]
+            final_gallery.append(final_img)
+            LOGS.append("✅ Advanced latent pipeline succeeded.")
+            yield final_img, latent_gallery, final_gallery, LOGS
+
+        except Exception as e:
+            LOGS.append(f"⚠️ Advanced latent mode failed: {e}")
+            LOGS.append("🔄 Switching to standard pipeline...")
+
+            # Standard pipeline fallback
+            try:
+                output = pipe(
+                    prompt=prompt,
+                    height=height,
+                    width=width,
+                    num_inference_steps=steps,
+                    guidance_scale=guidance_scale,
+                    generator=generator,
+                )
+                final_img = output.images[0]
+                final_gallery.append(final_img)
+                latent_gallery.append(final_img)  # optionally show in latent gallery as last step
+                LOGS.append("✅ Standard pipeline succeeded.")
+                yield final_img, latent_gallery, final_gallery, LOGS
+
+            except Exception as e2:
+                LOGS.append(f"❌ Standard pipeline failed: {e2}")
+                final_gallery.append(placeholder)
+                latent_gallery.append(placeholder)
+                yield placeholder, latent_gallery, final_gallery, LOGS
+
+    except Exception as e:
+        LOGS.append(f"❌ Total failure: {e}")
+        final_gallery.append(placeholder)
+        latent_gallery.append(placeholder)
+        yield placeholder, latent_gallery, final_gallery, LOGS
+
+
+
+
+
+
+
 
 @spaces.GPU
 def generate_image_backup(prompt, height, width, steps, seed, guidance_scale=0.0, return_latents=False):
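The preview step calls latent_to_image(latents, pipe.vae), a helper defined elsewhere in app_quant_latent.py and not shown in this hunk. As a rough, hedged sketch of what such a helper typically does with a Stable-Diffusion-style VAE (undo the latent scaling, decode, convert to PIL), assuming an AutoencoderKL-like vae; the Space's real implementation may differ:

import torch
from PIL import Image

def latent_to_image_sketch(latents, vae):
    # Illustrative only; the Space's actual latent_to_image is not part of this diff.
    latents = latents / vae.config.scaling_factor       # undo SD latent scaling (typically 0.18215)
    with torch.no_grad():
        images = vae.decode(latents).sample              # (B, 3, H, W), values roughly in [-1, 1]
    images = (images / 2 + 0.5).clamp(0, 1)              # map to [0, 1]
    images = images.permute(0, 2, 3, 1).cpu().float().numpy()
    return [Image.fromarray((img * 255).round().astype("uint8")) for img in images]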
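For context on why the new generate_image is a generator: every yield emits a (final_image, latent_gallery, final_gallery, LOGS) tuple, which is the shape Gradio expects when streaming intermediate results from a generator event handler. The wiring below is only a sketch under that assumption; the Space's actual Blocks layout is not part of this commit, and all component names here are illustrative.

import gradio as gr

# Hypothetical UI wiring, not the Space's real interface; it only assumes that
# generate_image is a generator yielding (image, latent_gallery, final_gallery, logs).
with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    height = gr.Slider(256, 1024, value=512, step=64, label="Height")
    width = gr.Slider(256, 1024, value=512, step=64, label="Width")
    steps = gr.Slider(1, 50, value=8, step=1, label="Steps")
    seed = gr.Number(value=0, label="Seed")
    run = gr.Button("Generate")

    final_image = gr.Image(label="Final image")
    latent_previews = gr.Gallery(label="Latent previews")
    finals = gr.Gallery(label="Final gallery")
    logs = gr.JSON(label="Logs")

    # Gradio consumes generator functions step by step, so each intermediate
    # yield refreshes the latent-preview gallery while generation is still running.
    run.click(
        generate_image,
        inputs=[prompt, height, width, steps, seed],
        outputs=[final_image, latent_previews, finals, logs],
    )

demo.launch()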