rahul7star commited on
Commit dca5b2f · verified
1 Parent(s): e2ddccd

Update app_quant_latent.py

Files changed (1)
  1. app_quant_latent.py +80 -1
app_quant_latent.py CHANGED
@@ -555,7 +555,86 @@ def safe_get_latents(pipe, height, width, generator, device, LOGS):
  # Main generation function (kept exactly as your logic)
  # --------------------------
  @spaces.GPU
- def generate_image(prompt, height, width, steps, seed, guidance_scale=0.0): LOGS = [] device = "cuda" generator = torch.Generator(device).manual_seed(int(seed)) # placeholders placeholder = Image.new("RGB", (width, height), color=(255, 255, 255)) latent_gallery = [] final_gallery = [] try: # --- Try advanced latent mode --- try: latents = safe_get_latents(pipe, height, width, generator, device, LOGS) for i, t in enumerate(pipe.scheduler.timesteps): # Step-wise denoising with torch.no_grad(): noise_pred = pipe.unet(latents, t, encoder_hidden_states=pipe.get_text_embeddings(prompt))["sample"] latents = pipe.scheduler.step(noise_pred, t, latents)["prev_sample"] # Convert latent to preview image try: latent_img = latent_to_image(latents, pipe.vae)[0] except Exception: latent_img = placeholder latent_gallery.append(latent_img) # Yield intermediate update: latents updated, final gallery empty yield None, latent_gallery, final_gallery, LOGS # decode final image final_img = pipe.decode_latents(latents)[0] final_gallery.append(final_img) LOGS.append("✅ Advanced latent pipeline succeeded.") yield final_img, latent_gallery, final_gallery, LOGS except Exception as e: LOGS.append(f"⚠️ Advanced latent mode failed: {e}") LOGS.append("🔄 Switching to standard pipeline...") # Standard pipeline fallback try: output = pipe( prompt=prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=guidance_scale, generator=generator, ) final_img = output.images[0] final_gallery.append(final_img) latent_gallery.append(final_img) # optionally show in latent gallery as last step LOGS.append("✅ Standard pipeline succeeded.") yield final_img, latent_gallery, final_gallery, LOGS except Exception as e2: LOGS.append(f"❌ Standard pipeline failed: {e2}") final_gallery.append(placeholder) latent_gallery.append(placeholder) yield placeholder, latent_gallery, final_gallery, LOGS except Exception as e: LOGS.append(f"❌ Total failure: {e}") final_gallery.append(placeholder) latent_gallery.append(placeholder) yield placeholder, latent_gallery, final_gallery, LOGS
+ def generate_image(prompt, height, width, steps, seed, guidance_scale=0.0):
+     LOGS = []
+     device = "cuda"
+     generator = torch.Generator(device).manual_seed(int(seed))
+
+     # placeholders
+     placeholder = Image.new("RGB", (width, height), color=(255, 255, 255))
+     latent_gallery = []
+     final_gallery = []
+
+     try:
+         # --- Try advanced latent mode ---
+         try:
+             latents = safe_get_latents(pipe, height, width, generator, device, LOGS)
+
+             for i, t in enumerate(pipe.scheduler.timesteps):
+                 # Step-wise denoising
+                 with torch.no_grad():
+                     noise_pred = pipe.unet(
+                         latents,
+                         t,
+                         encoder_hidden_states=pipe.get_text_embeddings(prompt)
+                     )["sample"]
+                 latents = pipe.scheduler.step(noise_pred, t, latents)["prev_sample"]
+
+                 # Convert latent to preview image
+                 try:
+                     latent_img = latent_to_image(latents, pipe.vae)[0]
+                 except Exception:
+                     latent_img = placeholder
+
+                 latent_gallery.append(latent_img)
+
+                 # Yield intermediate update: latents updated, final gallery empty
+                 yield None, latent_gallery, final_gallery, LOGS
+
+             # Decode final image
+             final_img = pipe.decode_latents(latents)[0]
+             final_gallery.append(final_img)
+             LOGS.append("✅ Advanced latent pipeline succeeded.")
+             yield final_img, latent_gallery, final_gallery, LOGS
+
+         except Exception as e:
+             LOGS.append(f"⚠️ Advanced latent mode failed: {e}")
+             LOGS.append("🔄 Switching to standard pipeline...")
+
+             # Standard pipeline fallback
+             try:
+                 output = pipe(
+                     prompt=prompt,
+                     height=height,
+                     width=width,
+                     num_inference_steps=steps,
+                     guidance_scale=guidance_scale,
+                     generator=generator,
+                 )
+                 final_img = output.images[0]
+                 final_gallery.append(final_img)
+                 latent_gallery.append(final_img)  # optionally show in latent gallery as last step
+                 LOGS.append("✅ Standard pipeline succeeded.")
+                 yield final_img, latent_gallery, final_gallery, LOGS
+
+             except Exception as e2:
+                 LOGS.append(f"❌ Standard pipeline failed: {e2}")
+                 final_gallery.append(placeholder)
+                 latent_gallery.append(placeholder)
+                 yield placeholder, latent_gallery, final_gallery, LOGS
+
+     except Exception as e:
+         LOGS.append(f"❌ Total failure: {e}")
+         final_gallery.append(placeholder)
+         latent_gallery.append(placeholder)
+         yield placeholder, latent_gallery, final_gallery, LOGS
+
+
+
+
+
+
+

  @spaces.GPU
  def generate_image_backup(prompt, height, width, steps, seed, guidance_scale=0.0, return_latents=False):
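
The new generate_image depends on pieces that live elsewhere in app_quant_latent.py or on the pipeline object itself: safe_get_latents (see the hunk header above), latent_to_image, pipe.get_text_embeddings, and pipe.decode_latents. None of these are guaranteed by the stock diffusers API, so the per-step preview path only runs if the loaded pipeline actually exposes them. As a rough sketch only, a latent-to-preview helper compatible with the loop above could look like the following, assuming a Stable-Diffusion-style AutoencoderKL with a scaling_factor config entry; the repository's real latent_to_image may differ.

import torch
from PIL import Image

def latent_to_image(latents, vae):
    # Hypothetical sketch, not the repo's implementation: decode a batch of
    # latents into PIL images for the intermediate latent_gallery previews.
    with torch.no_grad():
        # Undo the VAE scaling applied when the latents were created
        # (assumption: SD-style scaling_factor, default 0.18215).
        scaling = getattr(vae.config, "scaling_factor", 0.18215)
        images = vae.decode(latents / scaling).sample
    # Map from [-1, 1] to uint8 RGB, channel-last, one PIL image per batch item.
    images = (images / 2 + 0.5).clamp(0, 1)
    images = (images.permute(0, 2, 3, 1).float().cpu().numpy() * 255).round().astype("uint8")
    return [Image.fromarray(img) for img in images]

Because generate_image is a generator that yields (final_img, latent_gallery, final_gallery, LOGS) after every scheduler step, the Gradio side would typically bind it directly to the output components so the latent gallery streams while the run is in progress; the final yield then fills the final gallery (or the placeholder on failure).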