rahul7star committed
Commit da3e710 · verified · 1 Parent(s): e7274da

Update app_quant_latent.py

Files changed (1):
  1. app_quant_latent.py +23 -23
app_quant_latent.py CHANGED
@@ -581,62 +581,61 @@ def upload_latents_to_hf(latent_dict, filename="latents.pt"):
 
 
 
+import asyncio
+import torch
+from PIL import Image
+
+async def async_upload_latents(latent_dict, filename, LOGS):
+    try:
+        hf_url = await upload_latents_to_hf(latent_dict, filename=filename)  # assume this can be async
+        LOGS.append(f"🔹 All preview latents uploaded: {hf_url}")
+    except Exception as e:
+        LOGS.append(f"⚠️ Failed to upload all preview latents: {e}")
 @spaces.GPU
 def generate_image(prompt, height, width, steps, seed, guidance_scale=0.0):
     LOGS = []
-    device = "cuda"
+    device = "cpu"  # CPU mode
     generator = torch.Generator(device).manual_seed(int(seed))
 
     placeholder = Image.new("RGB", (width, height), color=(255, 255, 255))
     latent_gallery = []
     final_gallery = []
 
-    all_latents = []  # store all preview latents
+    all_latents = []
 
     # --- Try generating latent previews ---
     try:
         latents = safe_get_latents(pipe, height, width, generator, device, LOGS)
+        latents = latents.to("cpu")  # ensure CPU dtype
 
-        # Loop through timesteps for preview generation
         for i, t in enumerate(pipe.scheduler.timesteps):
             try:
-                # Convert latent tensor to PIL for preview
                 with torch.no_grad():
-                    # Some pipelines may require same dtype as bias
-                    latent_to_decode = latents.to(pipe.vae.dtype)
+                    latent_to_decode = latents.to(pipe.vae.dtype).cpu()
                     latent_img_tensor = pipe.vae.decode(latent_to_decode).sample  # [1,3,H,W]
                     latent_img_tensor = (latent_img_tensor / 2 + 0.5).clamp(0, 1)
-                    latent_img_tensor = latent_img_tensor.cpu().permute(0, 2, 3, 1)[0]
+                    latent_img_tensor = latent_img_tensor.permute(0, 2, 3, 1)[0]
                     latent_img = Image.fromarray((latent_img_tensor.numpy() * 255).astype("uint8"))
             except Exception:
                 latent_img = placeholder
                 LOGS.append("⚠️ Latent preview decode failed.")
 
             latent_gallery.append(latent_img)
-            all_latents.append(latents.cpu().clone())  # save current latent
+            all_latents.append(latents.cpu().clone())
 
-            # Yield intermediate preview every few steps
-            if i % max(1, len(pipe.scheduler.timesteps) // 10) == 0:
+            if i % max(1, len(pipe.scheduler.timesteps)//10) == 0:
                 yield None, latent_gallery, LOGS
 
-        # Upload full series of latents
-        try:
-            latent_dict = {
-                "latents_series": all_latents,
-                "prompt": prompt,
-                "seed": seed
-            }
-            hf_url = upload_latents_to_hf(latent_dict, filename=f"latents_series_{seed}.pt")
-            LOGS.append(f"🔹 All preview latents uploaded: {hf_url}")
-        except Exception as e:
-            LOGS.append(f"⚠️ Failed to upload all preview latents: {e}")
+        # --- Async upload of full latent series ---
+        latent_dict = {"latents_series": all_latents, "prompt": prompt, "seed": seed}
+        asyncio.create_task(async_upload_latents(latent_dict, f"latents_series_{seed}.pt", LOGS))
 
     except Exception as e:
         LOGS.append(f"⚠️ Latent generation failed: {e}")
         latent_gallery.append(placeholder)
         yield None, latent_gallery, LOGS
 
-    # --- Final image: completely untouched, uses standard pipeline ---
+    # --- Final image: unchanged, uses standard pipeline ---
    try:
         output = pipe(
             prompt=prompt,
@@ -648,7 +647,7 @@ def generate_image(prompt, height, width, steps, seed, guidance_scale=0.0):
         )
         final_img = output.images[0]
         final_gallery.append(final_img)
-        latent_gallery.append(final_img)  # fallback preview if needed
+        latent_gallery.append(final_img)
         LOGS.append("✅ Standard pipeline succeeded.")
         yield final_img, latent_gallery, LOGS
 
@@ -743,6 +742,7 @@ def generate_image0(prompt, height, width, steps, seed, guidance_scale=0.0):
 
 
 with gr.Blocks(title="Z-Image-Turbo") as demo:
+    gr.Markdown("# 🎨 DO NOT RUN THIS ")
     with gr.Tabs():
         with gr.TabItem("Image & Latents"):
             with gr.Row():
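
Note on the async upload added above: asyncio.create_task() only works when an event loop is already running in the current thread, and generate_image is a plain synchronous generator, so the call will typically raise "RuntimeError: no running event loop"; likewise, await upload_latents_to_hf(...) assumes that helper is a coroutine, which the diff itself only assumes ("# assume this can be async"). A minimal sketch of the same fire-and-forget idea using a background thread instead, assuming upload_latents_to_hf stays the synchronous helper defined earlier in the file (the wrapper name upload_latents_in_background is hypothetical):

import threading

def upload_latents_in_background(latent_dict, filename, LOGS):
    """Fire-and-forget upload that works inside a plain (non-async) generator."""
    def _worker():
        try:
            # upload_latents_to_hf is the existing synchronous helper in this file
            hf_url = upload_latents_to_hf(latent_dict, filename=filename)
            LOGS.append(f"🔹 All preview latents uploaded: {hf_url}")
        except Exception as e:
            LOGS.append(f"⚠️ Failed to upload all preview latents: {e}")

    # Daemon thread: never blocks generate_image from yielding or returning.
    threading.Thread(target=_worker, daemon=True).start()

# Inside generate_image, in place of the asyncio.create_task(...) call:
# upload_latents_in_background(latent_dict, f"latents_series_{seed}.pt", LOGS)

The daemon thread lets the generator keep yielding previews while the upload runs; the success or failure message may only land in LOGS after the final yield.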