Update app_quant_latent.py

app_quant_latent.py  +89 -6  CHANGED
@@ -593,9 +593,9 @@ async def async_upload_latents(latent_dict, filename, LOGS):
         LOGS.append(f"⚠️ Failed to upload all preview latents: {e}")
 
 
-
+# this code generates all frames for the latents; GPU-expensive but decode fails, so use this later
 @spaces.GPU
-def
+def generate_image_all_latents(prompt, height, width, steps, seed, guidance_scale=0.0):
     LOGS = []
     device = "cpu"  # FORCE CPU
     generator = torch.Generator(device).manual_seed(int(seed))
@@ -688,8 +688,94 @@ def generate_image(prompt, height, width, steps, seed, guidance_scale=0.0):
         yield placeholder, latent_gallery, LOGS
 
 # this is a stable version that can generate the final image and noise-to-latent previews
+
+
 @spaces.GPU
-def
+def generate_image(prompt, height, width, steps, seed, guidance_scale=0.0):
+    LOGS = []
+    device = "cuda"
+    generator = torch.Generator(device).manual_seed(int(seed))
+
+    placeholder = Image.new("RGB", (width, height), color=(255, 255, 255))
+    latent_gallery = []
+    final_gallery = []
+
+    # --- Generate latent previews in a loop ---
+    try:
+        latents = safe_get_latents(pipe, height, width, generator, device, LOGS)
+
+        # always keep latents float32 until decode
+        latents = latents.float()
+
+        num_previews = min(10, steps)
+        preview_steps = torch.linspace(0, 1, num_previews)
+
+        for i, alpha in enumerate(preview_steps):
+
+            try:
+                with torch.no_grad():
+
+                    # simulate progression
+                    preview_latent = latents * alpha + torch.randn_like(latents) * (1 - alpha)
+
+                    # 🛠 FIX: match VAE dtype
+                    vae_dtype = pipe.vae.dtype
+                    preview_latent = preview_latent.to(vae_dtype)
+
+                    # decode
+                    latent_img_tensor = pipe.vae.decode(preview_latent).sample
+                    latent_img_tensor = (latent_img_tensor / 2 + 0.5).clamp(0, 1)
+                    latent_img_tensor = latent_img_tensor.cpu().permute(0, 2, 3, 1)[0]
+                    latent_img = Image.fromarray((latent_img_tensor.numpy() * 255).astype("uint8"))
+
+            except Exception as e:
+                LOGS.append(f"⚠️ Latent preview decode failed: {e}")
+                latent_img = placeholder
+
+            latent_gallery.append(latent_img)
+            yield None, latent_gallery, LOGS
+
+        # Upload latents
+        latent_dict = {"latents": latents.cpu(), "prompt": prompt, "seed": seed}
+        try:
+            hf_url = upload_latents_to_hf(latent_dict, filename=f"latents_{seed}.pt")
+            LOGS.append(f"🔹 Latents uploaded: {hf_url}")
+        except Exception as e:
+            LOGS.append(f"⚠️ Failed to upload latents: {e}")
+
+    except Exception as e:
+        LOGS.append(f"⚠️ Latent generation failed: {e}")
+        latent_gallery.append(placeholder)
+        yield None, latent_gallery, LOGS
+
+    # --- Final image: untouched standard pipeline ---
+    try:
+        output = pipe(
+            prompt=prompt,
+            height=height,
+            width=width,
+            num_inference_steps=steps,
+            guidance_scale=guidance_scale,
+            generator=generator,
+        )
+        final_img = output.images[0]
+        final_gallery.append(final_img)
+        latent_gallery.append(final_img)  # fallback preview if needed
+        LOGS.append("✅ Standard pipeline succeeded.")
+        yield final_img, latent_gallery, LOGS
+
+    except Exception as e2:
+        LOGS.append(f"❌ Standard pipeline failed: {e2}")
+        final_gallery.append(placeholder)
+        latent_gallery.append(placeholder)
+        yield placeholder, latent_gallery, LOGS
+
+
+
+
+# this is a stable version that can generate the final image and noise-to-latent previews
+@spaces.GPU
+def generate_image_safe(prompt, height, width, steps, seed, guidance_scale=0.0):
     LOGS = []
     device = "cuda"
     generator = torch.Generator(device).manual_seed(int(seed))
@@ -768,9 +854,6 @@ def generate_image0(prompt, height, width, steps, seed, guidance_scale=0.0):
 
 
 
-
-
-
 with gr.Blocks(title="Z-Image-Turbo") as demo:
     gr.Markdown("# 🎨 DO NOT RUN THIS ")
     with gr.Tabs():
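For context, the preview loop added in the new generate_image blends pure Gaussian noise with the final latents at an increasing alpha and decodes each blend, so early previews look noisy and later ones approach the final image. A minimal sketch of that blend with dummy tensors (no pipeline or VAE; the latent shape and preview count here are illustrative assumptions, not values from the Space):

import torch

# stand-in for the pipeline's final latents: (batch, channels, height/8, width/8)
latents = torch.randn(1, 4, 64, 64)

num_previews = 10
for alpha in torch.linspace(0, 1, num_previews):
    # same blend as in the diff: pure noise at alpha=0, the final latents at alpha=1
    preview_latent = latents * alpha + torch.randn_like(latents) * (1 - alpha)
    # in the Space this tensor is cast to the VAE dtype and decoded into a preview image
    print(f"alpha={alpha.item():.2f}  preview std={preview_latent.std().item():.3f}")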
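upload_latents_to_hf is called in the new generate_image but its definition sits outside the hunks shown here. Purely as an illustration, a helper like this could be written with huggingface_hub; the repo id, repo type, and returned URL below are assumptions, not the Space's actual code:

import tempfile
import torch
from huggingface_hub import HfApi

def upload_latents_to_hf(latent_dict, filename, repo_id="your-username/latents-store"):
    # hypothetical helper: serialize the latent dict and push it to a dataset repo
    api = HfApi()  # picks up a token from the environment or a cached login
    with tempfile.NamedTemporaryFile(suffix=".pt") as tmp:
        torch.save(latent_dict, tmp.name)
        api.upload_file(
            path_or_fileobj=tmp.name,
            path_in_repo=filename,
            repo_id=repo_id,
            repo_type="dataset",
        )
    return f"https://huggingface.co/datasets/{repo_id}/blob/main/{filename}"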
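Because generate_image is a generator yielding (final image, preview gallery, logs), the Blocks UI at the end of the file can stream those updates as they arrive. A standalone sketch of that wiring with a stub callback (the component layout and the stub function are illustrative assumptions, not the Space's actual UI):

import gradio as gr

def stub_generate(prompt):
    # stand-in generator with the same (image, gallery, logs) output shape as generate_image
    gallery, logs = [], []
    for step in range(3):
        logs.append(f"step {step}: still denoising '{prompt}'")
        yield None, gallery, logs
    yield None, gallery, logs + ["done"]

with gr.Blocks(title="streaming sketch") as demo:
    prompt = gr.Textbox(label="Prompt")
    final_image = gr.Image(label="Final image")
    previews = gr.Gallery(label="Latent previews")
    logs = gr.JSON(label="Logs")
    gr.Button("Generate").click(stub_generate, inputs=prompt, outputs=[final_image, previews, logs])

demo.launch()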