Spaces · Running on Zero

0516_zerogpu
Commit 638275e · Parent(s): 2dac737
app.py CHANGED

@@ -128,7 +128,7 @@ woman_Embedding_Manager = models.embedding_manager.EmbeddingManagerId_adain(
     loss_type = embedding_manager_config.model.personalization_config.params.loss_type,
     vit_out_dim = input_dim,
 )
-
+text_encoder.text_model.embeddings.forward = original_forward
 
 DEFAULT_STYLE_NAME = "Watercolor"
 MAX_SEED = np.iinfo(np.int32).max
@@ -208,13 +208,13 @@ def generate_image(experiment_name, label, prompts_array, chose_emb):
         print("new")
         torch.save(random_embedding, ran_emb_path)
         _, emb_dict = Embedding_Manager(tokenized_text=None, embedded_text=None, name_batch=None, random_embeddings = random_embedding, timesteps = None,)
-        text_encoder.text_model.embeddings.forward = original_forward
+        # text_encoder.text_model.embeddings.forward = original_forward
         test_emb = emb_dict["adained_total_embedding"].to(device)
         torch.save(test_emb, test_emb_path)
     elif label == "continue":
         print("old")
         test_emb = torch.load(chose_emb).cuda()
-        text_encoder.text_model.embeddings.forward = original_forward
+        # text_encoder.text_model.embeddings.forward = original_forward
 
     v1_emb = test_emb[:, 0]
     v2_emb = test_emb[:, 1]
@@ -298,7 +298,6 @@ def run_for_examples(example_emb, gender_GAN, choice, prompts_array):
     print("label:",label)
 
     test_emb = torch.load(example_emb).cuda()
-    text_encoder.text_model.embeddings.forward = original_forward
     v1_emb = test_emb[:, 0]
     v2_emb = test_emb[:, 1]
     embeddings = [v1_emb, v2_emb]
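For readers following the change: the diff moves the restoration of the text encoder's patched forward method (`text_encoder.text_model.embeddings.forward = original_forward`) out of `generate_image` and `run_for_examples` and up to module scope, right after the embedding manager is constructed, commenting out or deleting the per-call assignments. Below is a minimal, self-contained sketch of that pattern with hypothetical stand-in names (`TextEmbeddings`, `patched_forward`, `generate`); it is not the Space's actual code, only an illustration of restoring a monkey-patched method once at import time instead of inside every request handler.

# Minimal sketch (hypothetical names, not the Space's real classes) of the
# "patch during setup, restore once at module scope" pattern this commit
# appears to apply.

class TextEmbeddings:
    def forward(self, ids):
        # Stand-in for the real embedding lookup.
        return [i * 2 for i in ids]

embeddings = TextEmbeddings()
original_forward = embeddings.forward      # keep a handle to the unpatched method

def patched_forward(ids):
    # Stand-in for the temporary hook installed while building embeddings.
    return [i * 2 + 1 for i in ids]

embeddings.forward = patched_forward       # temporary patch used only during setup

# ... setup work that relies on patched_forward would run here ...

# Restore once, at module scope (mirrors the new line added after the
# EmbeddingManagerId_adain construction), rather than re-assigning inside
# generate_image / run_for_examples on every call.
embeddings.forward = original_forward

def generate(ids):
    # Request handler no longer touches embeddings.forward.
    return embeddings.forward(ids)

if __name__ == "__main__":
    print(generate([1, 2, 3]))             # -> [2, 4, 6]

On a ZeroGPU Space (the branch name 0516_zerogpu suggests that is the target), request handlers are typically wrapped with `@spaces.GPU`, so keeping one-time setup such as this restore at module level and leaving the decorated handlers free of that side effect is the usual arrangement; that reading of the commit's intent is an assumption, not something stated in the diff itself.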