KleinBase4B Templates
This model is part of the open-source Diffusion Templates series from DiffSynth-Studio. It is a ControlNet-style control model that precisely guides the spatial structure, object outlines, and perspective of generated images based on an input reference image.
| Condition | Prompt: A cat is sitting on a stone, bathed in bright sunshine. | Prompt: A cat is sitting on a stone, surrounded by colorful magical particles. |
|---|---|---|
| ![]() | ![]() | ![]() |
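The bundled example uses a depth map (`image_depth.jpg`) as the condition image. If you want to prepare a condition image from your own photo before running the inference scripts below, a minimal sketch along these lines can work; the Hugging Face `transformers` depth-estimation pipeline and the checkpoint named here are assumptions for illustration, not part of this repository, and any depth estimator can be substituted:

```python
# Minimal sketch (assumption): derive a depth-style condition image from a photo
# with the transformers depth-estimation pipeline; any depth estimator works.
from transformers import pipeline
from PIL import Image

depth_estimator = pipeline("depth-estimation", model="depth-anything/Depth-Anything-V2-Small-hf")
photo = Image.open("my_photo.jpg")                 # your own reference photo
condition_image = depth_estimator(photo)["depth"]  # PIL image encoding per-pixel depth
condition_image.save("my_condition_depth.jpg")     # pass this as "image" in template_inputs
```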
Install DiffSynth-Studio from source:

```shell
git clone https://github.com/modelscope/DiffSynth-Studio.git
cd DiffSynth-Studio
pip install -e .
```
The following script runs ControlNet-guided inference with this template model, using the example depth map as the condition image:

```python
import torch
from PIL import Image
from modelscope import dataset_snapshot_download
from diffsynth.diffusion.template import TemplatePipeline
from diffsynth.pipelines.flux2_image import Flux2ImagePipeline, ModelConfig

# Load the FLUX.2 Klein base pipeline (transformer, text encoder, VAE, tokenizer).
pipe = Flux2ImagePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="black-forest-labs/FLUX.2-klein-base-4B", origin_file_pattern="transformer/*.safetensors"),
        ModelConfig(model_id="black-forest-labs/FLUX.2-klein-4B", origin_file_pattern="text_encoder/*.safetensors"),
        ModelConfig(model_id="black-forest-labs/FLUX.2-klein-4B", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
    ],
    tokenizer_config=ModelConfig(model_id="black-forest-labs/FLUX.2-klein-4B", origin_file_pattern="tokenizer/"),
)

# Load the ControlNet template model on top of the base pipeline.
template = TemplatePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[ModelConfig(model_id="DiffSynth-Studio/Template-KleinBase4B-ControlNet")],
)

# Download the example condition image (a depth map).
dataset_snapshot_download(
    "DiffSynth-Studio/examples_in_diffsynth",
    allow_file_pattern=["templates/*"],
    local_dir="data/examples",
)

# First prompt: the depth map constrains the spatial layout of the output.
image = template(
    pipe,
    prompt="A cat is sitting on a stone, bathed in bright sunshine.",
    seed=0, cfg_scale=4, num_inference_steps=50,
    template_inputs=[{
        "image": Image.open("data/examples/templates/image_depth.jpg"),
        "prompt": "A cat is sitting on a stone, bathed in bright sunshine.",
    }],
    negative_template_inputs=[{
        "image": Image.open("data/examples/templates/image_depth.jpg"),
        "prompt": "",
    }],
)
image.save("image_ControlNet_sunshine.jpg")

# Second prompt: the same condition image keeps the composition fixed while the style changes.
image = template(
    pipe,
    prompt="A cat is sitting on a stone, surrounded by colorful magical particles.",
    seed=0, cfg_scale=4, num_inference_steps=50,
    template_inputs=[{
        "image": Image.open("data/examples/templates/image_depth.jpg"),
        "prompt": "A cat is sitting on a stone, surrounded by colorful magical particles.",
    }],
    negative_template_inputs=[{
        "image": Image.open("data/examples/templates/image_depth.jpg"),
        "prompt": "",
    }],
)
image.save("image_ControlNet_magic.jpg")
```
For GPUs with limited VRAM, the variant below offloads model weights to disk and CPU, stages them in FP8, and computes in BF16:

```python
import torch
from PIL import Image
from modelscope import dataset_snapshot_download
from diffsynth.diffusion.template import TemplatePipeline
from diffsynth.pipelines.flux2_image import Flux2ImagePipeline, ModelConfig

# Offload/quantization settings: weights rest on disk, are staged in FP8, and are computed in BF16 on the GPU.
vram_config = {
    "offload_dtype": "disk",
    "offload_device": "disk",
    "onload_dtype": torch.float8_e4m3fn,
    "onload_device": "cpu",
    "preparing_dtype": torch.float8_e4m3fn,
    "preparing_device": "cuda",
    "computation_dtype": torch.bfloat16,
    "computation_device": "cuda",
}

pipe = Flux2ImagePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="black-forest-labs/FLUX.2-klein-base-4B", origin_file_pattern="transformer/*.safetensors", **vram_config),
        ModelConfig(model_id="black-forest-labs/FLUX.2-klein-4B", origin_file_pattern="text_encoder/*.safetensors", **vram_config),
        ModelConfig(model_id="black-forest-labs/FLUX.2-klein-4B", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
    ],
    tokenizer_config=ModelConfig(model_id="black-forest-labs/FLUX.2-klein-4B", origin_file_pattern="tokenizer/"),
    # VRAM budget: total GPU memory in GiB minus 0.5 GiB of headroom.
    vram_limit=torch.cuda.mem_get_info("cuda")[1] / (1024 ** 3) - 0.5,
)

# Load the ControlNet template model; lazy_loading defers loading its weights until needed.
template = TemplatePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[ModelConfig(model_id="DiffSynth-Studio/Template-KleinBase4B-ControlNet")],
    lazy_loading=True,
)

# Download the example condition image (a depth map).
dataset_snapshot_download(
    "DiffSynth-Studio/examples_in_diffsynth",
    allow_file_pattern=["templates/*"],
    local_dir="data/examples",
)

image = template(
    pipe,
    prompt="A cat is sitting on a stone, bathed in bright sunshine.",
    seed=0, cfg_scale=4, num_inference_steps=50,
    template_inputs=[{
        "image": Image.open("data/examples/templates/image_depth.jpg"),
        "prompt": "A cat is sitting on a stone, bathed in bright sunshine.",
    }],
    negative_template_inputs=[{
        "image": Image.open("data/examples/templates/image_depth.jpg"),
        "prompt": "",
    }],
)
image.save("image_ControlNet_sunshine.jpg")

image = template(
    pipe,
    prompt="A cat is sitting on a stone, surrounded by colorful magical particles.",
    seed=0, cfg_scale=4, num_inference_steps=50,
    template_inputs=[{
        "image": Image.open("data/examples/templates/image_depth.jpg"),
        "prompt": "A cat is sitting on a stone, surrounded by colorful magical particles.",
    }],
    negative_template_inputs=[{
        "image": Image.open("data/examples/templates/image_depth.jpg"),
        "prompt": "",
    }],
)
image.save("image_ControlNet_magic.jpg")
```
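The `vram_limit` above budgets against the total GPU memory reported by `torch.cuda.mem_get_info`, minus 0.5 GiB of headroom. If you prefer to budget against the memory that is actually free at load time, the same call also reports that value; a small sketch:

```python
import torch

# torch.cuda.mem_get_info returns (free, total) in bytes for the given device.
free_bytes, total_bytes = torch.cuda.mem_get_info("cuda")
gib = 1024 ** 3
print(f"free: {free_bytes / gib:.1f} GiB, total: {total_bytes / gib:.1f} GiB")

# Budget against free memory instead of total, keeping ~0.5 GiB of headroom.
vram_limit = free_bytes / gib - 0.5
```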
After installing DiffSynth-Studio, use the following commands to download the example dataset and start training. For more information, please refer to the DiffSynth-Studio documentation. A sketch of loading the trained weights for inference follows the training command.
```shell
# Download the example training dataset.
modelscope download --dataset DiffSynth-Studio/diffsynth_example_dataset --include "flux2/Template-KleinBase4B-ControlNet/*" --local_dir ./data/diffsynth_example_dataset

# Train the ControlNet template model; only the template module is trainable.
accelerate launch examples/flux2/model_training/train.py \
  --dataset_base_path data/diffsynth_example_dataset/flux2/Template-KleinBase4B-ControlNet \
  --dataset_metadata_path data/diffsynth_example_dataset/flux2/Template-KleinBase4B-ControlNet/metadata.jsonl \
  --extra_inputs "template_inputs" \
  --max_pixels 1048576 \
  --dataset_repeat 50 \
  --model_id_with_origin_paths "black-forest-labs/FLUX.2-klein-4B:text_encoder/*.safetensors,black-forest-labs/FLUX.2-klein-base-4B:transformer/*.safetensors,black-forest-labs/FLUX.2-klein-4B:vae/diffusion_pytorch_model.safetensors" \
  --template_model_id_or_path "DiffSynth-Studio/Template-KleinBase4B-ControlNet:" \
  --tokenizer_path "black-forest-labs/FLUX.2-klein-4B:tokenizer/" \
  --learning_rate 1e-4 \
  --num_epochs 2 \
  --remove_prefix_in_ckpt "pipe.template_model." \
  --output_path "./models/train/Template-KleinBase4B-ControlNet_full" \
  --trainable_models "template_model" \
  --use_gradient_checkpointing \
  --find_unused_parameters
```
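After training, the template weights are written under `./models/train/Template-KleinBase4B-ControlNet_full`. Below is a minimal sketch of loading such a checkpoint for inference; the local-`path` form of `ModelConfig` and the checkpoint file name are assumptions, so check the output directory and the DiffSynth-Studio documentation for the exact layout:

```python
import torch
from diffsynth.diffusion.template import TemplatePipeline
from diffsynth.pipelines.flux2_image import Flux2ImagePipeline, ModelConfig

# Base pipeline, loaded exactly as in the inference examples above.
pipe = Flux2ImagePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="black-forest-labs/FLUX.2-klein-base-4B", origin_file_pattern="transformer/*.safetensors"),
        ModelConfig(model_id="black-forest-labs/FLUX.2-klein-4B", origin_file_pattern="text_encoder/*.safetensors"),
        ModelConfig(model_id="black-forest-labs/FLUX.2-klein-4B", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
    ],
    tokenizer_config=ModelConfig(model_id="black-forest-labs/FLUX.2-klein-4B", origin_file_pattern="tokenizer/"),
)

# Assumption: ModelConfig can point at a local file; the file name below is illustrative.
template = TemplatePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[ModelConfig(path="models/train/Template-KleinBase4B-ControlNet_full/epoch-1.safetensors")],
)
```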