...


[Image grid, Seed 8162032: CFG0 · CFG1 @ 768px · CFG1, CFG2, CFG3, CFG4, CFG6, CFG8 @ 512px; images omitted]

Test 3 - Legs

Prompt: Generate a photo of a woman's legs, with her feet crossed and wearing white high-heeled shoes with ribbons tied around her ankles. The shoes should have a pointed toe and a stiletto heel. The woman's legs should be smooth and tanned, with a slight sheen to them. The background should be a light gray color. The photo should be taken from a low angle, looking up at the woman's legs. The ribbons should be tied in a bow shape around the ankles. The shoes should have a red sole. The woman's legs should be slightly bent at the knee.
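
For reference, the sweep behind these grids maps onto plain diffusers calls roughly as follows. This is a minimal sketch, not SD.Next's actual code path; it assumes the "8162032" label above is the seed, takes the checkpoint from the config below, and the output filenames are illustrative only.

Code Block
import torch
from diffusers import FluxPipeline

# Load the checkpoint in bfloat16 and keep the whole pipeline on the GPU
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Krea-dev", torch_dtype=torch.bfloat16
).to("xpu")  # Intel GPU in this test setup; use "cuda" on NVIDIA

prompt = "Generate a photo of a woman's legs, ..."  # full Test 3 prompt above

# Re-seed every run so only the guidance scale changes between images
for cfg in (0, 1, 2, 3, 4, 6, 8):
    image = pipe(
        prompt,
        height=512,
        width=512,
        guidance_scale=cfg,
        generator=torch.Generator("cpu").manual_seed(8162032),  # assumed seed
    ).images[0]
    image.save(f"legs-512x512-cfg{cfg}.png")  # illustrative filename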

...

Config

Code Block
{
  "samples_filename_pattern": "[seq]-[date]-[model_name]-[height]x[width]-Seed[seed]-CFG[cfg]-STEP[steps]",
  "diffusers_version": "c052791b5fe29ce8a308bf63dda97aa205b729be",
  "diffusers_offload_mode": "none",
  "diffusers_to_gpu": true,
  "device_map": "gpu",
  "ui_request_timeout": 120000,
  "diffusers_vae_tile_size": 512,
  "sd_model_checkpoint": "Diffusers/black-forest-labs/FLUX.1-Krea-dev [8162a9c7b0]"
}
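
In plain diffusers terms, the non-default options above roughly correspond to the following calls (a sketch of what SD.Next applies internally, not its actual loader):

Code Block
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Krea-dev", torch_dtype=torch.bfloat16
)
pipe.to("xpu")            # diffusers_offload_mode "none", diffusers_to_gpu true, device_map "gpu"
pipe.vae.enable_tiling()  # tiled VAE decode; SD.Next sets the 512px tile size via diffusers_vae_tile_size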


Model info

Code Block
Model: Diffusers/black-forest-labs/FLUX.1-dev
Type: f1
Class: FluxPipeline
Size: 0 bytes
Modified: 2025-07-15 12:03:09


Module | Class | Device | DType | Params | Modules | Config

vae

AutoencoderKL

xpu:0

torch.bfloat16

83819683

241

FrozenDict({'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'], 'block_out_channels': [128, 256, 512, 512], 'layers_per_block': 2, 'act_fn': 'silu', 'latent_channels': 16, 'norm_num_groups': 32, 'sample_size': 1024, 'scaling_factor': 0.3611, 'shift_factor': 0.1159, 'latents_mean': None, 'latents_std': None, 'force_upcast': True, 'use_quant_conv': False, 'use_post_quant_conv': False, 'mid_block_add_attention': True, '_class_name': 'AutoencoderKL', '_diffusers_version': '0.30.0.dev0', '_name_or_path': '/mnt/models/Diffusers/models--black-forest-labs--FLUX.1-dev/snapshots/3de623fc3c33e44ffbe2bad470d0f45bccf2eb21/vae'})

text_encoder

CLIPTextModel

xpu:0

torch.bfloat16

123060480

152

CLIPTextConfig { "architectures": [ "CLIPTextModel" ], "attention_dropout": 0.0, "bos_token_id": 0, "dropout": 0.0, "eos_token_id": 2, "hidden_act": "quick_gelu", "hidden_size": 768, "initializer_factor": 1.0, "initializer_range": 0.02, "intermediate_size": 3072, "layer_norm_eps": 1e-05, "max_position_embeddings": 77, "model_type": "clip_text_model", "num_attention_heads": 12, "num_hidden_layers": 12, "pad_token_id": 1, "projection_dim": 768, "torch_dtype": "bfloat16", "transformers_version": "4.53.2", "vocab_size": 49408 }

text_encoder_2

T5EncoderModel

xpu:0

torch.bfloat16

4762310656

463

T5Config { "architectures": [ "T5EncoderModel" ], "classifier_dropout": 0.0, "d_ff": 10240, "d_kv": 64, "d_model": 4096, "decoder_start_token_id": 0, "dense_act_fn": "gelu_new", "dropout_rate": 0.1, "eos_token_id": 1, "feed_forward_proj": "gated-gelu", "initializer_factor": 1.0, "is_encoder_decoder": true, "is_gated_act": true, "layer_norm_epsilon": 1e-06, "model_type": "t5", "num_decoder_layers": 24, "num_heads": 64, "num_layers": 24, "output_past": true, "pad_token_id": 0, "relative_attention_max_distance": 128, "relative_attention_num_buckets": 32, "tie_word_embeddings": false, "torch_dtype": "bfloat16", "transformers_version": "4.53.2", "use_cache": true, "vocab_size": 32128 }

tokenizer

CLIPTokenizer

None

None

0

0

None

tokenizer_2

T5TokenizerFast

None

None

0

0

None

transformer

FluxTransformer2DModel

xpu:0

torch.bfloat16

11901408320

1279

FrozenDict({'patch_size': 1, 'in_channels': 64, 'out_channels': None, 'num_layers': 19, 'num_single_layers': 38, 'attention_head_dim': 128, 'num_attention_heads': 24, 'joint_attention_dim': 4096, 'pooled_projection_dim': 768, 'guidance_embeds': True, 'axes_dims_rope': [16, 56, 56], '_class_name': 'FluxTransformer2DModel', '_diffusers_version': '0.30.0.dev0', '_name_or_path': '/mnt/models/Diffusers/models--black-forest-labs--FLUX.1-dev/snapshots/3de623fc3c33e44ffbe2bad470d0f45bccf2eb21/transformer'})

scheduler

FlowMatchEulerDiscreteScheduler

None

None

0

0

FrozenDict({'num_train_timesteps': 1000, 'shift': 3.0, 'use_dynamic_shifting': True, 'base_shift': 0.5, 'max_shift': 1.15, 'base_image_seq_len': 256, 'max_image_seq_len': 4096, 'invert_sigmas': False, 'shift_terminal': None, 'use_karras_sigmas': False, 'use_exponential_sigmas': False, 'use_beta_sigmas': False, 'time_shift_type': 'exponential', 'stochastic_sampling': False, '_use_default_values': ['time_shift_type', 'use_exponential_sigmas', 'invert_sigmas', 'use_karras_sigmas', 'stochastic_sampling', 'shift_terminal', 'use_beta_sigmas'], '_class_name': 'FlowMatchEulerDiscreteScheduler', '_diffusers_version': '0.30.0.dev0'})

image_encoder

NoneType

None

None

0

0

None

feature_extractor

NoneType

None

None

0

0

None

_name_or_path

str

None

None

0

0

None
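
A table like the one above can be regenerated from any loaded pipeline. A minimal sketch (the values match these rows only with the same diffusers build and checkpoint):

Code Block
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Krea-dev", torch_dtype=torch.bfloat16
)

# One row per pipeline component: class, device, dtype, param/module counts
for name, component in pipe.components.items():
    if isinstance(component, torch.nn.Module):
        p = next(component.parameters())
        n_params = sum(t.numel() for t in component.parameters())
        n_modules = len(list(component.modules()))
        print(name, component.__class__.__name__, p.device, p.dtype, n_params, n_modules)
    else:
        # tokenizers, the scheduler, and empty slots carry no tensors
        print(name, type(component).__name__)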

...