Outo Eguchi style trained on Anima Preview 3. Also known as eguchi saan.
Trigger: @outoeguchi
Note: Pupils may generate randomly. If you don't want pupils to ever show up, either put "pupils" in the negative prompt or "no pupils" in the positive prompt.
The artist almost exclusively draws simple backgrounds, so the Lora is a little overfit on simpler/less detailed backgrounds. Not much can be done about it, unfortunately.
Description
Training parameters (diffusion-pipe):
output_dir = 'output'
dataset = 'dataset.toml'
epochs = 1000
max_steps = 500
micro_batch_size_per_gpu = 4
pipeline_stages = 1
gradient_accumulation_steps = 1
gradient_clipping = 1.0
warmup_steps = 50
lr_scheduler = 'constant'
compile = true
# rex_cycle_steps = 1000
save_every_n_steps = 50
#checkpoint_every_n_minutes = 120
activation_checkpointing = 'unsloth'
#activation_checkpointing = true
save_dtype = 'bfloat16'
caching_batch_size = 1
[model]
type = 'anima'
transformer_path = 'anima-preview3.safetensors'
vae_path = 'qwen_image_vae.safetensors'
qwen_path = 'Qwen3-0.6B'
dtype = 'bfloat16'
timestep_sample_method = 'logit_normal'
sigmoid_scale = 1.0
llm_adapter_lr = 0
cache_text_embeddings = false
shuffle_tags = true
caption_mode = "tags"
tag_delimiter = ', '
debug_caption_processing = false
qwen_nf4 = false
[adapter]
type = 'lora'
rank = 16
dtype = 'bfloat16'
[optimizer]
type = 'came'
lr = 2e-5
weight_decay = 0.05

Dataset config:
resolutions = [1024]
enable_ar_bucket = true
min_ar = 0.5
max_ar = 2.0
num_ar_buckets = 7
[[directory]]
path = '1_eguchisaan'
num_repeats = 2