Just another shitty experiment.
Trained on: illustriousXL_v01;
Artist: gaku.
Recommendations:
str: ~0.8;
cfg: 5;
sampler: euler a, 28 steps;
hires fix: 1.5x, 14 steps, 0.4 denoise;
trigger word: gaku artstyle;
has a bias toward sweat and hair intakes (overtraining/bad captions), so add them to the negative prompt for scenes that don't need them (see the diffusers sketch below).
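If you're running this through diffusers instead of a UI, a minimal sketch of the recommended settings might look like the following (filenames/paths are placeholders, not the release files, and hires fix is omitted since diffusers has no one-liner for it):

import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler

# Base checkpoint filename is an assumption; point it at your local illustriousXL_v01 file.
pipe = StableDiffusionXLPipeline.from_single_file(
    "illustriousXL_v01.safetensors",
    torch_dtype=torch.float16,
).to("cuda")

# euler a, per the recommendations above.
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

# LoRA filename is an assumption; weight 0.8 matches the recommended strength.
pipe.load_lora_weights("gaku_artstyle.safetensors", adapter_name="gaku")
pipe.set_adapters(["gaku"], adapter_weights=[0.8])

image = pipe(
    prompt="gaku artstyle, 1girl, solo, cowboy shot",  # trigger word first
    negative_prompt="sweat, hair intake",              # counter the bias when unwanted
    num_inference_steps=28,
    guidance_scale=5.0,
).images[0]
image.save("out.png")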
The idea is to put a higher rank on q and v (128 in this case) and train every layer possible ("module type table: {'LoConModule': 1058, 'NormModule': 256}" as reported by kohya-ss) at a lower rank, then use rs_lora to scale the difference in rank.
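To make that scaling concrete: plain LoRA multiplies each module's update by alpha/rank, while rsLoRA uses alpha/sqrt(rank), so the rank-128 q/v blocks aren't scaled down ~32x harder than the rank-4 ones. A quick arithmetic check (nothing model-specific here):

import math

alpha = 1
for rank in (128, 16, 8, 4):
    plain = alpha / rank           # classic LoRA scale
    rs = alpha / math.sqrt(rank)   # rsLoRA scale
    print(f"rank {rank:>3}: lora {plain:.4f} vs rs_lora {rs:.4f}")

# rank 128: lora 0.0078 vs rs_lora 0.0884
# rank  16: lora 0.0625 vs rs_lora 0.2500
# rank   8: lora 0.1250 vs rs_lora 0.3536
# rank   4: lora 0.2500 vs rs_lora 0.5000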
Settings:
dim 4, alpha 1, rs_lora = true, train_norm = true. Target names (there's a quick regex-dispatch sketch after the full config below):
enable_conv = true
unet_target_module = []
unet_target_name = [
"^.*to.(q|v).*$",
"^.*to.(k|out).*$",
"^.*ff.*$",
"^.*proj.(in|out).*$",
"^(?!.*(proj.(in|out)|attn|ff)).*$",
]
text_encoder_target_module = []
text_encoder_target_name = [
"^.*(q|v).proj.*$",
"^.*(k|out).proj.*$",
"^.*mlp.*$",
]
[name_algo_map]
[name_algo_map."^.*to.(q|v).*$"]
algo = "lora"
dim = 128
alpha = 1
rs_lora = true
dora_wd = true
wd_on_output = true
[name_algo_map."^.*to.(k|out).*$"]
algo = "lora"
dim = 16
alpha = 1
rs_lora = true
dora_wd = true
wd_on_output = true
[name_algo_map."^.*ff.*$"]
algo = "lora"
dim = 8
alpha = 1
rs_lora = true
dora_wd = true
wd_on_output = true
[name_algo_map."^.*proj.(in|out).*$"]
algo = "lora"
dim = 4
alpha = 1
rs_lora = true
dora_wd = true
wd_on_output = true
[name_algo_map."^(?!.*(proj.(in|out)|attn|ff)).*$"]
algo = "lora"
dim = 4
alpha = 1
rs_lora = true
[name_algo_map."^.*(q|v).proj.*$"]
algo = "lora"
dim = 128
alpha = 1
rs_lora = true
dora_wd = true
wd_on_output = true
[name_algo_map."^.*(k|out).proj.*$"]
algo = "lora"
dim = 16
alpha = 1
rs_lora = true
dora_wd = true
wd_on_output = true
[name_algo_map."^.*mlp.*$"]
algo = "lora"
dim = 8
alpha = 1
rs_lora = true
dora_wd = true
wd_on_output = true
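If you want to sanity-check which name_algo_map entry a given layer falls into, here's a small sketch (the module names are illustrative examples, not exact SDXL keys, and it assumes entries are tried in order with the first match winning):

import re

# (pattern, dim) pairs in the same order as the unet_target_name list above.
unet_rules = [
    (r"^.*to.(q|v).*$", 128),
    (r"^.*to.(k|out).*$", 16),
    (r"^.*ff.*$", 8),
    (r"^.*proj.(in|out).*$", 4),
    (r"^(?!.*(proj.(in|out)|attn|ff)).*$", 4),  # catch-all for conv/resnet/etc.
]

for name in (
    "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q",
    "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0",
    "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj",
    "down_blocks.1.attentions.0.proj_in",
    "down_blocks.1.resnets.0.conv1",
):
    for pattern, dim in unet_rules:
        if re.match(pattern, name):
            print(f"{name} -> dim {dim}")
            break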