# IV Drip (点滴) v001
### Command-line settings
```sh
accelerate.exe launch --dynamo_backend no --dynamo_mode default --mixed_precision bf16 \
  --num_processes 2 --num_machines 1 --num_cpu_threads_per_process 2 \
  sdxl_train_network.py --config_file config_lora-20240618-184456.toml \
  --lr_scheduler_type CosineAnnealingLR \
  --lr_scheduler_args T_max=1040 eta_min=1e-7 \
  --network_args loraplus_lr_ratio=16 \
  --alpha_mask
```
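On the command line, `--lr_scheduler_type CosineAnnealingLR` overrides the `lr_scheduler = "cosine"` entry in the TOML, handing scheduling to PyTorch's `CosineAnnealingLR` with `T_max=1040` and `eta_min=1e-7`. Below is a minimal sketch of the resulting curve, assuming the TOML's `unet_lr = 0.0001` as the base LR (the dummy optimizer is illustrative, not sd-scripts' code). Note that `CosineAnnealingLR` follows a full cosine with period `2 * T_max`, so with `max_train_steps = 10400` the LR reaches `eta_min` at step 1040 and then climbs back up rather than decaying once.

```python
import torch
from torch.optim.lr_scheduler import CosineAnnealingLR

# Dummy parameter and optimizer purely to drive the scheduler (sketch only).
param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.AdamW([param], lr=1e-4)  # unet_lr from the TOML

# Matches the CLI: --lr_scheduler_args T_max=1040 eta_min=1e-7
scheduler = CosineAnnealingLR(optimizer, T_max=1040, eta_min=1e-7)

for step in range(10400):  # max_train_steps
    if step % 520 == 0 and step <= 2080:
        print(f"step {step:5d}: lr = {scheduler.get_last_lr()[0]:.2e}")
    optimizer.step()
    scheduler.step()
# step 0: 1.00e-04 -> step 1040: 1.00e-07 -> step 2080: back to 1.00e-04
```

`--network_args loraplus_lr_ratio=16` enables LoRA+ in sd-scripts, which trains each LoRA up-projection ("lora_B") at 16x the base learning rate of its down-projection.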
### TOML configuration file
Contents of `config_lora-20240618-184456.toml`:

```toml
bucket_no_upscale = true
bucket_reso_steps = 64
cache_latents = true
cache_latents_to_disk = true
caption_dropout_rate = 0.05
caption_extension = ".txt"
clip_skip = 1
dynamo_backend = "no"
enable_bucket = true
epoch = 100
gradient_accumulation_steps = 1
gradient_checkpointing = true
huber_c = 0.1
huber_schedule = "snr"
keep_tokens = 1
learning_rate = 0.0001
log_with = "wandb"
logging_dir = "log"
loss_type = "smooth_l1"
lr_scheduler = "cosine"
lr_scheduler_args = []
lr_scheduler_num_cycles = 1
lr_scheduler_power = 1
max_bucket_reso = 2048
max_data_loader_n_workers = 0
max_grad_norm = 0.1
max_timestep = 1000
max_token_length = 225
max_train_epochs = 100
max_train_steps = 10400
mem_eff_attn = true
min_bucket_reso = 256
min_snr_gamma = 5
mixed_precision = "bf16"
network_alpha = 32
network_args = ["rank_dropout=0.2", "module_dropout=0.2"]
network_dim = 32
network_dropout = 0.2
network_module = "networks.lora"
no_half_vae = true
noise_offset_type = "Original"
optimizer_args = ["weight_decay=0.01"]
optimizer_type = "AdamW8bit"
output_dir = "output"
output_name = "intravenousdrip-XLPony_001"
pretrained_model_name_or_path = "ponyDiffusionV6XL_v6StartWithThisOne.safetensors"
prior_loss_weight = 0.1
resolution = "1024,1024"
sample_prompts = "prompt.txt"
sample_sampler = "euler_a"
save_every_n_epochs = 10
save_model_as = "safetensors"
save_precision = "bf16"
save_state = true
seed = 1
shuffle_caption = true
text_encoder_lr = 1e-5
train_batch_size = 1
unet_lr = 0.0001
xformers = true
```
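Several of these keys combine in the loss: `loss_type = "smooth_l1"` with `huber_c = 0.1` selects a Huber-style loss (and `huber_schedule = "snr"` moves the Huber transition point with the noise level), while `min_snr_gamma = 5` applies Min-SNR-gamma weighting, which caps the influence of low-noise timesteps. Below is a rough sketch of the epsilon-prediction Min-SNR weight, assuming the formula from the Min-SNR paper; the helper name and SNR values are illustrative, not sd-scripts' actual code.

```python
import torch

def min_snr_weight(snr: torch.Tensor, gamma: float = 5.0) -> torch.Tensor:
    # Min-SNR-gamma (epsilon-prediction): w = min(SNR, gamma) / SNR.
    # Noisy timesteps (SNR <= gamma) keep weight 1; nearly-clean timesteps
    # (high SNR) are down-weighted to gamma / SNR.
    return torch.minimum(snr, torch.full_like(snr, gamma)) / snr

# Illustrative SNR values, not real SDXL numbers:
snr = torch.tensor([0.05, 1.0, 5.0, 50.0, 500.0])
print(min_snr_weight(snr))  # tensor([1.0000, 1.0000, 1.0000, 0.1000, 0.0100])
```

As a sanity check on the step budget: with `train_batch_size = 1` and `gradient_accumulation_steps = 1`, `max_train_steps = 10400` over `max_train_epochs = 100` works out to 104 optimizer steps per epoch.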