Color Contrast

Verified: SafeTensor
Type: LoRA
Stats: 288 / 1.2k / 414
Published: Jul 14, 2025
Base Model: Illustrious
Training: Steps 9,080, Epochs 40
Usage Tips: Clip Skip 2

Trigger Words:
clrcon
clrcon_red
clrcon_green
clrcon_blue
clrcon_yellow
clrcon_orange
clrcon_purple
clrcon_pink
clrcon_bw
clrcon_soft

Hash (AutoV2): 86E747E6B9

Create striking, visually appealing images that emphasize contrasting colors. Pick one, two, or three of your favorite colors and let this LoRA mix them together in an eye-catching design.

I recommend experimenting with it a bit; there's a lot of variety in the images it can create, but you have to mix things up to get its full potential. Occasionally it might emphasize one color to the exclusion of another, in which case you might want to try different resolutions, different tag orders, or pairing the color tags with different objects in the image, e.g. instead of "clrcon_green, 1girl, medium hair," try "1girl, clrcon_green hair, medium hair," and so forth.

The keyword is "clrcon." There are also keywords for common colors: clrcon_red, clrcon_blue, clrcon_green, clrcon_yellow, clrcon_orange, clrcon_purple, clrcon_pink. There's also clrcon_soft, which emphasizes a softer contrast with a secondary color palette; it can give mixed results, but it doesn't hurt to play around with it. Finally, there's clrcon_bw, which is specifically for images where black or white are used to create contrast, either with each other or with another color.

The sample images were mostly created using Animij, but the LoRA has worked well with every checkpoint I've tried so far.

Just using the clrcon tag by itself will already naturally create appealing contrast, but you can be more specific and it'll happily follow along. Have fun!
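
If you generate outside a web UI, here's a rough sketch of what loading it with Hugging Face diffusers could look like. The checkpoint and LoRA file names below are placeholders, the prompt is just one way to combine the triggers, and clip_skip support in the SDXL pipeline depends on your diffusers version, so treat this as a starting point rather than a tested recipe:

import torch
from diffusers import StableDiffusionXLPipeline

# Placeholder paths: any Illustrious/SDXL-based checkpoint plus the clrcon LoRA file.
pipe = StableDiffusionXLPipeline.from_single_file(
    "Illustrious-v1.safetensors",
    torch_dtype=torch.float16,
).to("cuda")
pipe.load_lora_weights("clrcon.safetensors")

# Combine the main trigger with one or two color triggers, as described above.
prompt = "clrcon_red, clrcon_blue, 1girl, medium hair, city street at night"
image = pipe(
    prompt,
    negative_prompt="lowres, bad anatomy",
    num_inference_steps=28,
    guidance_scale=6.0,
    clip_skip=2,  # matches the recommended Clip Skip: 2; support varies by diffusers version
).images[0]
image.save("clrcon_sample.png")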

This model was trained in kohya_ss on a dataset of 435 images that I labeled manually. The full training configuration is below. As always, please let me know if there's anything you're curious about regarding the model.

{
  "LoRA_type": "Standard",
  "LyCORIS_preset": "full",
  "adaptive_noise_scale": 0,
  "additional_parameters": "--log_prefix=clrcon",
  "async_upload": false,
  "block_alphas": "",
  "block_dims": "",
  "block_lr_zero_threshold": "",
  "bucket_no_upscale": false,
  "bucket_reso_steps": 32,
  "bypass_mode": false,
  "cache_latents": true,
  "cache_latents_to_disk": true,
  "caption_dropout_every_n_epochs": 0,
  "caption_dropout_rate": 0,
  "caption_extension": ".txt",
  "clip_skip": 2,
  "color_aug": false,
  "constrain": 0,
  "conv_alpha": 4,
  "conv_block_alphas": "",
  "conv_block_dims": "",
  "conv_dim": 4,
  "dataset_config": "",
  "debiased_estimation_loss": false,
  "decompose_both": false,
  "dim_from_weights": false,
  "dora_wd": false,
  "down_lr_weight": "",
  "dynamo_backend": "no",
  "dynamo_mode": "default",
  "dynamo_use_dynamic": false,
  "dynamo_use_fullgraph": false,
  "enable_bucket": true,
  "epoch": 0,
  "extra_accelerate_launch_args": "",
  "factor": -1,
  "flip_aug": true,
  "fp8_base": false,
  "full_bf16": false,
  "full_fp16": false,
  "gpu_ids": "",
  "gradient_accumulation_steps": 1,
  "gradient_checkpointing": true,
  "huber_c": 0.1,
  "huber_schedule": "snr",
  "huggingface_path_in_repo": "",
  "huggingface_repo_id": "",
  "huggingface_repo_type": "",
  "huggingface_repo_visibility": "",
  "huggingface_token": "",
  "ip_noise_gamma": 0,
  "ip_noise_gamma_random_strength": false,
  "keep_tokens": 1,
  "learning_rate": 5e-05,
  "log_tracker_config": "",
  "log_tracker_name": "",
  "log_with": "tensorboard",
  "logging_dir": "D:/Coding/kohya_ss/logs",
  "loss_type": "huber",
  "lr_scheduler": "cosine",
  "lr_scheduler_args": "",
  "lr_scheduler_num_cycles": 1,
  "lr_scheduler_power": 1,
  "lr_warmup": 0,
  "main_process_port": 0,
  "masked_loss": false,
  "max_bucket_reso": 2048,
  "max_data_loader_n_workers": 0,
  "max_grad_norm": 1,
  "max_resolution": "1024,1024",
  "max_timestep": 1000,
  "max_token_length": 75,
  "max_train_epochs": 40,
  "max_train_steps": 0,
  "mem_eff_attn": false,
  "metadata_author": "restlessboy",
  "metadata_description": "",
  "metadata_license": "",
  "metadata_tags": "clrcon",
  "metadata_title": "clrcon",
  "mid_lr_weight": "",
  "min_bucket_reso": 64,
  "min_snr_gamma": 0,
  "min_timestep": 0,
  "mixed_precision": "fp16",
  "model_list": "custom",
  "module_dropout": 0.1,
  "multi_gpu": false,
  "multires_noise_discount": 0,
  "multires_noise_iterations": 0,
  "network_alpha": 32,
  "network_dim": 32,
  "network_dropout": 0.15,
  "network_weights": "",
  "noise_offset": 0.0357,
  "noise_offset_random_strength": false,
  "noise_offset_type": "Original",
  "num_cpu_threads_per_process": 2,
  "num_machines": 1,
  "num_processes": 1,
  "optimizer": "AdamW",
  "optimizer_args": "",
  "output_dir": "D:/Coding/kohya_ss/outputs/model",
  "output_name": "clrcon",
  "persistent_data_loader_workers": false,
  "pretrained_model_name_or_path": "D:/ai-models/Illustrious-v1",
  "prior_loss_weight": 1,
  "random_crop": false,
  "rank_dropout": 0.1,
  "rank_dropout_scale": false,
  "reg_data_dir": "",
  "rescaled": false,
  "resume": "",
  "resume_from_huggingface": "",
  "sample_every_n_epochs": 1,
  "sample_every_n_steps": 0,
  "sample_sampler": "euler_a",
  "save_every_n_epochs": 10,
  "save_every_n_steps": 0,
  "save_last_n_steps": 0,
  "save_last_n_steps_state": 0,
  "save_model_as": "safetensors",
  "save_precision": "fp16",
  "save_state": false,
  "save_state_on_train_end": false,
  "save_state_to_huggingface": false,
  "scale_v_pred_loss_like_noise_pred": false,
  "scale_weight_norms": 1,
  "sdxl": true,
  "sdxl_cache_text_encoder_outputs": false,
  "sdxl_no_half_vae": true,
  "seed": 11,
  "shuffle_caption": false,
  "stop_text_encoder_training_pct": 0,
  "text_encoder_lr": 2.5e-05,
  "train_batch_size": 2,
  "train_data_dir": "D:/Coding/kohya_ss/dataset/img",
  "train_norm": false,
  "train_on_input": false,
  "training_comment": "trigger: clrcon",
  "unet_lr": 5e-05,
  "unit": 1,
  "up_lr_weight": "",
  "use_cp": false,
  "use_scalar": false,
  "use_tucker": false,
  "v2": false,
  "v_parameterization": false,
  "v_pred_like_loss": 0,
  "vae": "",
  "vae_batch_size": 0,
  "wandb_api_key": "",
  "wandb_run_name": "",
  "weighted_captions": false,
  "xformers": "xformers"
}
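
If you want to pull the headline hyperparameters out of a saved config like this without opening the kohya_ss GUI, something like the following works; the file name clrcon_config.json is made up, so point it at wherever you saved the JSON:

# Read a saved kohya_ss GUI config (hypothetical file name) and print the key settings.
import json

with open("clrcon_config.json", "r", encoding="utf-8") as f:
    cfg = json.load(f)

for key in (
    "LoRA_type", "network_dim", "network_alpha", "learning_rate",
    "text_encoder_lr", "unet_lr", "lr_scheduler", "optimizer",
    "train_batch_size", "max_train_epochs", "max_resolution",
    "noise_offset", "clip_skip",
):
    print(f"{key}: {cfg.get(key)}")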