Sign In

Human Focus Photography

60

843

0

33

Verified:

SafeTensor

Type

LoRA

Stats

621

0

Reviews

Published

Sep 2, 2025

Base Model

Qwen

Training

Epochs: 41

Usage Tips

Strength: 1

Hash

AutoV2
AFB7670477

Human Focus Photography is a general-purpose LoRA trained on diverse photographic portraits and lifestyle images. It is intended to reduce bias and to enhance realism, lighting, and natural detail across a wide range of subjects, making it well-suited for portraits, street scenes, fashion, and candid photography styles.

Workflow is included in the sample images.

Training for v1.0 took around 17 hours on my RTX 5090.

SimpleTuner training config:

{
    "aspect_bucket_rounding": 2,
    "base_model_precision": "int8-quanto",
    "caption_dropout_probability": 0.05,
    "checkpointing_steps": 250,
    "checkpoints_total_limit": 40,
    "compress_disk_cache": false,
    "data_backend_config": "/data/datasets/config/Human_Focus_Photography_v1.0.json",
    "disable_benchmark": true,
    "disable_bucket_pruning": true,
    "flow_schedule_shift": 1.73,
    "gradient_accumulation_steps": 8,
    "gradient_checkpointing": true,
    "ignore_final_epochs": true,
    "learning_rate": 0.000025,
    "lora_type": "lycoris",
    "lr_scheduler": "cosine",
    "lr_warmup_steps": 100,
    "lycoris_config": "/data/config/lycoris_config_283.19M.json",
    "max_grad_norm": 0.5,
    "max_train_steps": 0,
    "minimum_image_size": 0,
    "mixed_precision": "bf16",
    "model_family": "qwen_image",
    "model_flavour": "v1.0",
    "model_type": "lora",
    "num_eval_images": 25,
    "num_train_epochs": 100,
    "optimizer": "optimi-lion",
    "optimizer_config": "weight_decay=0.0",
    "output_dir": "/data/output/loras/Human_Focus_Photography_v1.0",
    "push_checkpoints_to_hub": false,
    "push_to_hub": false,
    "quantize_activations": false,
    "quantize_via": "cpu",
    "report_to": "none",
    "resolution_type": "pixel_area",
    "resolution": 1024,
    "resume_from_checkpoint": "latest",
    "seed": 42,
    "skip_file_discovery": false,
    "train_batch_size": 1,
    "use_ema": true,
    "vae_batch_size": 2,
    "validation_disable": true
}

And the Lycoris config:

{
    "algo": "lokr",
    "multiplier": 1.0,
    "linear_dim": 100000,
    "linear_alpha": 1,
    "factor": 12,
    "apply_preset": {
        "target_module": [
            "Attention",
            "FeedForward"
        ],
        "module_algo_map": {
            "Attention": {
                "factor": 12
            },
            "FeedForward": {
                "factor": 6
            }
        }
    }
}