Hunyuan Video Configs

Jan 16, 2025
workflows

Many of you asked me to share my configs. So here they are.


hunyuan_video.toml

# Output path for training runs. Each training run makes a new directory in here.
output_dir = '/root/hunyuanlora/output'

# Dataset config file.
dataset = 'examples/dataset.toml'
# You can have separate eval datasets. Give them a name for Tensorboard metrics.
# eval_datasets = [
#     {name = 'something', config = 'path/to/eval_dataset.toml'},
# ]

# training settings

# I usually set this to a really high value because I don't know how long I want to train.
epochs = 60
# Batch size of a single forward/backward pass for one GPU.
micro_batch_size_per_gpu = 1
# Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
pipeline_stages = 1
# Number of micro-batches sent through the pipeline for each training step.
# If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
gradient_accumulation_steps = 4
# Grad norm clipping.
gradient_clipping = 1.0
# Learning rate warmup.
warmup_steps = 100

# eval settings

eval_every_n_epochs = 1
eval_before_first_step = true
# You might want to set these lower for eval so that fewer images get dropped (the eval dataset is usually much smaller than the training set).
# Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so a higher global batch size means
# more dropped images. That usually doesn't matter for training, but the eval set is much smaller, so it can matter there.
eval_micro_batch_size_per_gpu = 1
eval_gradient_accumulation_steps = 1
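# To make the note above concrete (this is my rough understanding of how diffusion-pipe forms batches, not something the config enforces):
# global batch size = micro_batch_size_per_gpu * gradient_accumulation_steps * number of data-parallel GPUs (i.e. GPUs / pipeline_stages).
# With the training settings above on a single GPU that's 1 * 4 * 1 = 4, so each bucket can drop up to 3 leftover samples;
# with these eval settings it's 1 * 1 * 1 = 1, so nothing gets dropped from the eval set.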

# misc settings

# Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
save_every_n_epochs = 10
# You can checkpoint the training state every n epochs or every n minutes. Set only one of these. Resume from a checkpoint with the --resume_from_checkpoint flag.
#checkpoint_every_n_epochs = 1
checkpoint_every_n_minutes = 120
# Always set to true unless you have a huge amount of VRAM.
activation_checkpointing = true
# Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
partition_method = 'parameters'
# dtype for saving the LoRA or model, if different from training dtype
save_dtype = 'bfloat16'
# Batch size for caching latents and text embeddings. Increasing can lead to higher GPU utilization during caching phase but uses more memory.
caching_batch_size = 1
# How often deepspeed logs to console.
steps_per_print = 1
# How to extract video clips for training from a single input video file.
# The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
# number of frames for that bucket.
# single_beginning: one clip starting at the beginning of the video
# single_middle: one clip from the middle of the video (cutting off the start and end equally)
# multiple_overlapping: extract the minimum number of clips to cover the full range of the video. They might overlap some.
# default is single_middle
video_clip_mode = 'single_middle'

[model]
# flux, ltx-video, or hunyuan-video
type = 'hunyuan-video'
# Can load Hunyuan Video entirely from the ckpt path set up for the official inference scripts.
#ckpt_path = '/home/anon/HunyuanVideo/ckpts'
# Or you can load it by pointing to all the ComfyUI files.
transformer_path = '/root/hunyuanlora/models/unet/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors'
vae_path = '/root/hunyuanlora/models/vae/hunyuan_video_vae_bf16.safetensors'
llm_path = '/root/hunyuanlora/models/llava-llama-3-8b-text-encoder-tokenizer'
clip_path = '/root/hunyuanlora/models/clip-vit-large-patch14'
# Base dtype used for all models.
dtype = 'bfloat16'
# Hunyuan Video supports fp8 for the transformer when training LoRA.
transformer_dtype = 'float8'
# How to sample timesteps to train on. Can be logit_normal or uniform.
timestep_sample_method = 'logit_normal'

# flux example
# [model]
# type = 'flux'
# # Path to Huggingface Diffusers directory for Flux
# diffusers_path = '/data2/imagegen_models/FLUX.1-dev'
# # You can override the transformer from a BFL format checkpoint.
# transformer_path = '/data2/imagegen_models/flux-dev-single-files/consolidated_s6700-schnell.safetensors'
# dtype = 'bfloat16'
# flux_shift = true

# LTX-Video example
# [model]
# type = 'ltx-video'
# diffusers_path = '/data2/imagegen_models/LTX-Video'
# dtype = 'bfloat16'
# timestep_sample_method = 'logit_normal'

[adapter]
type = 'lora'
rank = 32
# Dtype for the LoRA weights you are training.
dtype = 'bfloat16'
# You can initialize the lora weights from a previously trained lora.
#init_from_existing = '/data/diffusion_pipe_training_runs/something/epoch50'

[optimizer]
# AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
# Look at train.py for other options. You could also easily edit the file and add your own.
type = 'adamw_optimi'
lr = 2e-5
betas = [0.9, 0.99]
weight_decay = 0.01
eps = 1e-8
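
To make the video_clip_mode options in the misc settings easier to picture, here is a small Python sketch of how the three modes could pick start frames when cutting clips of a bucket's length out of a longer video. It's an illustration of the behavior described in the comments, not the actual diffusion-pipe code.

import math

def clip_starts(total_frames, clip_len, mode):
    # Returns the start frame index of each clip extracted from the video.
    if total_frames < clip_len:
        return []  # too short for this frame bucket
    if mode == 'single_beginning':
        return [0]
    if mode == 'single_middle':
        # one clip from the middle, cutting off the start and end equally
        return [(total_frames - clip_len) // 2]
    if mode == 'multiple_overlapping':
        # minimum number of clips that covers the whole video; neighbors may overlap
        n = math.ceil(total_frames / clip_len)
        if n == 1:
            return [0]
        last_start = total_frames - clip_len
        return [round(i * last_start / (n - 1)) for i in range(n)]
    raise ValueError(f'unknown video_clip_mode: {mode}')

# e.g. a 100-frame video landing in the 65-frame bucket:
# clip_starts(100, 65, 'single_middle')        -> [17]
# clip_starts(100, 65, 'multiple_overlapping') -> [0, 35]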

dataset.toml

# Resolutions to train on, given as the side length of a square image. You can have multiple sizes here.
resolutions = [512]
# You can also give resolutions as (width, height) pairs. This doesn't do anything different; it's just
# another way of specifying the area (i.e. total number of pixels) you want to train on.
# resolutions = [[1280, 720]]

# Enable aspect ratio bucketing. For the different AR buckets, the final size will be such that
# the areas match the resolutions you configured above.
enable_ar_bucket = true

# The aspect ratio and frame bucket settings may be specified for each [[directory]] entry as well.
# Directory-level settings will override top-level settings.

# Min and max aspect ratios, given as width/height ratio.
min_ar = 0.5
max_ar = 2.0
# Total number of aspect ratio buckets, evenly spaced (in log space) between min_ar and max_ar.
num_ar_buckets = 7

# Can manually specify ar_buckets instead of using the range-style config above.
# Each entry can be width/height ratio, or (width, height) pair. But you can't mix them, because of TOML.
# ar_buckets = [[512, 512], [448, 576]]
# ar_buckets = [1.0, 1.5]

# For video training, you need to configure frame buckets (similar to aspect ratio buckets). There is always
# a frame bucket of 1 for images. Videos are assigned to the first frame bucket whose frame count they meet or exceed.
# Videos are never assigned to the image frame bucket (1); a video too short for any video bucket is simply dropped.
frame_buckets = [1, 33, 65]


[[directory]]
# Path to directory of images/videos, and corresponding caption files. The caption files should match the media file name, but with a .txt extension.
# A missing caption file will log a warning, but then just train using an empty caption.
path = '/root/hunyuanlora/input/'
# The dataset will act like it is duplicated this many times.
num_repeats = 10
# Example of overriding some settings, and using ar_buckets to directly specify ARs.
# ar_buckets = [[448, 576]]
# resolutions = [[448, 576]]
# frame_buckets = [1]


# You can list multiple directories.

# [[directory]]
# path = '/home/anon/data/images/something_else'
# num_repeats = 5
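
If you want to sanity-check what the aspect ratio settings above actually train on: the bucket ratios are spaced evenly in log space between min_ar and max_ar, and each bucket's width and height are chosen so the area matches the configured resolution. A rough Python sketch of that calculation (the rounding to multiples of 16 is my assumption, not necessarily the exact rounding diffusion-pipe uses):

import math

def ar_bucket_sizes(resolution=512, min_ar=0.5, max_ar=2.0, num_ar_buckets=7, multiple=16):
    # Approximate (width, height) per AR bucket: log-spaced ratios, area close to resolution**2.
    area = resolution * resolution
    log_min, log_max = math.log(min_ar), math.log(max_ar)
    ratios = [math.exp(log_min + i * (log_max - log_min) / (num_ar_buckets - 1))
              for i in range(num_ar_buckets)]
    sizes = []
    for ar in ratios:  # ar = width / height
        w = math.sqrt(area * ar)
        h = math.sqrt(area / ar)
        sizes.append((round(w / multiple) * multiple, round(h / multiple) * multiple))
    return sizes

# With the defaults above this prints:
# [(368, 720), (400, 640), (464, 576), (512, 512), (576, 464), (640, 400), (720, 368)]
print(ar_bucket_sizes())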

Use 20-25 images captioned with ChatGPT or JoyCaption.
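
Since a missing caption only logs a warning and then trains on an empty caption, it's worth checking the folder before caching starts. A small helper script for that (the path and extension list are just examples matching the config above, not part of diffusion-pipe):

from pathlib import Path

# Flag any image/video in the dataset directory that has no matching .txt caption file.
DATA_DIR = Path('/root/hunyuanlora/input')
MEDIA_EXTS = {'.jpg', '.jpeg', '.png', '.webp', '.mp4'}  # extend as needed

missing = [p for p in sorted(DATA_DIR.iterdir())
           if p.suffix.lower() in MEDIA_EXTS and not p.with_suffix('.txt').exists()]

for p in missing:
    print(f'missing caption: {p.name}')
print(f'{len(missing)} file(s) without captions')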
