Fix vae path validation
bmaltais committed Mar 21, 2024
1 parent c31e6c1 commit aaf0396
Showing 5 changed files with 19 additions and 48 deletions.
3 changes: 2 additions & 1 deletion .gitignore
@@ -50,4 +50,5 @@ dataset/**
!dataset/**/.gitkeep
models
data
config.toml
config.toml
sd-scripts
4 changes: 3 additions & 1 deletion README.md
@@ -382,11 +382,13 @@ The documentation in this section will be moved to a separate document later.

## Change History

### 2024/03/20 (v23.0.15)
### 2024/03/21 (v23.0.15)

- Add support for toml dataset configuration file to all trainers
- Add new setup menu option to install Triton 2.1.0 for Windows
- Add support for LyCORIS BOFT and DoRA and QLyCORIS options for LoHA, LoKr and LoCon
- Fix issue with vae path validation
- Other fixes

### 2024/03/19 (v23.0.14)

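As context for the "toml dataset configuration file" entry above, here is a minimal sketch of such a file, assuming the sd-scripts dataset config schema; every path and value is a placeholder rather than a repository default:

```toml
# Minimal dataset configuration sketch (assumed sd-scripts schema; paths and values are placeholders).
[general]
enable_bucket = true        # sort images into resolution buckets
caption_extension = ".txt"  # caption files sit next to the images

[[datasets]]
resolution = 512            # training resolution for this dataset
batch_size = 4

  [[datasets.subsets]]
  image_dir = "./train/img" # placeholder directory of training images
  num_repeats = 10          # times each image is repeated per epoch
```

In the GUI this file would be supplied through the `dataset_config` field, which also appears in the test config change further down.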
7 changes: 1 addition & 6 deletions kohya_gui/class_advanced_training.py
@@ -39,8 +39,7 @@ def __init__(
headless (bool): Run in headless mode without GUI.
finetuning (bool): Enable model fine-tuning.
training_type (str): The type of training to be performed.
default_vae_dir (str): Default directory for VAE models.
default_output_dir (str): Default directory for output files.
config (dict): Configuration options for the training process.
"""
self.headless = headless
self.finetuning = finetuning
@@ -369,10 +368,6 @@ def list_state_dirs(path):
outputs=self.resume,
show_progress=False,
)
# self.max_train_epochs = gr.Textbox(
# label='Max train epoch',
# placeholder='(Optional) Override number of epoch',
# )
self.max_data_loader_n_workers = gr.Textbox(
label="Max num workers for DataLoader",
placeholder="(Optional) Override number of epoch. Default: 8",
7 changes: 7 additions & 0 deletions kohya_gui/common_gui.py
@@ -1775,6 +1775,13 @@ def validate_path(
if key in ["output_dir", "logging_dir"]:
if not validate_path(value, key, create_if_missing=True):
return False
elif key in ["vae"]:
# Check if it matches the Hugging Face model pattern
if re.match(r"^[\w-]+\/[\w-]+$", value):
log.info("Checking vae... huggingface.co model, skipping validation")
else:
if not validate_path(value, key):
return False
else:
if key not in ["pretrained_model_name_or_path"]:
if not validate_path(value, key):
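A minimal sketch of what the new `vae` branch does, using a hypothetical stand-in for the repository's `validate_path` helper; `looks_like_hf_model` and `check_vae` are illustrative names, not functions from the codebase:

```python
import os
import re


def looks_like_hf_model(value: str) -> bool:
    # Same pattern as the change above: "owner/name" built from word characters or dashes.
    return re.match(r"^[\w-]+\/[\w-]+$", value) is not None


def check_vae(value: str) -> bool:
    # Hypothetical stand-in for the GUI's validate_path helper: only checks the filesystem.
    if looks_like_hf_model(value):
        # Treated as a Hugging Face model id (e.g. "stabilityai/sd-vae-ft-mse"),
        # so local path validation is skipped.
        return True
    return os.path.exists(value)


# "stabilityai/vae" matches the pattern, so no path check is performed;
# a local file path only passes if it actually exists on disk.
print(check_vae("stabilityai/vae"))           # True
print(check_vae("./models/vae.safetensors"))  # True only if the file exists
```

The test config change below switches "vae" from an empty string to "stabilityai/vae", which matches this pattern and therefore skips local path validation.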
46 changes: 6 additions & 40 deletions test/config/dreambooth-AdamW8bit.json
@@ -1,11 +1,6 @@
{
"LoRA_type": "Standard",
"LyCORIS_preset": "full",
"adaptive_noise_scale": 0,
"additional_parameters": "",
"block_alphas": "",
"block_dims": "",
"block_lr_zero_threshold": "",
"bucket_no_upscale": true,
"bucket_reso_steps": 64,
"cache_latents": true,
@@ -15,58 +10,45 @@
"caption_extension": "",
"clip_skip": 2,
"color_aug": false,
"constrain": 0.0,
"conv_alpha": 1,
"conv_block_alphas": "",
"conv_block_dims": "",
"conv_dim": 1,
"debiased_estimation_loss": false,
"decompose_both": false,
"dim_from_weights": false,
"down_lr_weight": "",
"dataset_config": "",
"enable_bucket": true,
"epoch": 1,
"factor": -1,
"flip_aug": false,
"fp8_base": false,
"full_bf16": false,
"full_fp16": false,
"gpu_ids": "",
"gradient_accumulation_steps": 1,
"gradient_checkpointing": false,
"keep_tokens": "0",
"learning_rate": 5e-05,
"learning_rate_te": 1e-05,
"learning_rate_te1": 1e-05,
"learning_rate_te2": 1e-05,
"log_tracker_config": "",
"log_tracker_name": "",
"logging_dir": "./test/logs",
"lora_network_weights": "",
"lr_scheduler": "constant",
"lr_scheduler_args": "",
"lr_scheduler_num_cycles": "",
"lr_scheduler_power": "",
"lr_warmup": 0,
"max_bucket_reso": 2048,
"max_data_loader_n_workers": "0",
"max_grad_norm": 1,
"max_resolution": "512,512",
"max_timestep": 1000,
"max_token_length": "75",
"max_train_epochs": "",
"max_train_steps": "",
"mem_eff_attn": false,
"mid_lr_weight": "",
"min_bucket_reso": 256,
"min_snr_gamma": 0,
"min_timestep": 0,
"mixed_precision": "bf16",
"model_list": "runwayml/stable-diffusion-v1-5",
"module_dropout": 0,
"multi_gpu": false,
"multires_noise_discount": 0,
"multires_noise_iterations": 0,
"network_alpha": 1,
"network_dim": 8,
"network_dropout": 0,
"no_token_padding": false,
"noise_offset": 0.05,
"noise_offset_type": "Original",
"num_cpu_threads_per_process": 2,
@@ -80,10 +62,7 @@
"pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5",
"prior_loss_weight": 1.0,
"random_crop": false,
"rank_dropout": 0,
"rank_dropout_scale": false,
"reg_data_dir": "",
"rescaled": false,
"resume": "",
"sample_every_n_epochs": 0,
"sample_every_n_steps": 25,
@@ -97,30 +76,17 @@
"save_precision": "fp16",
"save_state": false,
"scale_v_pred_loss_like_noise_pred": false,
"scale_weight_norms": 0,
"sdxl": false,
"sdxl_cache_text_encoder_outputs": false,
"sdxl_no_half_vae": true,
"seed": "1234",
"shuffle_caption": false,
"stop_text_encoder_training": 0,
"text_encoder_lr": 0.0,
"train_batch_size": 4,
"train_data_dir": "./test/img",
"train_norm": false,
"train_on_input": true,
"training_comment": "",
"unet_lr": 0.0,
"unit": 1,
"up_lr_weight": "",
"use_cp": false,
"use_scalar": false,
"use_tucker": false,
"use_wandb": false,
"v2": false,
"v_parameterization": false,
"v_pred_like_loss": 0,
"vae": "",
"vae": "stabilityai/vae",
"vae_batch_size": 0,
"wandb_api_key": "",
"wandb_run_name": "",
