mathofdynamic committed on
Commit 52e8771
1 Parent(s): b172c6b

Upload folder using huggingface_hub

dataset.toml ADDED
@@ -0,0 +1,14 @@
+ [general]
+ shuffle_caption = false
+ caption_extension = '.txt'
+ keep_tokens = 1
+
+ [[datasets]]
+ resolution = 1024
+ batch_size = 1
+ keep_tokens = 1
+
+ [[datasets.subsets]]
+ image_dir = '/app/fluxgym/datasets/mamali-lora-03'
+ class_tokens = 'mamali'
+ num_repeats = 10
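
The dataset.toml above follows the sd-scripts dataset config layout: one dataset at 1024px resolution with batch size 1, and a single subset whose images come from /app/fluxgym/datasets/mamali-lora-03, are captioned from .txt files, get the class token 'mamali' kept in front, and are repeated 10 times per epoch. A minimal sanity-check sketch, assuming Python 3.11+ (for the standard-library tomllib) and that the file sits at the path train.sh later passes to --dataset_config; the script itself is illustrative and not part of this upload:

    import tomllib
    from pathlib import Path

    CONFIG_PATH = Path("/app/fluxgym/outputs/mamali-lora-03/dataset.toml")  # path used by --dataset_config below

    with CONFIG_PATH.open("rb") as f:          # tomllib requires a binary file handle
        cfg = tomllib.load(f)

    print(cfg["general"])                      # shuffle_caption / caption_extension / keep_tokens

    for ds in cfg["datasets"]:
        for subset in ds["subsets"]:
            image_dir = Path(subset["image_dir"])
            # Warn early if the image directory is missing or empty before starting a run.
            images = list(image_dir.glob("*.png")) + list(image_dir.glob("*.jpg")) if image_dir.is_dir() else []
            print(ds["resolution"], subset["class_tokens"], subset["num_repeats"], len(images), "images")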
mamali-lora-03-000004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b6d98dffd31aca3240b73d7a8402350d9425e9a15a9754ff34c382a5efd82f5
+ size 39764512
mamali-lora-03-000008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8bcc97e039c831172b5dee64d63975f1ce58a56de5cfa4863ec7cb0b2c6c17a
+ size 39764512
mamali-lora-03-000012.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9ba306eb8800d45704e76a612954a0711a99b548ab073e05e22a602e42d4b41
+ size 39764512
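
The three .safetensors entries above are git-lfs pointer files: the oid is the SHA-256 of the full checkpoint contents and size is its length in bytes. A small verification sketch, assuming the checkpoint has already been fetched locally (e.g. via git lfs pull or huggingface_hub); the filename and constants are copied from the last pointer above:

    import hashlib
    from pathlib import Path

    EXPECTED_OID = "d9ba306eb8800d45704e76a612954a0711a99b548ab073e05e22a602e42d4b41"  # from the pointer above
    EXPECTED_SIZE = 39764512
    path = Path("mamali-lora-03-000012.safetensors")

    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):   # hash in 1 MiB chunks
            h.update(chunk)

    assert path.stat().st_size == EXPECTED_SIZE, "size mismatch"
    assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
    print("checkpoint matches its LFS pointer")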
sample_prompts.txt ADDED
@@ -0,0 +1 @@
+ mamali
train.sh ADDED
@@ -0,0 +1,34 @@
+ accelerate launch \
+ --mixed_precision bf16 \
+ --num_cpu_threads_per_process 1 \
+ sd-scripts/flux_train_network.py \
+ --pretrained_model_name_or_path "/app/fluxgym/models/unet/flux1-dev.sft" \
+ --clip_l "/app/fluxgym/models/clip/clip_l.safetensors" \
+ --t5xxl "/app/fluxgym/models/clip/t5xxl_fp16.safetensors" \
+ --ae "/app/fluxgym/models/vae/ae.sft" \
+ --cache_latents_to_disk \
+ --save_model_as safetensors \
+ --sdpa --persistent_data_loader_workers \
+ --max_data_loader_n_workers 2 \
+ --seed 42 \
+ --gradient_checkpointing \
+ --mixed_precision bf16 \
+ --save_precision bf16 \
+ --network_module networks.lora_flux \
+ --network_dim 4 \
+ --optimizer_type adamw8bit \
+ --learning_rate 8e-4 \
+ --cache_text_encoder_outputs \
+ --cache_text_encoder_outputs_to_disk \
+ --fp8_base \
+ --highvram \
+ --max_train_epochs 16 \
+ --save_every_n_epochs 4 \
+ --dataset_config "/app/fluxgym/outputs/mamali-lora-03/dataset.toml" \
+ --output_dir "/app/fluxgym/outputs/mamali-lora-03" \
+ --output_name mamali-lora-03 \
+ --timestep_sampling shift \
+ --discrete_flow_shift 3.1582 \
+ --model_prediction_type raw \
+ --guidance_scale 1 \
+ --loss_type l2 \
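
train.sh launches sd-scripts' flux_train_network.py through accelerate: a rank-4 (--network_dim 4) LoRA on flux1-dev in bf16 with the fp8 base model, adamw8bit at 8e-4, 16 epochs with a save every 4 epochs, which matches the 000004/000008/000012 checkpoints uploaded above. A hedged sketch for inspecting one of those checkpoints and confirming its rank; it assumes the safetensors and torch packages are installed, and the exact tensor key names depend on sd-scripts' LoRA naming:

    from safetensors import safe_open

    # Open an intermediate checkpoint and look for a LoRA down-projection weight;
    # its leading dimension should equal the configured rank (--network_dim 4).
    with safe_open("mamali-lora-03-000012.safetensors", framework="pt", device="cpu") as f:
        for key in f.keys():
            if key.endswith("lora_down.weight"):
                print(key, tuple(f.get_tensor(key).shape))
                break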