damian0815
committed on
Commit
•
3abf93e
1
Parent(s):
56504bb
Upload 10 files
Browse files- .gitattributes +4 -0
- _everydream2_config/grate_prompts.json +68 -0
- _everydream2_config/sample_prompts.json +38 -0
- _everydream2_config/train.json +43 -0
- _everydream2_config/v1-inference.yaml +70 -0
- _everydream2_config/validation_default.json +20 -0
- grates/pashahlis-val-test_as-received_lr1e-6-1024x512.jpg +3 -0
- grates/pashahlis-val-test_as-received_lr1e-6-640x896.jpg +3 -0
- grates/pashahlis-val-test_as-received_lr1e-6-768x768.jpg +3 -0
- grates/pashahlis-val-test_as-received_lr1e-6-896x640.jpg +3 -0
- val-test-7e-8.zip +3 -0
.gitattributes
CHANGED
@@ -32,3 +32,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
32 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
33 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
34 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
32 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
33 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
34 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
35 |
+
grates/pashahlis-val-test_as-received_lr1e-6-1024x512.jpg filter=lfs diff=lfs merge=lfs -text
|
36 |
+
grates/pashahlis-val-test_as-received_lr1e-6-640x896.jpg filter=lfs diff=lfs merge=lfs -text
|
37 |
+
grates/pashahlis-val-test_as-received_lr1e-6-768x768.jpg filter=lfs diff=lfs merge=lfs -text
|
38 |
+
grates/pashahlis-val-test_as-received_lr1e-6-896x640.jpg filter=lfs diff=lfs merge=lfs -text
|
_everydream2_config/grate_prompts.json
ADDED
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[
|
2 |
+
{
|
3 |
+
"prompt": "closeshot render cw artstyle admiral"
|
4 |
+
},
|
5 |
+
{
|
6 |
+
"prompt": "longshot cw artstyle flowers on a cliff overlooking a windswept bay in spring"
|
7 |
+
},
|
8 |
+
{
|
9 |
+
"prompt": "closeup cw artstyle a tailor sewing a rebel alliance uniform on a desk covered in piles of silk and cotton fabric"
|
10 |
+
},
|
11 |
+
{
|
12 |
+
"prompt": "mediumshot cw artstyle a woman standing on a scifi space station loading dock, glowing machines in the background"
|
13 |
+
},
|
14 |
+
{
|
15 |
+
"prompt": "headshot cw artstyle a man with a beard wearing a cloak and boarding a train"
|
16 |
+
},
|
17 |
+
{
|
18 |
+
"prompt": "digital artwork hq artstyle fantasy ancient temple, mist, highly detailed"
|
19 |
+
},
|
20 |
+
{
|
21 |
+
"prompt": "digital artwork hq artstyle fantasy ancient temple, mist, highly detailed"
|
22 |
+
},
|
23 |
+
{
|
24 |
+
"prompt": "digital artwork hq artstyle fantasy ancient temple, mist, highly detailed"
|
25 |
+
},
|
26 |
+
{
|
27 |
+
"prompt": "digital artwork hq artstyle fantasy ancient temple, mist, highly detailed"
|
28 |
+
},
|
29 |
+
{
|
30 |
+
"prompt": "digital artwork hq artstyle fantasy ruined scottish castle overlooking the ocean"
|
31 |
+
},
|
32 |
+
{
|
33 |
+
"prompt": "digital artwork hq artstyle fantasy ruined scottish castle overlooking the ocean"
|
34 |
+
},
|
35 |
+
{
|
36 |
+
"prompt": "digital artwork hq artstyle fantasy orchard in spring, foxes, rabbits, blossom"
|
37 |
+
},
|
38 |
+
{
|
39 |
+
"prompt": "digital artwork hq artstyle fantasy orchard in spring, foxes, rabbits, blossom"
|
40 |
+
},
|
41 |
+
{
|
42 |
+
"prompt": "digital artwork hq artstyle fantasy war machines laying siege to a vibrant fairy village, tanks, bombs, explosions, missiles, smoke, fire, death, killing"
|
43 |
+
},
|
44 |
+
{
|
45 |
+
"prompt": "digital artwork hq artstyle fantasy war machines laying siege to a vibrant fairy village, tanks, bombs, explosions, missiles, smoke, fire, death, killing"
|
46 |
+
},
|
47 |
+
{
|
48 |
+
"prompt": "digital artwork hq artstyle an old man drying laundry in a grocery store"
|
49 |
+
},
|
50 |
+
{
|
51 |
+
"prompt": "digital artwork hq artstyle an old man drying laundry in a grocery store"
|
52 |
+
},
|
53 |
+
{
|
54 |
+
"prompt": "closeshot render two snails getting married, a snail wedding ceremony, love, flowers, holy matrimony"
|
55 |
+
},
|
56 |
+
{
|
57 |
+
"prompt": "fullshot concept artwork a pigeon wearing a hat in vienna"
|
58 |
+
},
|
59 |
+
{
|
60 |
+
"prompt": "concept render hg artstyle a brick house on the beach in the sunset"
|
61 |
+
},
|
62 |
+
{
|
63 |
+
"prompt": "a cartoon still image of an old man drying laundry in a grocery store, anime"
|
64 |
+
},
|
65 |
+
{
|
66 |
+
"prompt": "an oil painting of an old man drying laundry in a grocery store, brushstrokes, canvas, fine art"
|
67 |
+
}
|
68 |
+
]
|
_everydream2_config/sample_prompts.json
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"scheduler": "dpm++",
|
3 |
+
"num_inference_steps": 23,
|
4 |
+
"batch_size": 3,
|
5 |
+
"size": [896,640],
|
6 |
+
"seed": 555,
|
7 |
+
"cfgs": [7, 4, 1],
|
8 |
+
"samples":
|
9 |
+
[
|
10 |
+
{
|
11 |
+
"prompt": "closeshot render cw artstyle admiral"
|
12 |
+
},
|
13 |
+
{
|
14 |
+
"prompt": "mediumshot cw artstyle a woman standing on a scifi space station loading dock, glowing machines in the background"
|
15 |
+
},
|
16 |
+
{
|
17 |
+
"prompt": "headshot cw artstyle a man with a beard wearing a cloak and boarding a train"
|
18 |
+
},
|
19 |
+
{
|
20 |
+
"prompt": "digital artwork hq artstyle fantasy ancient temple",
|
21 |
+
"seed": 7
|
22 |
+
},
|
23 |
+
{
|
24 |
+
"prompt": "digital artwork hq artstyle fantasy ancient temple",
|
25 |
+
"seed": 8
|
26 |
+
},
|
27 |
+
{
|
28 |
+
"prompt": "digital artwork hq artstyle an old man drying laundry in a grocery store"
|
29 |
+
},
|
30 |
+
{
|
31 |
+
"prompt": "a cartoon still image of an old man drying laundry in a grocery store, anime, fine lines"
|
32 |
+
},
|
33 |
+
{
|
34 |
+
"prompt": "an oil painting of an old man drying laundry in a grocery store, brushstrokes, canvas, fine art"
|
35 |
+
}
|
36 |
+
]
|
37 |
+
|
38 |
+
}
|
_everydream2_config/train.json
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"amp": true,
|
3 |
+
"batch_size": 6,
|
4 |
+
"ckpt_every_n_minutes": null,
|
5 |
+
"clip_grad_norm": null,
|
6 |
+
"clip_skip": 0,
|
7 |
+
"cond_dropout": 0.0,
|
8 |
+
"data_root": "/workspace/val-test-7e-8",
|
9 |
+
"disable_textenc_training": false,
|
10 |
+
"disable_xformers": false,
|
11 |
+
"flip_p": 0.0,
|
12 |
+
"gpuid": 0,
|
13 |
+
"gradient_checkpointing": true,
|
14 |
+
"grad_accum": 3,
|
15 |
+
"logdir": "/workspace/logs",
|
16 |
+
"log_step": 25,
|
17 |
+
"lowvram": false,
|
18 |
+
"lr": 1e-06,
|
19 |
+
"lr_decay_steps": 0,
|
20 |
+
"lr_scheduler": "constant",
|
21 |
+
"lr_warmup_steps": null,
|
22 |
+
"max_epochs": 60,
|
23 |
+
"notebook": false,
|
24 |
+
"project_name": "pashashlis-1e-6",
|
25 |
+
"resolution": 768,
|
26 |
+
"resume_ckpt": "runwayml/stable-diffusion-v1-5",
|
27 |
+
"sample_prompts": "sample_prompts.json",
|
28 |
+
"sample_steps": 246,
|
29 |
+
"save_ckpt_dir": null,
|
30 |
+
"save_ckpts_from_n_epochs": 0,
|
31 |
+
"save_every_n_epochs": 5,
|
32 |
+
"save_optimizer": false,
|
33 |
+
"scale_lr": false,
|
34 |
+
"seed": -1,
|
35 |
+
"shuffle_tags": false,
|
36 |
+
"useadam8bit": true,
|
37 |
+
"validation_config": "/everydream2/validation_default.json",
|
38 |
+
"wandb": false,
|
39 |
+
"write_schedule": false,
|
40 |
+
"rated_dataset": false,
|
41 |
+
"rated_dataset_target_dropout_percent": 50,
|
42 |
+
"zero_frequency_noise_ratio": 0.02
|
43 |
+
}
|
_everydream2_config/v1-inference.yaml
ADDED
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
model:
|
2 |
+
base_learning_rate: 1.0e-04
|
3 |
+
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
4 |
+
params:
|
5 |
+
linear_start: 0.00085
|
6 |
+
linear_end: 0.0120
|
7 |
+
num_timesteps_cond: 1
|
8 |
+
log_every_t: 200
|
9 |
+
timesteps: 1000
|
10 |
+
first_stage_key: "jpg"
|
11 |
+
cond_stage_key: "txt"
|
12 |
+
image_size: 64
|
13 |
+
channels: 4
|
14 |
+
cond_stage_trainable: false # Note: different from the one we trained before
|
15 |
+
conditioning_key: crossattn
|
16 |
+
monitor: val/loss_simple_ema
|
17 |
+
scale_factor: 0.18215
|
18 |
+
use_ema: False
|
19 |
+
|
20 |
+
scheduler_config: # 10000 warmup steps
|
21 |
+
target: ldm.lr_scheduler.LambdaLinearScheduler
|
22 |
+
params:
|
23 |
+
warm_up_steps: [ 10000 ]
|
24 |
+
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
|
25 |
+
f_start: [ 1.e-6 ]
|
26 |
+
f_max: [ 1. ]
|
27 |
+
f_min: [ 1. ]
|
28 |
+
|
29 |
+
unet_config:
|
30 |
+
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
31 |
+
params:
|
32 |
+
image_size: 32 # unused
|
33 |
+
in_channels: 4
|
34 |
+
out_channels: 4
|
35 |
+
model_channels: 320
|
36 |
+
attention_resolutions: [ 4, 2, 1 ]
|
37 |
+
num_res_blocks: 2
|
38 |
+
channel_mult: [ 1, 2, 4, 4 ]
|
39 |
+
num_heads: 8
|
40 |
+
use_spatial_transformer: True
|
41 |
+
transformer_depth: 1
|
42 |
+
context_dim: 768
|
43 |
+
use_checkpoint: True
|
44 |
+
legacy: False
|
45 |
+
|
46 |
+
first_stage_config:
|
47 |
+
target: ldm.models.autoencoder.AutoencoderKL
|
48 |
+
params:
|
49 |
+
embed_dim: 4
|
50 |
+
monitor: val/rec_loss
|
51 |
+
ddconfig:
|
52 |
+
double_z: true
|
53 |
+
z_channels: 4
|
54 |
+
resolution: 256
|
55 |
+
in_channels: 3
|
56 |
+
out_ch: 3
|
57 |
+
ch: 128
|
58 |
+
ch_mult:
|
59 |
+
- 1
|
60 |
+
- 2
|
61 |
+
- 4
|
62 |
+
- 4
|
63 |
+
num_res_blocks: 2
|
64 |
+
attn_resolutions: []
|
65 |
+
dropout: 0.0
|
66 |
+
lossconfig:
|
67 |
+
target: torch.nn.Identity
|
68 |
+
|
69 |
+
cond_stage_config:
|
70 |
+
target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
|
_everydream2_config/validation_default.json
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"documentation": {
|
3 |
+
"validate_training": "If true, validate the training using a separate set of image/caption pairs, and log the results as `loss/val`. The curve will trend downwards as the model trains, then flatten and start to trend upwards as effective training finishes and the model begins to overfit the training data. Very useful for preventing overfitting, for checking if your learning rate is too low or too high, and for deciding when to stop training.",
|
4 |
+
"val_split_mode": "Either 'automatic' or 'manual', ignored if validate_training is false. 'automatic' val_split_mode picks a random subset of the training set (the number of items is controlled by val_split_proportion) and removes them from training to use as a validation set. 'manual' val_split_mode lets you provide your own folder of validation items (images+captions), specified using 'val_data_root'.",
|
5 |
+
"val_split_proportion": "For 'automatic' val_split_mode, how much of the train dataset that should be removed to use for validation. Typical values are 0.15-0.2 (15-20% of the total dataset). Higher is more accurate but slower.",
|
6 |
+
"val_data_root": "For 'manual' val_split_mode, the path to a folder containing validation items.",
|
7 |
+
"stabilize_training_loss": "If true, stabilize the train loss curves for `loss/epoch` and `loss/log step` by re-calculating training loss with a fixed random seed, and log the results as `loss/train-stabilized`. This more clearly shows the training progress, but it is not enough alone to tell you if you're overfitting.",
|
8 |
+
"stabilize_split_proportion": "For stabilize_training_loss, the proportion of the train dataset to overlap for stabilizing the train loss graph. Typical values are 0.15-0.2 (15-20% of the total dataset). Higher is more accurate but slower.",
|
9 |
+
"every_n_epochs": "How often to run validation (1=every epoch).",
|
10 |
+
"seed": "The seed to use when running validation and stabilization passes."
|
11 |
+
},
|
12 |
+
"validate_training": true,
|
13 |
+
"val_split_mode": "automatic",
|
14 |
+
"val_data_root": null,
|
15 |
+
"val_split_proportion": 0.15,
|
16 |
+
"stabilize_training_loss": false,
|
17 |
+
"stabilize_split_proportion": 0.15,
|
18 |
+
"every_n_epochs": 1,
|
19 |
+
"seed": 555
|
20 |
+
}
|
grates/pashahlis-val-test_as-received_lr1e-6-1024x512.jpg
ADDED
Git LFS Details
|
grates/pashahlis-val-test_as-received_lr1e-6-640x896.jpg
ADDED
Git LFS Details
|
grates/pashahlis-val-test_as-received_lr1e-6-768x768.jpg
ADDED
Git LFS Details
|
grates/pashahlis-val-test_as-received_lr1e-6-896x640.jpg
ADDED
Git LFS Details
|
val-test-7e-8.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:284025bf115d47010287a4e43797fb50a0434678ef063931ab8c1cc0ba37c7f2
|
3 |
+
size 300421345
|