ddPn08 committed
Commit ae97ea7 · verified · 0 Parent(s)

first commit
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,148 @@
+ ---
+ license: other
+ thumbnail: >-
+   https://huggingface.co/ddPn08/subtle/resolve/main/images/main.png
+ tags:
+ - stable-diffusion
+ - text-to-image
+ - safetensors
+ - diffusers
+ inference: true
+ widget:
+ - text: >-
+     masterpiece, best quality, high quality, newest, new, exceptional, best aesthetic, anime, 1girl, short hair, black hair, messy hair, black eyes, ahoge, white shirt, outdoors, dappled sunlight, leaf, looking at viewer,
+   example_title: example 1girl
+ - text: >-
+     masterpiece, best quality, high quality, newest, new, exceptional, best aesthetic, anime, 1boy, short hair, black hair, messy hair, black eyes, ahoge, white shirt, outdoors, dappled sunlight, leaf, looking at viewer,
+   example_title: example 1boy
+ language:
+ - en
+ library_name: diffusers
+ ---
+
+ <div align="center">
+
+ ![](./images/main.png)
+
+ </div>
+ <h1 align="center"><b>Subtly</b></h1>
+ <p align="center">A very subtle model: an anime model that does not use any NAI leaks.</p>
+
+ <h1 align="center">Downloads</h1>
+ <div align="center">
+
+ [![](https://img.shields.io/static/v1?label=Download&message=Checkpoint&labelColor=333333&color=72B76C&style=for-the-badge)](https://huggingface.co/ddPn08/subtle/resolve/main/subtly-fp32.ckpt)
+ <span></span>
+ [![](https://img.shields.io/static/v1?label=Download&message=SafeTensors&labelColor=333333&color=72B76C&style=for-the-badge)](https://huggingface.co/ddPn08/subtle/resolve/main/subtly-fp32.safetensors)
+
+ </div>
+
+
+ <br>
+
+ # Examples
+ ![](./images/example-1.png)
+
+ <details>
+ <summary>Prompt</summary>
+
+ ```yaml
+ prompt: masterpiece, best quality, high quality, newest, new, exceptional, best aesthetic, BREAK
+ [anime], 1girl, black hair, short hair, messy hair, wavy mouth, sweat, nude, BREAK
+ simple background, looking at viewer,
+ negative_prompt: old, oldest, worst quality, low quality, normal quality, deleted, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry, head out of frame,
+ Steps: 29
+ Sampler: Euler a
+ CFG scale: 8
+ Seed: 3159754303
+ Size: 768x768
+ Model hash: aa0bd9fbc5
+ Model: subtly-fp32
+ Clip skip: 2
+ ```
+ </details>
+
+
+ <br>
+
+ ![](./images/example-2.png)
+
+ <details>
+ <summary>Prompt</summary>
+
+ ```yaml
+ prompt: masterpiece, best quality, high quality, newest, new, exceptional, best aesthetic, BREAK
+ 1girl, short hair, messy hair, ahoge, jitome, BREAK
+ outdoors, dappled sunlight, leaf, looking at viewer,
+ negative_prompt: old, oldest, worst quality, low quality, normal quality, deleted, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry
+ Steps: 29
+ Sampler: Euler a
+ CFG scale: 8
+ Seed: 913456778
+ Size: 704x704
+ Model hash: aa0bd9fbc5
+ Model: subtly-fp32
+ Clip skip: 2
+ ```
+ </details>
+
+
+ <br>
+
+ ![](./images/example-3.png)
+
+ <details>
+ <summary>Prompt</summary>
+
+ ```yaml
+ prompt: masterpiece, best quality, high quality, newest, new, exceptional, best aesthetic, BREAK
+ [anime], 1girl, long hair, black hair, black eyes, open mouth, bikini, BREAK
+ outdoors, ocean, beach, palm tree,
+ negative_prompt: worst quality, low quality, normal quality, deleted, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry, old, oldest
+
+ Steps: 29
+ Sampler: Euler a
+ CFG scale: 8
+ Seed: 3704255935
+ Size: 704x704
+ Model hash: aa0bd9fbc5
+ Model: subtly-fp32
+ Clip skip: 2
+ ENSD: 31337
+ ```
+ </details>
+
+
+ <br>
+
+ # About this model
+ This model is trained on top of Waifu Diffusion 1.5. No leaked NovelAI models are included at all. But it still won't beat NovelAI. (Subtle, isn't it?)
+
+ Prompting for Waifu Diffusion 1.5 differs slightly from NAI-based models.
+
+ For more information, please refer to the release notes:
+
+ https://cafeai.notion.site/WD-1-5-Beta-Release-Notes-967d3a5ece054d07bb02cba02e8199b7
+
+ <br>
+
+ # Limitations
+
+ ## Realistic images are output by default
+ Waifu Diffusion 1.5 was trained on many images from Instagram and does not output anime images by default, so you need to put `anime` in the prompt. (You may still get realistic images. Subtle.)
+
+ This can also be controlled by putting `instagram` or `real-life` in the negative prompt, but doing so is not recommended because it may affect human anatomy and backgrounds.
+
+
+ <br>
+
+ ## NSFW
+ This model is very weak at NSFW; it is weak in its own right. We are currently working on making it better at NSFW, but it is not going well. (It's subtle.)
+
+ If anyone has knowledge about this, I would like to hear from you.
+
+
+ <br>
+
+ # License
+ Subtly is released under the Fair AI Public License 1.0-SD (https://freedevproject.org/faipl-1.0-sd/).
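
The README above describes a standard diffusers-format Stable Diffusion repository, so generation can be scripted directly. The following is a minimal sketch, assuming the `diffusers`, `transformers`, and `torch` packages and a CUDA GPU; the repo id `ddPn08/subtle` and the prompt and settings are taken from the card's examples (Clip skip and ENSD are webui-specific settings and are not reproduced here).

```python
# Minimal sketch, not the author's exact workflow: load the diffusers layout
# of this repo and reproduce one of the example prompts from the model card.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "ddPn08/subtle", torch_dtype=torch.float16
).to("cuda")

prompt = (
    "masterpiece, best quality, high quality, newest, new, exceptional, "
    "best aesthetic, anime, 1girl, short hair, black hair, messy hair, "
    "black eyes, ahoge, white shirt, outdoors, dappled sunlight, leaf, "
    "looking at viewer"
)
negative_prompt = (
    "old, oldest, worst quality, low quality, normal quality, deleted, lowres, "
    "bad anatomy, bad hands, text, error, missing fingers, extra digit, "
    "fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"
)

image = pipe(
    prompt,
    negative_prompt=negative_prompt,
    num_inference_steps=29,
    guidance_scale=8.0,
    width=704,
    height=704,
).images[0]
image.save("subtly-example.png")
```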
images/example-1.png ADDED

Git LFS Details

  • SHA256: 62470b0c85a4e232b032b325980f38355b58781cae3f8a9c8753713e8d466aba
  • Pointer size: 131 Bytes
  • Size of remote file: 867 kB
images/example-2.png ADDED

Git LFS Details

  • SHA256: 5a69d92911e965f660f29793b501b7def28d5b27898806d68633f252569ddddf
  • Pointer size: 131 Bytes
  • Size of remote file: 798 kB
images/example-3.png ADDED

Git LFS Details

  • SHA256: d2ed244b60697c3b485674b240841f45dbe1a05312343b6bf25e3e9264325174
  • Pointer size: 131 Bytes
  • Size of remote file: 795 kB
images/main.png ADDED

Git LFS Details

  • SHA256: 07b6b3612e82d710853f04b701b6cfd0accc0931c48077f739e4614f5844dd11
  • Pointer size: 132 Bytes
  • Size of remote file: 1.25 MB
model_index.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_class_name": "StableDiffusionPipeline",
+   "_diffusers_version": "0.10.2",
+   "feature_extractor": [
+     null,
+     null
+   ],
+   "requires_safety_checker": null,
+   "safety_checker": [
+     null,
+     null
+   ],
+   "scheduler": [
+     "diffusers",
+     "DDIMScheduler"
+   ],
+   "text_encoder": [
+     "transformers",
+     "CLIPTextModel"
+   ],
+   "tokenizer": [
+     "transformers",
+     "CLIPTokenizer"
+   ],
+   "unet": [
+     "diffusers",
+     "UNet2DConditionModel"
+   ],
+   "vae": [
+     "diffusers",
+     "AutoencoderKL"
+   ]
+ }
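
`model_index.json` is what diffusers reads to assemble the pipeline: each key names a component and maps it to a (library, class) pair loaded from the subfolder of the same name. A small sketch of that mapping, assuming the `diffusers` package is installed:

```python
# Hedged sketch: inspect how the components listed in model_index.json are
# materialized. feature_extractor and safety_checker are null above, so they
# load as None.
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("ddPn08/subtle")
for name, component in pipe.components.items():
    print(name, type(component).__name__)
```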
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "_class_name": "DDIMScheduler",
+   "_diffusers_version": "0.10.2",
+   "beta_end": 0.012,
+   "beta_schedule": "scaled_linear",
+   "beta_start": 0.00085,
+   "clip_sample": false,
+   "num_train_timesteps": 1000,
+   "prediction_type": "v_prediction",
+   "set_alpha_to_one": false,
+   "skip_prk_steps": true,
+   "steps_offset": 1,
+   "trained_betas": null
+ }
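
Note the `prediction_type` of `v_prediction`: this is a v-prediction checkpoint, so any replacement scheduler should be built from the same config or outputs will degrade. A hedged sketch, assuming a diffusers release whose `DPMSolverMultistepScheduler` supports v-prediction:

```python
# Sketch only: swap the bundled DDIMScheduler for DPM-Solver++ while keeping
# the beta schedule and prediction_type defined in scheduler_config.json.
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("ddPn08/subtle")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
print(pipe.scheduler.config.prediction_type)  # "v_prediction"
```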
subtly-fp16.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:767ba95f77c67f26839af3e8559aafe4c7144eb470d40a503e05c590ea6e7768
+ size 2580321435
subtly-fp16.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:562e2f5b87dbc5faad82c3a2ae2e7c550105013705be812357f1d078ddb9a86c
+ size 2580068398
subtly-fp32.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a7ac82220a8e56421027a84187c1d7f8f0d50c023c89e3c2c5443105592d260
+ size 5160227439
subtly-fp32.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa0bd9fbc5829d5b510cd1cc6e308c412f1a3ba184454ef013e81aa52462d289
+ size 5159974886
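
The four single-file checkpoints above (fp16/fp32, ckpt/safetensors) are intended for webui-style tools, but they can also be loaded into diffusers without the folder layout. A hedged sketch, assuming a diffusers version that provides `from_single_file` and accepts a Hub URL (much newer than the 0.10.2 recorded in these configs); otherwise download the file first and pass a local path.

```python
# Sketch only: load the fp16 safetensors checkpoint directly.
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_single_file(
    "https://huggingface.co/ddPn08/subtle/resolve/main/subtly-fp16.safetensors"
)
```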
text_encoder/config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "architectures": [
+     "CLIPTextModel"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "dropout": 0.0,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_size": 1024,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 77,
+   "model_type": "clip_text_model",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 23,
+   "pad_token_id": 1,
+   "projection_dim": 512,
+   "torch_dtype": "float32",
+   "transformers_version": "4.26.0",
+   "vocab_size": 49408
+ }
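
This is an OpenCLIP-style text encoder (hidden size 1024, 23 hidden layers), matching the Stable Diffusion 2.x family rather than the 768-dimensional encoder of SD 1.x. It can be loaded on its own; a small sketch assuming `transformers` is installed:

```python
# Sketch: load only the text encoder from its subfolder and confirm the
# dimensions declared in config.json.
from transformers import CLIPTextModel

text_encoder = CLIPTextModel.from_pretrained("ddPn08/subtle", subfolder="text_encoder")
print(text_encoder.config.hidden_size)        # 1024
print(text_encoder.config.num_hidden_layers)  # 23
```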
text_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5162e48803d14cb34adba2d974d7d4b1a97eef7b2d1fc3565fcef80d1b5ef08f
+ size 1361674657
tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "!",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "do_lower_case": true,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "model_max_length": 77,
+   "name_or_path": "waifu-diffusion/wd-1-5-beta",
+   "pad_token": "<|endoftext|>",
+   "special_tokens_map_file": "./special_tokens_map.json",
+   "tokenizer_class": "CLIPTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
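
The tokenizer is a standard CLIP BPE tokenizer with `model_max_length` 77, so prompts are padded or truncated to 77 tokens before reaching the text encoder. A small sketch, assuming `transformers` and `torch` are installed:

```python
# Sketch: encode a prompt the way the pipeline does internally.
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("ddPn08/subtle", subfolder="tokenizer")
batch = tokenizer(
    "masterpiece, best quality, anime, 1girl, short hair",
    padding="max_length",
    max_length=tokenizer.model_max_length,
    truncation=True,
    return_tensors="pt",
)
print(batch.input_ids.shape)  # torch.Size([1, 77])
```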
tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
unet/config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "_class_name": "UNet2DConditionModel",
+   "_diffusers_version": "0.10.2",
+   "act_fn": "silu",
+   "attention_head_dim": [
+     5,
+     10,
+     20,
+     20
+   ],
+   "block_out_channels": [
+     320,
+     640,
+     1280,
+     1280
+   ],
+   "center_input_sample": false,
+   "cross_attention_dim": 1024,
+   "down_block_types": [
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "DownBlock2D"
+   ],
+   "downsample_padding": 1,
+   "dual_cross_attention": false,
+   "flip_sin_to_cos": true,
+   "freq_shift": 0,
+   "in_channels": 4,
+   "layers_per_block": 2,
+   "mid_block_scale_factor": 1,
+   "norm_eps": 1e-05,
+   "norm_num_groups": 32,
+   "num_class_embeds": null,
+   "only_cross_attention": false,
+   "out_channels": 4,
+   "sample_size": 64,
+   "up_block_types": [
+     "UpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D"
+   ],
+   "upcast_attention": false,
+   "use_linear_projection": false
+ }
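
The UNet's `cross_attention_dim` of 1024 matches the text encoder's hidden size above, and its `sample_size` of 64 corresponds to 512x512 pixels through the VAE's 8x scaling. A sketch of loading it standalone, assuming `diffusers` is installed:

```python
# Sketch: load only the UNet and check the config values discussed above.
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained("ddPn08/subtle", subfolder="unet")
print(unet.config.cross_attention_dim)  # 1024
print(unet.config.sample_size)          # 64
```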
unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c7ac56b03278ee5a5676645b6190942e404262690195c1ef5bbd5f1ccf1bd60
+ size 3463925413
vae/config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "_class_name": "AutoencoderKL",
+   "_diffusers_version": "0.10.2",
+   "act_fn": "silu",
+   "block_out_channels": [
+     128,
+     256,
+     512,
+     512
+   ],
+   "down_block_types": [
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D"
+   ],
+   "in_channels": 3,
+   "latent_channels": 4,
+   "layers_per_block": 2,
+   "norm_num_groups": 32,
+   "out_channels": 3,
+   "sample_size": 256,
+   "up_block_types": [
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D"
+   ]
+ }
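
The VAE has four encoder levels, which gives an 8x spatial reduction into 4-channel latents (a 512x512 RGB image becomes a 1x4x64x64 latent). A sketch of using it standalone, assuming `diffusers` and `torch` are installed:

```python
# Sketch: encode a dummy image and confirm the latent shape implied by the config.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("ddPn08/subtle", subfolder="vae")
with torch.no_grad():
    latents = vae.encode(torch.zeros(1, 3, 512, 512)).latent_dist.sample()
print(latents.shape)  # torch.Size([1, 4, 64, 64])
```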
vae/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e174991e5609bc5c2b3995e3f223fb2c5f0ae3be307fa9591b351d837a08770
+ size 334711857