Delete ap1234.py (#1), opened by rnlduatm
Files changed:
- .gitattributes +0 -1
- Text2Long_text.py +2 -2
- ap1234.py +0 -7
- app.py +9 -51
- app3.py +3 -44
- dsavv.py +1 -1
- image.py +0 -20
- manual_4cut_comic.py +0 -41
- model/animagine_xl.py +0 -47
- model/animesai.py +0 -23
- model/generate_sdxl_with_refiner.py +0 -39
- model/ghibli.py +0 -18
- model/output_waifu_cpu.png +0 -3
- model/realistic.py +0 -21
- model/sd_turbo.py +0 -21
- model/waifu.py +0 -43
- requirements.txt +0 -8
.gitattributes
CHANGED
@@ -33,4 +33,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
-*.png filter=lfs diff=lfs merge=lfs -text
Text2Long_text.py
CHANGED
@@ -9,7 +9,7 @@ tokenizer = AutoTokenizer.from_pretrained("skt/kogpt2-base-v2")
 model = AutoModelForCausalLM.from_pretrained("skt/kogpt2-base-v2").to(device)

 # 3. Korean story generation function
-def generate_korean_story(prompt, max_length=100):
+def generate_korean_story(prompt, max_length=300):
     input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)

     outputs = model.generate(
@@ -31,7 +31,7 @@ def generate_korean_story(prompt, max_length=100):
 # 4. Run
 if __name__ == "__main__":
     user_prompt = input("Enter the opening sentence of the story (Korean): ")
-    result = generate_korean_story(user_prompt, max_length=
+    result = generate_korean_story(user_prompt, max_length=500)

     print("\nGenerated Korean story:\n")
     print(result)
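With both defaults raised, the module behaves differently depending on whether the caller passes max_length. A minimal usage sketch (the prompt string is illustrative; max_length counts tokens including the prompt):

# Illustrative calls against the updated Text2Long_text module.
from Text2Long_text import generate_korean_story

story = generate_korean_story("옛날 옛적에")                   # new default, max_length=300
longer = generate_korean_story("옛날 옛적에", max_length=500)  # what the __main__ block now requests
print(longer)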
ap1234.py
DELETED
@@ -1,7 +0,0 @@
-import gradio as gr
-
-def greet(name):
-    return "Helloasdfasdf " + name + "!!"
-
-demo = gr.Interface(fn=greet, inputs="text", outputs="text")
-demo.launch()
app.py
CHANGED
@@ -1,57 +1,15 @@
 import gradio as gr
-from model.animagine_xl import generate_animagine_xl
-from model.animesai import generate_animesai
-from model.generate_sdxl_with_refiner import generate_sdxl_with_refiner
-from model.ghibli import generate_ghibli
-from model.realistic import generate_realistic
-from model.sd_turbo import generate_sd_turbo
-from model.waifu import generate_waifu
 from Text2Long_text import generate_korean_story

-MODEL_FUNCTIONS = {
-    "
-    "
-    "Ghibli": generate_ghibli,
-    "Realistic": generate_realistic,
-    "SD Turbo": generate_sd_turbo,
-    "Waifu Diffusion": generate_waifu,
-    "Korean long story generation": lambda prompt: generate_korean_story(prompt, max_length=100),
-}
-
-def generate_story_then_images(prompt, selected_models):
-    # 1. If "Korean long story generation" is among the selections, generate the story first
-    story = ""
-    if "Korean long story generation" in selected_models:
-        story = generate_korean_story(prompt, max_length=100)
-    # 2. If a story was produced, give it to the image models as their prompt
-    img_prompt = story if story else prompt
-    images = [
-        MODEL_FUNCTIONS[name](img_prompt)
-        for name in selected_models
-        if name != "Korean long story generation"
-    ]
-    return images, story
-
-with gr.Blocks() as demo:
-    gr.Markdown("## Generate images with the models of your choice, or create a long Korean story first and turn that story into images.")
-    prompt = gr.Textbox(label="Prompt (enter text)")
-    models = gr.CheckboxGroup(
-        choices=list(MODEL_FUNCTIONS.keys()),
-        label="Select the models to use"
-    )
-    gallery = gr.Gallery(label="Generated images (per selected model)")
-    long_textbox = gr.Textbox(label="Generated story (text)", lines=10, interactive=False)
-    generate_btn = gr.Button("Generate")
-
-    generate_btn.click(
-        fn=generate_story_then_images,
-        inputs=[prompt, models],
-        outputs=[gallery, long_textbox]
-    )
+def make_story(user_prompt):
+    # Pass the user input and generation length to generate_korean_story
+    return generate_korean_story(user_prompt, max_length=500)
+
+# Gradio interface
+demo = gr.Interface(
+    fn=make_story,
+    inputs=gr.Textbox(label="Enter a prompt (story topic or opening sentence)"),
+    outputs=gr.Textbox(label="Long Korean story generated by the AI")
+)

 demo.launch()
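Since the new app.py wraps a single function in gr.Interface, the handler can be smoke-tested without starting the web server. A hedged sketch (make_story mirrors the function defined above):

# Quick local check of the new app.py wiring, bypassing the Gradio UI.
from Text2Long_text import generate_korean_story

def make_story(user_prompt):
    return generate_korean_story(user_prompt, max_length=500)

if __name__ == "__main__":
    print(make_story("비 오는 날의 고양이"))  # prints one generated story to stdout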
app3.py
CHANGED
@@ -1,44 +1,3 @@
-import torch
-import re
-from transformers import AutoTokenizer, AutoModelForCausalLM
-
-# 1. Device setup
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-# 2. Load the Korean GPT-2 model and tokenizer
-tokenizer = AutoTokenizer.from_pretrained("skt/kogpt2-base-v2")
-model = AutoModelForCausalLM.from_pretrained("skt/kogpt2-base-v2").to(device)
-
-# 3. Korean story generation function (outputs only 4 sentences)
-def generate_korean_story(prompt, max_length=300, num_sentences=4):
-    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)
-
-    outputs = model.generate(
-        input_ids,
-        max_length=max_length,
-        min_length=100,
-        do_sample=True,
-        temperature=0.9,
-        top_k=50,
-        top_p=0.95,
-        repetition_penalty=1.2,
-        no_repeat_ngram_size=3,
-        eos_token_id=tokenizer.eos_token_id
-    )
-
-    full_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-    # Split into sentences (regex keyed on period/question mark/exclamation mark)
-    sentences = re.split(r'(?<=[.?!])\s+', full_text.strip())
-
-    # Keep only the first 4 sentences and join them
-    story = " ".join(sentences[:num_sentences])
-    return story
-
-# 4. Run
-if __name__ == "__main__":
-    user_prompt = input("Enter the opening sentence of the story (Korean): ")
-    result = generate_korean_story(user_prompt, max_length=500, num_sentences=4)
-
-    print("\nGenerated Korean story (4 sentences):\n")
-    print(result)
+asdasdasd
+asdasdsa
+asdasdasdas
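The deleted app3.py trimmed the model output to its first num_sentences sentences with a lookbehind split. That post-processing step works standalone; a small sketch with dummy text:

import re

full_text = "The rain stopped. She opened the window! Was it morning? The city was quiet. Nobody spoke."
# Split after each '.', '?' or '!' followed by whitespace, keeping the punctuation.
sentences = re.split(r'(?<=[.?!])\s+', full_text.strip())
print(" ".join(sentences[:4]))  # first four sentences only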
dsavv.py
CHANGED
@@ -1 +1 @@
-fdagabb
+fdagabb
image.py
DELETED
@@ -1,20 +0,0 @@
-from diffusers import StableDiffusionXLPipeline
-import torch
-
-# Load the model (SDXL Base 1.0)
-pipe = StableDiffusionXLPipeline.from_pretrained(
-    "stabilityai/stable-diffusion-xl-base-1.0",
-    torch_dtype=torch.float16,
-    variant="fp16",
-    use_safetensors=True
-).to("cuda")
-
-# Text prompt
-prompt = 'The face of a man gazing at her'
-
-
-# Generate the image
-image = pipe(prompt=prompt).images[0]
-image.save("output_scene2.png")
-print("✅ Image saved: output_scene2.png")
-print("for checking")
manual_4cut_comic.py
DELETED
@@ -1,41 +0,0 @@
-from diffusers import StableDiffusionPipeline
-import torch
-from PIL import Image
-import os
-
-# Image generation pipeline
-image_gen = StableDiffusionPipeline.from_pretrained(
-    "CompVis/stable-diffusion-v1-4",
-    torch_dtype=torch.float16,
-).to("cuda" if torch.cuda.is_available() else "cpu")
-
-def generate_comic_manual(cuts, output_dir="comic_outputs"):
-    """
-    cuts: a list of 4 text descriptions, one per panel
-    """
-    if len(cuts) != 4:
-        raise ValueError("Please provide exactly 4 panel descriptions.")
-
-    os.makedirs(output_dir, exist_ok=True)
-
-    image_paths = []
-    for i, cut in enumerate(cuts):
-        print(f"Panel {i+1}: {cut}")
-        image = image_gen(cut).images[0]
-        path = os.path.join(output_dir, f"cut_{i+1}.png")
-        image.save(path)
-        image_paths.append(path)
-
-    print(f"\n✅ Done! {len(image_paths)} panels were generated.")
-    return image_paths
-
-# Usage example
-if __name__ == "__main__":
-    cuts = [
-        "A cat launches in a rocket into space",
-        "A cat arrives on an alien planet",
-        "The cat meets a robot friend",
-        "The cat returns to Earth and writes in a diary"
-    ]
-
-    generate_comic_manual(cuts)
model/animagine_xl.py
DELETED
@@ -1,47 +0,0 @@
-# from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
-# import torch
-
-# def generate_animagine_xl(prompt: str):
-#     model_id = "Linaqruf/animagine-xl"
-#     pipe = StableDiffusionXLPipeline.from_pretrained(
-#         model_id,
-#         torch_dtype=torch.float16,
-#         use_safetensors=True,
-#         variant="fp16"
-#     )
-#     pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
-#     pipe = pipe.to("cuda")
-#     image = pipe(prompt=prompt, width=1024, height=1024).images[0]
-#     image.save("output_animagine_xl.png")
-#     print("✅ Saved: output_animagine_xl.png")
-#     return image
-
-# if __name__ == "__main__":
-#     prompt = "The face of a man gazing at her"
-#     generate_animagine_xl(prompt)
-
-
-from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
-import torch
-
-# (1) Initialize the model and scheduler once, globally
-model_id = "Linaqruf/animagine-xl"
-pipe = StableDiffusionXLPipeline.from_pretrained(
-    model_id,
-    torch_dtype=torch.float16,
-    use_safetensors=True,
-    variant="fp16"
-)
-pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
-pipe = pipe.to("cpu")  # or change to "cpu"
-
-# (2) Image generation function
-def generate_animagine_xl(prompt: str):
-    image = pipe(prompt=prompt, width=1024, height=1024).images[0]
-    image.save("output_animagine_xl.png")
-    print("✅ Saved: output_animagine_xl.png")
-    return image
-
-if __name__ == "__main__":
-    prompt = "The face of a man gazing at her"
-    generate_animagine_xl(prompt)
model/animesai.py
DELETED
@@ -1,23 +0,0 @@
-from diffusers import StableDiffusionXLPipeline, AutoencoderKL
-import torch
-
-def generate_animesai(prompt: str):
-    model_id = "enhanceaiteam/AnimeSAI"
-    vae = AutoencoderKL.from_pretrained(
-        "madebyollin/sdxl-vae-fp16-fix",
-        torch_dtype=torch.float16
-    ).to("cuda")
-    pipe = StableDiffusionXLPipeline.from_pretrained(
-        model_id,
-        vae=vae,
-        torch_dtype=torch.float16,
-        use_safetensors=True
-    ).to("cuda")
-    image = pipe(prompt=prompt, width=1024, height=1024, guidance_scale=7).images[0]
-    image.save("output_animesai.png")
-    print("✅ Saved: output_animesai.png")
-    return image
-
-if __name__ == "__main__":
-    prompt = "The face of a man gazing at her"
-    generate_animesai(prompt)
model/generate_sdxl_with_refiner.py
DELETED
@@ -1,39 +0,0 @@
-from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline
-import torch
-from PIL import Image
-
-def generate_sdxl_with_refiner(prompt: str):
-    # Stage 1: generate the initial image with the base model
-    base_model_id = "stabilityai/stable-diffusion-xl-base-1.0"
-    base_pipe = StableDiffusionXLPipeline.from_pretrained(
-        base_model_id,
-        torch_dtype=torch.float16,
-        variant="fp16",
-        use_safetensors=True
-    ).to("cuda")
-
-    base_image = base_pipe(prompt=prompt, num_inference_steps=30).images[0]
-
-    # Stage 2: improve the image with the refiner model
-    refiner_model_id = "stabilityai/stable-diffusion-xl-refiner-1.0"
-    refiner_pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
-        refiner_model_id,
-        torch_dtype=torch.float16,
-        variant="fp16",
-        use_safetensors=True
-    ).to("cuda")
-
-    # The refiner takes a PIL image as input and post-processes it
-    refined_image = refiner_pipe(
-        prompt=prompt,
-        image=base_image,
-        strength=0.3  # how much to refine (0-1)
-    ).images[0]
-
-    refined_image.save("output_sdxl_refined.png")
-    return refined_image
-
-if __name__ == "__main__":
-    prompt = "The face of a man gazing at her"
-    img = generate_sdxl_with_refiner(prompt)
-    img.show()
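The deleted helper ran the refiner as a plain img2img pass over a fully decoded base image (strength=0.3). diffusers also documents a latent handoff variant, where the base pipeline stops partway through denoising and the refiner finishes on the raw latents; a hedged sketch under the same model IDs, with an illustrative prompt:

from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline
import torch

base = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16, variant="fp16", use_safetensors=True
).to("cuda")
refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    torch_dtype=torch.float16, variant="fp16", use_safetensors=True
).to("cuda")

prompt = "a portrait of a man gazing into the distance"
# The base model handles the first 80% of the denoising schedule and returns latents...
latents = base(prompt=prompt, num_inference_steps=30,
               denoising_end=0.8, output_type="latent").images
# ...and the refiner completes the remaining 20% directly on those latents.
image = refiner(prompt=prompt, num_inference_steps=30,
                denoising_start=0.8, image=latents).images[0]
image.save("output_sdxl_refined_handoff.png")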
model/ghibli.py
DELETED
@@ -1,18 +0,0 @@
-from diffusers import StableDiffusionPipeline
-import torch
-
-def generate_ghibli(prompt: str):
-    model_id = "nitrosocke/Ghibli-Diffusion"
-    pipe = StableDiffusionPipeline.from_pretrained(
-        model_id,
-        torch_dtype=torch.float16,
-        use_safetensors=True
-    ).to("cuda")
-    image = pipe(prompt=prompt).images[0]
-    image.save("output_ghibli.png")
-    print("✅ Saved: output_ghibli.png")
-    return image
-
-if __name__ == "__main__":
-    prompt = "The face of a man gazing at her"
-    generate_ghibli(prompt)
model/output_waifu_cpu.png
DELETED
Git LFS Details
model/realistic.py
DELETED
@@ -1,21 +0,0 @@
-from diffusers import StableDiffusionPipeline
-import torch
-
-def generate_realistic(prompt: str):
-    model_id = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
-    pipe = StableDiffusionPipeline.from_pretrained(
-        model_id,
-        torch_dtype=torch.float16,
-        use_safetensors=False,
-        # variant="fp16"
-    ).to("cuda")
-
-    image = pipe(prompt=prompt).images[0]
-    return image
-
-if __name__ == "__main__":
-    prompt = "The face of a man gazing at her"
-    image = generate_realistic(prompt)
-    image.save("output_realistic.png")
-    print("✅ Saved: output_realistic.png")
-    image.show()
model/sd_turbo.py
DELETED
@@ -1,21 +0,0 @@
-from diffusers import StableDiffusionPipeline
-import torch
-
-def generate_sd_turbo(prompt: str):
-    model_id = "stabilityai/sd-turbo"
-
-    pipe = StableDiffusionPipeline.from_pretrained(
-        model_id,
-        torch_dtype=torch.float16,
-        use_safetensors=True
-    ).to("cuda")
-
-    image = pipe(prompt=prompt, guidance_scale=0.0).images[0]
-    image.save("output_sd_turbo.png")
-    return image
-
-if __name__ == "__main__":
-    prompt = "The face of a man gazing at her"
-    img = generate_sd_turbo(prompt)
-    print("✅ Saved: output_sd_turbo.png")
-    img.show()
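SD-Turbo is a distilled model, which is why the deleted script disabled classifier-free guidance with guidance_scale=0.0; the model card additionally recommends a single denoising step. A hedged one-step sketch:

from diffusers import AutoPipelineForText2Image
import torch

# sd-turbo was adversarially distilled for few-step sampling; guidance stays at
# 0.0 because the model was trained without classifier-free guidance.
pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/sd-turbo", torch_dtype=torch.float16, variant="fp16"
).to("cuda")

image = pipe(prompt="a cinematic portrait", num_inference_steps=1, guidance_scale=0.0).images[0]
image.save("output_sd_turbo_one_step.png")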
model/waifu.py
DELETED
@@ -1,43 +0,0 @@
-# from diffusers import StableDiffusionPipeline
-# import torch
-
-# def generate_waifu(prompt: str):
-#     model_id = "hakurei/waifu-diffusion"
-#     pipe = StableDiffusionPipeline.from_pretrained(
-#         model_id,
-#         torch_dtype=torch.float16,
-#         use_safetensors=False,
-#         revision="fp16"
-#     ).to("cuda")
-
-#     image = pipe(prompt=prompt).images[0]
-#     return image
-
-# if __name__ == "__main__":
-#     prompt = "The face of a man gazing at her"
-#     image = generate_waifu(prompt)
-#     image.save("output_waifu.png")
-#     print("✅ Saved: output_waifu.png")
-#     image.show()
-
-from diffusers import StableDiffusionPipeline
-import torch
-
-model_id = "hakurei/waifu-diffusion"
-pipe = StableDiffusionPipeline.from_pretrained(
-    model_id,
-    torch_dtype=torch.float32,
-    use_safetensors=False
-).to("cpu")  # explicitly on CPU
-
-def generate_waifu(prompt: str):
-    image = pipe(prompt=prompt).images[0]
-    return image
-
-
-if __name__ == "__main__":
-    prompt = "The face of a man gazing at her"
-    image = generate_waifu(prompt)
-    image.save("output_waifu_cpu.png")
-    print("✅ Saved: output_waifu_cpu.png")
-    image.show()
requirements.txt
DELETED
@@ -1,8 +0,0 @@
-torch
-gradio
-transformers
-diffusers
-Pillow
-accelerate
-safetensors
-scipy