Spaces: Running on CPU Upgrade
Update app.py
app.py CHANGED
@@ -1,275 +1,70 @@
-import os
-import cv2
 import gradio as gr
-import
-import
-import
-for i in
-                info = "Error"
-                break
-            else:
-                # print(response.text)
-                err_log = "URL error, pleace contact the admin"
-                info = "URL error, pleace contact the admin"
-                break
-        except requests.exceptions.ReadTimeout:
-            err_log = "Http Timeout"
-            info = "Http Timeout, please try again later"
-        except Exception as err:
-            err_log = f"Get Exception Error: {err}"
-        time.sleep(1)
-    get_end_time = time.time()
-    print(f"get time used: {get_end_time-get_start_time}")
-    print(f"all time used: {get_end_time-get_start_time+post_end_time-post_start_time}")
-    if info == "":
-        err_log = f"No image after {Max_Retry} retries"
-        info = "Too many users, please try again later"
-    if info != "Success":
-        print(f"Error Log: {err_log}")
-        gr.Warning("Too many users, please try again later")
-
-    return result_img, seed, info
-
-def start_tryon(person_img, garment_img, seed, randomize_seed):
-    start_time = time.time()
-    if person_img is None or garment_img is None:
-        return None, None, "Empty image"
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-    encoded_person_img = cv2.imencode('.jpg', cv2.cvtColor(person_img, cv2.COLOR_RGB2BGR))[1].tobytes()
-    encoded_person_img = base64.b64encode(encoded_person_img).decode('utf-8')
-    encoded_garment_img = cv2.imencode('.jpg', cv2.cvtColor(garment_img, cv2.COLOR_RGB2BGR))[1].tobytes()
-    encoded_garment_img = base64.b64encode(encoded_garment_img).decode('utf-8')
-
-    url = "http://" + os.environ['tryon_url']
-    token = os.environ['token']
-    cookie = os.environ['Cookie']
-    referer = os.environ['referer']
-
-    headers = {'Content-Type': 'application/json', 'token': token, 'Cookie': cookie, 'referer': referer}
-    data = {
-        "clothImage": encoded_garment_img,
-        "humanImage": encoded_person_img,
-        "seed": seed
-    }
-
-    result_img = None
-    try:
-        session = requests.Session()
-        response = session.post(url, headers=headers, data=json.dumps(data), timeout=60)
-        print("response code", response.status_code)
-        if response.status_code == 200:
-            result = response.json()['result']
-            status = result['status']
-            if status == "success":
-                result = base64.b64decode(result['result'])
-                result_np = np.frombuffer(result, np.uint8)
-                result_img = cv2.imdecode(result_np, cv2.IMREAD_UNCHANGED)
-                result_img = cv2.cvtColor(result_img, cv2.COLOR_RGB2BGR)
-                info = "Success"
-            else:
-                info = "Try again latter"
-        else:
-            print(response.text)
-            info = "URL error, pleace contact the admin"
-    except requests.exceptions.ReadTimeout:
-        print("timeout")
-        info = "Too many users, please try again later"
-        raise gr.Error("Too many users, please try again later")
-    except Exception as err:
-        print(f"其他错误: {err}")
-        info = "Error, pleace contact the admin"
-    end_time = time.time()
-    print(f"time used: {end_time-start_time}")
-
-    return result_img, seed, info
-
-MAX_SEED = 999999
-
-example_path = os.path.join(os.path.dirname(__file__), 'assets')
-
-garm_list = os.listdir(os.path.join(example_path,"cloth"))
-garm_list_path = [os.path.join(example_path,"cloth",garm) for garm in garm_list]
-
-human_list = os.listdir(os.path.join(example_path,"human"))
-human_list_path = [os.path.join(example_path,"human",human) for human in human_list]
-
-css="""
-#col-left {
-    margin: 0 auto;
-    max-width: 430px;
-}
-#col-mid {
-    margin: 0 auto;
-    max-width: 430px;
-}
-#col-right {
-    margin: 0 auto;
-    max-width: 430px;
-}
-#col-showcase {
-    margin: 0 auto;
-    max-width: 1100px;
-}
-#button {
-    color: blue;
-}
-"""
-
-def load_description(fp):
-    with open(fp, 'r', encoding='utf-8') as f:
-        content = f.read()
-    return content
-
-def change_imgs(image1, image2):
-    return image1, image2
-
-with gr.Blocks(css=css) as Tryon:
-    gr.HTML(load_description("assets/title.md"))
-    with gr.Row():
-        with gr.Column(elem_id = "col-left"):
-            gr.HTML("""
-            <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
-                <div>
-                Step 1. Upload a person image ⬇️
-                </div>
-            </div>
-            """)
-        with gr.Column(elem_id = "col-mid"):
-            gr.HTML("""
-            <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
-                <div>
-                Step 2. Upload a garment image ⬇️
-                </div>
-            </div>
-            """)
-        with gr.Column(elem_id = "col-right"):
-            gr.HTML("""
-            <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
-                <div>
-                Step 3. Press “Run” to get try-on results
-                </div>
-            </div>
-            """)
-    with gr.Row():
-        with gr.Column(elem_id = "col-left"):
-            imgs = gr.Image(label="Person image", sources='upload', type="numpy")
-            # category = gr.Dropdown(label="Garment category", choices=['upper_body', 'lower_body', 'dresses'], value="upper_body")
-            example = gr.Examples(
-                inputs=imgs,
-                examples_per_page=12,
-                examples=human_list_path
-            )
-        with gr.Column(elem_id = "col-mid"):
-            garm_img = gr.Image(label="Garment image", sources='upload', type="numpy")
-            example = gr.Examples(
-                inputs=garm_img,
-                examples_per_page=12,
-                examples=garm_list_path
-            )
-        with gr.Column(elem_id = "col-right"):
-            image_out = gr.Image(label="Result", show_share_button=False)
-            with gr.Row():
-                seed = gr.Slider(
-                    label="Seed",
-                    minimum=0,
-                    maximum=MAX_SEED,
-                    step=1,
-                    value=0,
-                )
-                randomize_seed = gr.Checkbox(label="Random seed", value=True)
-            with gr.Row():
-                seed_used = gr.Number(label="Seed used")
-                result_info = gr.Text(label="Response")
-            # try_button = gr.Button(value="Run", elem_id="button")
-            test_button = gr.Button(value="Run", elem_id="button")
-
-
-    # try_button.click(fn=start_tryon, inputs=[imgs, garm_img, seed, randomize_seed], outputs=[image_out, seed_used, result_info], api_name='tryon',concurrency_limit=10)
-    test_button.click(fn=tryon, inputs=[imgs, garm_img, seed, randomize_seed], outputs=[image_out, seed_used, result_info], api_name=False, concurrency_limit=45)
-
-    with gr.Column(elem_id = "col-showcase"):
-        gr.HTML("""
-        <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
-            <div> </div>
-            <br>
-            <div>
-            Virtual try-on examples in pairs of person and garment images
-            </div>
-        </div>
-        """)
-        show_case = gr.Examples(
-            examples=[
-                ["assets/examples/model2.png", "assets/examples/garment2.png", "assets/examples/result2.png"],
-                ["assets/examples/model3.png", "assets/examples/garment3.png", "assets/examples/result3.png"],
-                ["assets/examples/model1.png", "assets/examples/garment1.png", "assets/examples/result1.png"],
-            ],
-            inputs=[imgs, garm_img, image_out],
-            label=None
-        )
-
-Tryon.queue(api_open=False).launch(show_api=False)
+import tempfile, os, re
 import gradio as gr
+import fitz  # PyMuPDF
+from TTS.utils.synthesizer import Synthesizer
+from TTS.utils.download import download_url
+
+MODEL_NAMES = [
+    "vits male1 (best)", "vits female (best)", "vits-male", "vits female1",
+    "glowtts-male", "glowtts-female", "female tacotron2"
+]
+MAX_TXT_LEN = 800
+
+# Load/download models if not already present
+modelInfo = [
+    ["vits-male", "best_model_65633.pth", "config-0.json", "https://huggingface.co/Kamtera/persian-tts-male-vits/resolve/main/"],
+    ["vits female (best)", "checkpoint_48000.pth", "config-2.json", "https://huggingface.co/Kamtera/persian-tts-female-vits/resolve/main/"],
+    ["glowtts-male", "best_model_77797.pth", "config-1.json", "https://huggingface.co/Kamtera/persian-tts-male-glow_tts/resolve/main/"],
+    ["glowtts-female", "best_model.pth", "config.json", "https://huggingface.co/Kamtera/persian-tts-female-glow_tts/resolve/main/"],
+    ["vits male1 (best)", "checkpoint_88000.pth", "config.json", "https://huggingface.co/Kamtera/persian-tts-male1-vits/resolve/main/"],
+    ["vits female1", "checkpoint_50000.pth", "config.json", "https://huggingface.co/Kamtera/persian-tts-female1-vits/resolve/main/"],
+    ["female tacotron2", "checkpoint_313000.pth", "config-2.json", "https://huggingface.co/Kamtera/persian-tts-female-tacotron2/resolve/main/"]
+]
+
+for d in modelInfo:
+    if not os.path.exists(d[0]):
+        os.makedirs(d[0])
+        download_url(d[3]+d[1], d[0], "best_model.pth")
+        download_url(d[3]+d[2], d[0], "config.json")
+
+def split_chapters(text):
+    chapters = re.split(r'\n\s*(?:فصل|بخش)[^\n]*\n', text)
+    return [ch.strip() for ch in chapters if ch.strip()]
+
+def synthesize_text(text, synthesizer):
+    chunks = [text[i:i+MAX_TXT_LEN] for i in range(0, len(text), MAX_TXT_LEN)]
+    audio_paths = []
+    for i, chunk in enumerate(chunks):
+        wav = synthesizer.tts(chunk)
+        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
+            synthesizer.save_wav(wav, fp.name)
+            audio_paths.append(fp.name)
+    return audio_paths
+
+def pdf_to_chapter_audio(pdf_file, model_name):
+    # Load model
+    synthesizer = Synthesizer(f"{model_name}/best_model.pth", f"{model_name}/config.json")
+
+    # Extract PDF text
+    doc = fitz.open(pdf_file.name)
+    text = "\n".join([page.get_text() for page in doc])
+    chapters = split_chapters(text)
+
+    chapter_audios = []
+    for i, chapter in enumerate(chapters):
+        paths = synthesize_text(chapter, synthesizer)
+        chapter_path = tempfile.NamedTemporaryFile(suffix=".wav", delete=False).name
+        os.system(f"sox {' '.join(paths)} {chapter_path}")  # Merge if multiple chunks
+        chapter_audios.append((f"Chapter {i+1}", chapter_path))
+    return chapter_audios
+
+gr.Interface(
+    fn=pdf_to_chapter_audio,
+    inputs=[
+        gr.File(label="Upload Persian PDF Book"),
+        gr.Radio(label="Pick a TTS Model", choices=MODEL_NAMES, value="vits female (best)"),
+    ],
+    outputs=gr.Dataset(components=["text", gr.Audio(label="Chapter Audio", type='filepath')]),
+    title="📚 Persian Book to Audio Chapters",
+    description="Upload a Persian PDF book and convert each chapter into audio using a TTS model."
+).launch()
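The new split_chapters relies on a single regex: the extracted text is split wherever a line begins with فصل ("chapter") or بخش ("section"), and re.split drops the heading line itself. A minimal sketch of that behavior on a made-up three-section snippet (the sample string below is illustrative only, not from any real book):

import re

# Same pattern as split_chapters in the new app.py.
sample = "مقدمه\nمتن مقدمه\nفصل اول\nمتن فصل اول\nبخش دوم\nمتن بخش دوم\n"
parts = re.split(r'\n\s*(?:فصل|بخش)[^\n]*\n', sample)
parts = [p.strip() for p in parts if p.strip()]
# parts -> ['مقدمه\nمتن مقدمه', 'متن فصل اول', 'متن بخش دوم']
# Note: the heading lines themselves are discarded, not kept with their chapter text.

Headings that do not start a line will not trigger a split, and any front matter before the first فصل/بخش simply becomes its own chunk.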
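The per-chapter merge in pdf_to_chapter_audio shells out to sox, so the Space image needs the sox binary installed. If it is not available, a rough stand-in using only the standard-library wave module could look like the sketch below; merge_wavs is a hypothetical helper of mine, and it assumes every chunk was written by the same Synthesizer so the sample rate, sample width, and channel count all match.

import wave

def merge_wavs(paths, out_path):
    # Hypothetical replacement for the sox call: concatenate WAV files
    # that share identical audio parameters into one output file.
    with wave.open(out_path, "wb") as out:
        for i, path in enumerate(paths):
            with wave.open(path, "rb") as part:
                if i == 0:
                    out.setparams(part.getparams())  # copy rate/width/channels once
                out.writeframes(part.readframes(part.getnframes()))

With that helper, the os.system(f"sox ...") line could become merge_wavs(paths, chapter_path); whether the swap is worth making depends on what the Space image ships with.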