Update app.py
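Removes the unused `from streamlit_drawable_canvas import st_canvas` import and deletes two blocks of commented-out dead code: the Stable Diffusion helpers (txt2img, img2img, inpainting) and the GPT-LipSync workflow.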
app.py
CHANGED
@@ -6,7 +6,6 @@ import openai
 import requests
 import json
 import streamlit.components.v1 as components
-from streamlit_drawable_canvas import st_canvas
 import webbrowser
 import pickle
 import random
@@ -167,151 +166,6 @@ def update_chat(messages, role, content):
     messages.append({"role": role, "content": content})
     return messages
 
-
-# # Defining Stable Diffusion Methods
-# def get_image(key: str) -> Optional[Image.Image]:
-#     if key in st.session_state:
-#         return st.session_state[key]
-#     return None
-
-
-# def set_image(key: str, img: Image.Image):
-#     st.session_state[key] = img
-
-
-# def prompt_and_generate_button(prefix, pipeline_name: PIPELINE_NAMES, **kwargs):
-#     prompt = st.text_area(
-#         "Prompt",
-#         value=DEFAULT_PROMPT,
-#         key=f"{prefix}-prompt",
-#     )
-#     negative_prompt = st.text_area(
-#         "Negative prompt",
-#         value="",
-#         key=f"{prefix}-negative-prompt",
-#     )
-#     steps = st.slider("Number of inference steps", min_value=1, max_value=200, value=50, key=f"{prefix}-steps")
-#     guidance_scale = st.slider(
-#         "Guidance scale", min_value=0.0, max_value=20.0, value=7.5, step=0.5,key=f"{prefix}-guidance"
-#     )
-
-#     if st.button("Generate image", key=f"{prefix}-btn"):
-#         with st.spinner("Generating image..."):
-#             image = generate(
-#                 prompt,
-#                 pipeline_name,
-#                 negative_prompt=negative_prompt,
-#                 steps=steps,
-#                 guidance_scale=guidance_scale,
-#                 **kwargs,
-#             )
-#             set_image(OUTPUT_IMAGE_KEY, image.copy())
-#             st.image(image)
-
-
-# def width_and_height_sliders(prefix):
-#     col1, col2 = st.columns(2)
-#     with col1:
-#         width = st.slider(
-#             "Width",
-#             min_value=512,
-#             max_value=1024,
-#             step=64,
-#             value=512,
-#             key=f"{prefix}-width",
-#         )
-#     with col2:
-#         height = st.slider(
-#             "Height",
-#             min_value=512,
-#             max_value=1024,
-#             step=64,
-#             value=512,
-#             key=f"{prefix}-height",
-#         )
-#     return width, height
-
-
-# def image_uploader(prefix):
-#     image = st.file_uploader("Image", ["jpg", "png"], key=f"{prefix}-uploader")
-#     if image:
-#         image = Image.open(image)
-#         print(f"loaded input image of size ({image.width}, {image.height})")
-#         image = image.resize((DEFAULT_WIDTH, DEFAULT_HEIGHT))
-#         return image
-
-#     return get_image(LOADED_IMAGE_KEY)
-
-
-# def inpainting():
-#     image = image_uploader("inpainting")
-
-#     if not image:
-#         return None, None
-
-#     brush_size = st.number_input("Brush Size", value=50, min_value=1, max_value=100)
-
-#     canvas_result = st_canvas(
-#         fill_color="rgba(255, 255, 255, 0.0)",
-#         stroke_width=brush_size,
-#         stroke_color="#FFFFFF",
-#         background_color="#000000",
-#         background_image=image,
-#         update_streamlit=True,
-#         height=image.height,
-#         width=image.width,
-#         drawing_mode="freedraw",
-#         # Use repr(image) to force the component to reload when the image
-#         # changes, i.e. when asking to use the current output image
-#         key="inpainting",
-#     )
-
-#     if not canvas_result or canvas_result.image_data is None:
-#         return None, None
-
-#     mask = canvas_result.image_data
-#     mask = mask[:, :, -1] > 0
-#     if mask.sum() > 0:
-#         mask = Image.fromarray(mask)
-#         st.image(mask)
-#         return image, mask
-
-#     return None, None
-
-
-# def txt2img_tab():
-#     prefix = "txt2img"
-#     width, height = width_and_height_sliders(prefix)
-#     prompt_and_generate_button(prefix, "txt2img", width=width, height=height)
-
-
-# def inpainting_tab():
-#     col1, col2 = st.columns(2)
-
-#     with col1:
-#         image_input, mask_input = inpainting()
-
-#     with col2:
-#         if image_input and mask_input:
-#             prompt_and_generate_button(
-#                 "inpaint", "inpaint", image_input=image_input, mask_input=mask_input
-#             )
-
-
-# def img2img_tab():
-#     col1, col2 = st.columns(2)
-
-#     with col1:
-#         image = image_uploader("img2img")
-#         if image:
-#             st.image(image)
-
-#     with col2:
-#         if image:
-#             prompt_and_generate_button("img2img", "img2img", image_input=image)
-
-# # End of Stable Diffusion Methods
-
 # ------
 # Define image sizes
 image_sizes = {
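A quick aside on the helper this hunk touches: `update_chat` keeps the chat history as an OpenAI-style list of message dicts. A minimal sketch follows; the function body is exactly the two surviving context lines above, while the surrounding calls are illustrative only.

def update_chat(messages, role, content):
    # Append one {"role": ..., "content": ...} message dict and hand the list back.
    messages.append({"role": role, "content": content})
    return messages

# Illustrative usage: build up the history one turn at a time.
messages = []
messages = update_chat(messages, "system", "You are a helpful assistant.")
messages = update_chat(messages, "user", "Hello!")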
@@ -2866,96 +2720,4 @@ Format the text as follows using HTML code and H2 sub headings: [introduction
 
 # Allow the user to view the conversation history and other information stored in the agent's memory
 with st.expander("History/Memory"):
-    st.session_state.memory
-# elif selected == "GPT-LipSync":
-#     genre = st.radio(
-#         "What type of content do you want to generate?",
-#         ('chatGPT', 'Fixed'))
-#     if genre == 'chatGPT':
-#         # Stage 1: Choose avatar image
-#         st.subheader("Stage 1: Choose Avatar Image")
-#         col1, col2 = st.columns([1, 2])
-#         with col1:
-#             avatar_images = ["avatar1.jpg", "avatar2.jpg","avatar3.jpg", "avatar4.jpg"]
-#             selected_avatar_index = st.selectbox("Choose your avatar image", range(len(avatar_images)), format_func=lambda i: avatar_images[i], index=0)
-#             # print(selected_avatar_index)
-#         with col2:
-#             st.image(avatar_images[selected_avatar_index], width=200)
-#         # # avatar_images = ["avatar1.jpg", "avatar2.jpg", "avatar3.jpg", "avatar4.jpg", "avatar5.jpg", "avatar6.jpg"]
-#         # avatar_images = ["avatar1.jpg", "avatar2.jpg"]
-#         # selected_avatar_index = st.selectbox("Choose your avatar image", range(len(avatar_images)), format_func=lambda i: avatar_images[i], index=0)
-
-#         st.subheader("Stage 2: Generate Video Script")
-#         st.info("As soon as you enter the prompt, press CTRL+Enter or just click anywhere on the black screen!")
-#         prompt = st.text_area("Enter your prompt here", height=200)
-#         if prompt:
-#             st.info("Generating video script...")
-#             completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[
-#                 {"role": "system", "content": "You are an AI assistant custom trained and created by Alpha AI. You are proficient at everytask."},
-#                 {"role": "user", "content": "Generate only a first person video script for the speaker on " + prompt}
-#             ],max_tokens=2500, temperature = 0.6,presence_penalty = 0.1,frequency_penalty = 0.1)
-#             # print(type(completion))
-#             video_script = completion.choices[0].message.content
-#             st.success("Video script generated successfully!")
-#             edit_script = st.text_area("Edit your video script here", value=video_script, height=200)
-#             if st.button("Accept Script", type="primary"):
-#                 video_script = edit_script.strip()
-#                 if text_to_speech_avatar(video_script):
-#                     st.success("Audio has been generated! Go to Stage 3")
-#         # Stage 3: Generate lip sync video
-#         st.subheader("Stage 3: Generate Lip Sync Video")
-#         if st.button("Generate Video", type="primary"):
-#             st.spinner("Generating lip sync video...")
-#             # st.info("Generating lip sync video...")
-#             dv_index = selected_avatar_index + 1
-#             # avatar_image = avatar_images[selected_avatar_index]
-#             driving_video = f"{dv_index}.mp4"
-#             addition_name = st.session_state['name'][:5]
-#             aud_name = "dummy_" + addition_name
-#             output_vid = "output_" + addition_name
-#             # os.system(rf"python inference.py --checkpoint_path checkpoints/wav2lip_gan.pth --face {driving_video} --audio 'temp/dummy.mp3' --outfile output.mp4")
-#             # subprocess.run(["python", "generate_video.py", avatar_image, driving_video, "audio.wav"])
-#             cmd = f"python inference.py --checkpoint_path checkpoints/wav2lip_gan.pth --face {driving_video} --audio temp/{aud_name}.mp3 --outfile {output_vid}.mp4"
-#             subprocess.run(cmd, shell=True)
-#             # subprocess.run(["python", "inference.py","--checkpoint_path checkpoints/wav2lip_gan.pth","--face ",driving_video,"--audio temp/dummy.mp3","--outfile output.mp4"])
-#             st.success("Lip sync video generated successfully!")
-#             st.video(output_vid+".mp4")
-#     elif genre == 'Fixed':
-#         # Stage 1: Choose avatar image
-#         st.subheader("Stage 1: Choose Avatar Image")
-#         col1, col2 = st.columns([1, 2])
-#         with col1:
-#             avatar_images = ["avatar1.jpg", "avatar2.jpg","avatar3.jpg", "avatar4.jpg"]
-#             selected_avatar_index = st.selectbox("Choose your avatar image", range(len(avatar_images)), format_func=lambda i: avatar_images[i], index=0)
-#             # print(selected_avatar_index)
-#         with col2:
-#             st.image(avatar_images[selected_avatar_index], width=200)
-#         # # avatar_images = ["avatar1.jpg", "avatar2.jpg", "avatar3.jpg", "avatar4.jpg", "avatar5.jpg", "avatar6.jpg"]
-#         # avatar_images = ["avatar1.jpg", "avatar2.jpg"]
-#         # selected_avatar_index = st.selectbox("Choose your avatar image", range(len(avatar_images)), format_func=lambda i: avatar_images[i], index=0)
-
-#         st.subheader("Stage 2: Enter the Script")
-#         st.info("As soon as you enter the prompt, press CTRL+Enter or just click anywhere on the black screen!")
-#         prompt = st.text_area("Enter your prompt here", height=200)
-#         if st.button("Generate Script"):
-#             st.info("Generating video script...")
-#             if text_to_speech_avatar(prompt):
-#                 st.success("Audio has been generated! Go to Stage 3")
-#         # Stage 3: Generate lip sync video
-#         st.subheader("Stage 3: Generate Lip Sync Video")
-#         if st.button("Generate Video", type="primary"):
-#             st.spinner("Generating lip sync video...")
-#             # st.info("Generating lip sync video...")
-#             dv_index = selected_avatar_index + 1
-#             # avatar_image = avatar_images[selected_avatar_index]
-#             driving_video = f"{dv_index}.mp4"
-#             addition_name = st.session_state['name'][:5]
-#             aud_name = "dummy_" + addition_name
-#             output_vid = "output_" + addition_name
-#             # os.system(rf"python inference.py --checkpoint_path checkpoints/wav2lip_gan.pth --face {driving_video} --audio 'temp/dummy.mp3' --outfile output.mp4")
-#             # subprocess.run(["python", "generate_video.py", avatar_image, driving_video, "audio.wav"])
-#             cmd = f"python inference.py --checkpoint_path checkpoints/wav2lip_gan.pth --face {driving_video} --audio temp/{aud_name}.mp3 --outfile {output_vid}.mp4"
-#             subprocess.run(cmd, shell=True)
-#             # subprocess.run(["python", "inference.py","--checkpoint_path checkpoints/wav2lip_gan.pth","--face ",driving_video,"--audio temp/dummy.mp3","--outfile output.mp4"])
-#             st.success("Lip sync video generated successfully!")
-#             st.video(output_vid+".mp4")
+    st.session_state.memory