Spaces: Runtime error

Update merged_app2.py

merged_app2.py  +10 -10  CHANGED
@@ -759,8 +759,8 @@ def process(input_fg, prompt, image_width, image_height, num_samples, seed, step

     bg_source = BGSource(bg_source)

-    if bg_source == BGSource.NONE:
-        pass
+    # if bg_source == BGSource.NONE:
+    #     pass
     if bg_source == BGSource.UPLOAD:
         pass
     elif bg_source == BGSource.UPLOAD_FLIP:
@@ -926,7 +926,7 @@ quick_subjects = [[x] for x in quick_subjects]
 class BGSource(Enum):
     UPLOAD = "Use Background Image"
     UPLOAD_FLIP = "Use Flipped Background Image"
-    NONE = "None"
+    # NONE = "None"
     LEFT = "Left Light"
     RIGHT = "Right Light"
     TOP = "Top Light"
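Note: with NONE commented out of both the enum and the dispatch in process, any UI choice that still submits the string "None" will make BGSource(bg_source) raise ValueError, because Enum lookup by value only accepts defined members. A minimal defensive sketch, assuming the radio choices are built from the enum values; the helper name to_bg_source and the UPLOAD fallback are hypothetical, the exact UI wiring is not shown in this diff:

from enum import Enum

class BGSource(Enum):
    UPLOAD = "Use Background Image"
    UPLOAD_FLIP = "Use Flipped Background Image"
    LEFT = "Left Light"
    RIGHT = "Right Light"
    TOP = "Top Light"

def to_bg_source(value: str) -> BGSource:
    # Hypothetical guard (not from merged_app2.py): fall back to UPLOAD if an old
    # choice such as "None" is still submitted; BGSource("None") would otherwise
    # raise ValueError after this change.
    try:
        return BGSource(value)
    except ValueError:
        return BGSource.UPLOAD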
@@ -1050,7 +1050,7 @@ def use_orientation(selected_image:gr.SelectData):



-def generate_description(object_description,image, detail="high", max_tokens=250):
+def generate_description(object_description,image, detail="high", max_tokens=75):
     openai_api_key = os.getenv("OPENAI_API_KEY")
     client = OpenAI(api_key=openai_api_key)

@@ -1062,7 +1062,7 @@ def generate_description(object_description,image, detail="high", max_tokens=250
         img.save(buffered, format=IMAGE_FORMAT)
         img_base64 = base64.b64encode(buffered.getvalue()).decode()

-    prompt = f"As if you were describing the interior design, make a detailed caption of this image in one
+    prompt = f"As if you were describing the interior design, make a detailed caption of this image in one paragraph. Highlighting textures, furnitures, locations. This object should be included in the description :{object_description}"

     payload = {
         "model": "gpt-4o-mini",
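For context, generate_description base64-encodes the image and builds a chat-completions payload for gpt-4o-mini, but the code that actually sends the request is outside this diff. The sketch below shows one common way such a payload is assembled and posted; the function name describe_image_sketch, the use of requests against the public chat-completions endpoint, and the JPEG encoding are assumptions, not code from merged_app2.py, while the prompt text matches the new line 1065:

import base64
import os
from io import BytesIO

import requests
from PIL import Image

def describe_image_sketch(object_description: str, image_path: str,
                          detail: str = "high", max_tokens: int = 75) -> str:
    # Hypothetical helper (not from merged_app2.py): encode the image as base64
    # so it can be sent inline as a data URL.
    img = Image.open(image_path).convert("RGB")
    buffered = BytesIO()
    img.save(buffered, format="JPEG")
    img_base64 = base64.b64encode(buffered.getvalue()).decode()

    prompt = (
        "As if you were describing the interior design, make a detailed caption "
        "of this image in one paragraph. Highlighting textures, furnitures, locations. "
        f"This object should be included in the description :{object_description}"
    )

    payload = {
        "model": "gpt-4o-mini",
        "messages": [{
            "role": "user",
            "content": [
                {"type": "text", "text": prompt},
                {"type": "image_url",
                 "image_url": {"url": f"data:image/jpeg;base64,{img_base64}",
                               "detail": detail}},
            ],
        }],
        "max_tokens": max_tokens,
    }
    headers = {"Authorization": f"Bearer {os.getenv('OPENAI_API_KEY')}",
               "Content-Type": "application/json"}
    resp = requests.post("https://api.openai.com/v1/chat/completions",
                         headers=headers, json=payload, timeout=60)
    resp.raise_for_status()
    return resp.json()["choices"][0]["message"]["content"]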
@@ -1917,19 +1917,19 @@ with gr.Blocks() as app:
             extracted_fg, # Already processed RGBA image
         ],
         outputs=[orientation_result],
-    )
+    )#.then(clear_memory, inputs=[], outputs=[])

     find_objects_button.click(
         fn=process_image,
         inputs=[input_fg, text_prompt],
         outputs=[extracted_objects, extracted_fg]
-    )
+    )#.then(clear_memory, inputs=[], outputs=[])

     extract_button.click(
         fn=extract_foreground,
         inputs=[input_fg],
         outputs=[extracted_fg, x_slider, y_slider]
-    )
+    )#.then(clear_memory, inputs=[], outputs=[])

     with gr.Tab("Style Transfer", visible=False):
         gr.Markdown("## Apply the style of an image to another one")
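The appended #.then(clear_memory, inputs=[], outputs=[]) fragments leave the cleanup step commented out, so these event chains behave exactly as before. If the chaining were enabled, a clear_memory callback for a GPU-backed Gradio app typically looks like the sketch below; the body is an assumption, since clear_memory itself does not appear in this diff:

import gc
import torch

def clear_memory():
    # Hypothetical cleanup callback (not from merged_app2.py): release Python
    # garbage and cached CUDA blocks after a heavy inference step.
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

# Enabled form of the chained call (kept commented out in the diff):
# extract_button.click(
#     fn=extract_foreground,
#     inputs=[input_fg],
#     outputs=[extracted_fg, x_slider, y_slider]
# ).then(clear_memory, inputs=[], outputs=[])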
@@ -1984,7 +1984,7 @@ with gr.Blocks() as app:

         send_to_relight.click(move_prompt, [description_text], [prompt]).then(move_prompt, [description_text], [prompt_fill])

-        transfer_btn.click(send_img, [output_image], [input_fg])
+        transfer_btn.click(send_img, [output_image], [input_fg])#.then(clear_memory, inputs=[], outputs=[])

         # describe_button.click(describe_image, [image_to_describe], [description_text])

@@ -1998,7 +1998,7 @@ with gr.Blocks() as app:
         fn=generate_image,
         inputs=[prompt_input, structure_image, style_image, depth_strength, style_strength],
         outputs=[output_image]
-    )
+    )#.then(clear_memory, inputs=[], outputs=[])

 if __name__ == "__main__":
     app.queue(default_concurrency_limit=3)