Update app.py
app.py CHANGED

@@ -13,9 +13,6 @@ import matplotlib.patches as patches
 import random
 import numpy as np
 
-import subprocess
-subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
-
 models = {
     'microsoft/Florence-2-large-ft': AutoModelForCausalLM.from_pretrained('microsoft/Florence-2-large-ft', trust_remote_code=True).to("cuda").eval(),
     'microsoft/Florence-2-large': AutoModelForCausalLM.from_pretrained('microsoft/Florence-2-large', trust_remote_code=True).to("cuda").eval(),
@@ -42,10 +39,12 @@ def fig_to_pil(fig):
     buf.seek(0)
     return Image.open(buf)
 
+model_id='microsoft/Florence-2-large'
+model = models[model_id]
+processor = processors[model_id]
+
 @spaces.GPU
 def run_example(task_prompt, image, text_input=None, model_id='microsoft/Florence-2-large'):
-    model = models[model_id]
-    processor = processors[model_id]
     if text_input is None:
         prompt = task_prompt
     else:
@@ -257,7 +256,7 @@ with gr.Blocks(css=css) as demo:
     with gr.Row():
         with gr.Column():
             input_img = gr.Image(label="Input Picture")
-            model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value='microsoft/Florence-2-large')
+            model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value='microsoft/Florence-2-large', visible=False)
             task_type = gr.Radio(choices=['Single task', 'Cascased task'], label='Task type selector', value='Single task')
             task_prompt = gr.Dropdown(choices=single_task_list, label="Task Prompt", value="Caption")
             task_type.change(fn=update_task_dropdown, inputs=task_type, outputs=task_prompt)
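
For context, a minimal sketch of how the affected parts of app.py read after this change. The processors dict and the body of run_example are outside the diff, so those pieces (and the generation parameters) are assumptions based on the usual Florence-2 inference pattern, not the Space's exact code:

import spaces
from transformers import AutoModelForCausalLM, AutoProcessor

# Models are loaded once at import time; the runtime flash-attn install is removed.
models = {
    'microsoft/Florence-2-large-ft': AutoModelForCausalLM.from_pretrained(
        'microsoft/Florence-2-large-ft', trust_remote_code=True).to("cuda").eval(),
    'microsoft/Florence-2-large': AutoModelForCausalLM.from_pretrained(
        'microsoft/Florence-2-large', trust_remote_code=True).to("cuda").eval(),
}

# Assumed to mirror `models`, as implied by the processors[model_id] lookup.
processors = {
    'microsoft/Florence-2-large-ft': AutoProcessor.from_pretrained(
        'microsoft/Florence-2-large-ft', trust_remote_code=True),
    'microsoft/Florence-2-large': AutoProcessor.from_pretrained(
        'microsoft/Florence-2-large', trust_remote_code=True),
}

# After the change, the model and processor are resolved once at module level.
# As far as the diff shows, run_example now always uses this default model,
# which is consistent with the model selector being hidden (visible=False).
model_id = 'microsoft/Florence-2-large'
model = models[model_id]
processor = processors[model_id]

@spaces.GPU
def run_example(task_prompt, image, text_input=None, model_id='microsoft/Florence-2-large'):
    if text_input is None:
        prompt = task_prompt
    else:
        prompt = task_prompt + text_input  # assumed; the else branch is outside the diff
    # Typical Florence-2 generation loop (assumed, not shown in the diff).
    inputs = processor(text=prompt, images=image, return_tensors="pt").to("cuda")
    generated_ids = model.generate(
        input_ids=inputs["input_ids"],
        pixel_values=inputs["pixel_values"],
        max_new_tokens=1024,
        num_beams=3,
    )
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
    return processor.post_process_generation(
        generated_text, task=task_prompt, image_size=(image.width, image.height)
    )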