Fix device variable which was shadowed (#8)
- Remove device variable from gradio state for now (9f022b04640263be67b10e2bc5d1bc092eae547e)
- .dockerignore +2 -0
- .gitignore +3 -0
- app.py +11 -10
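For context, a minimal, self-contained sketch of the shadowing pattern this commit removes. All names and signatures below are illustrative assumptions, not copied from app.py: the idea is that a handler parameter named device (fed from a gr.State input) hides the module-level device, while a small wrapper that closes over the module-level device avoids the extra argument entirely, which matches the diff below.

# Illustrative sketch only: names and shapes are assumed, not taken from app.py.
device = "cuda"  # module-level device chosen at startup


def predict(image, text, prompts, device):
    # Stand-in for the real model call; reports which device it received.
    return [], f"ran on {device}"


# Before (hypothetical shape): device arrives as a handler argument,
# e.g. from a gr.State, and shadows the module-level variable above.
def count_before(image, text, prompts, state, device):
    return predict(image, text, prompts, device)  # uses the argument, not the global


# After: a wrapper closes over the module-level device, so handlers
# no longer take (or shadow) a device argument.
def _predict(image, text, prompts):
    return predict(image, text, prompts, device)


def count_after(image, text, prompts, state):
    return _predict(image, text, prompts)


print(count_before("img", "strawberry", {"points": []}, [], device=None))  # -> ([], 'ran on None'): shadowed
print(count_after("img", "strawberry", {"points": []}, []))                # -> ([], 'ran on cuda')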
.dockerignore CHANGED

@@ -5,6 +5,8 @@ env
 .github/
 .gitignore
 
+models/GroundingDINO/ops/build/
+
 *.zip
 *.whl
 !gradio_image_prompter-0.1.0-py3-none-any.whl
.gitignore CHANGED

@@ -4,5 +4,8 @@ __pycache__
 .python-version
 *.py[od]
 
+# GroundingDINO build folder
+models/GroundingDINO/ops/build/
+
 # vim
 *.sw[op]
app.py CHANGED

@@ -285,15 +285,17 @@ if __name__ == '__main__':
 model, transform = build_model_and_transforms(args)
 model = model.to(device)
 
-… _predict …
+def _predict(image, text, prompts):
+return predict(model, transform, image, text, prompts, device)
+
 
 @spaces.GPU(duration=120)
-def count(image, text, prompts, state …
+def count(image, text, prompts, state):
 if prompts is None:
 prompts = {"image": image, "points": []}
 
 boxes, _ = _predict(image, text, prompts)
-…
+predicted_count = len(boxes)
 output_img = generate_heatmap(image, boxes)
 
 num_exemplars = len(get_box_inputs(prompts["points"]))

@@ -317,19 +319,19 @@ if __name__ == '__main__':
 main_instructions_comp = gr.Markdown(visible=True)
 step_3 = gr.Tab(visible=True)
 
-return (gr.Image(output_img, visible=True, label=out_label, show_label=True), gr.Number(label="Predicted Count", visible=True, value= …
+return (gr.Image(output_img, visible=True, label=out_label, show_label=True), gr.Number(label="Predicted Count", visible=True, value=predicted_count), new_submit_btn, gr.Tab(visible=True), step_3, state)
 
 @spaces.GPU
-def count_main(image, text, prompts …
+def count_main(image, text, prompts):
 if prompts is None:
 prompts = {"image": image, "points": []}
 boxes, _ = _predict(image, text, prompts)
-…
+predicted_count = len(boxes)
 output_img = generate_heatmap(image, boxes)
 num_exemplars = len(get_box_inputs(prompts["points"]))
 out_label = generate_output_label(text, num_exemplars)
 
-return (gr.Image(output_img, visible=True, label=out_label, show_label=True), gr.Number(label="Predicted Count", visible=True, value= …
+return (gr.Image(output_img, visible=True, label=out_label, show_label=True), gr.Number(label="Predicted Count", visible=True, value=predicted_count))
 
 def remove_label(image):
 return gr.Image(show_label=False)

@@ -359,7 +361,6 @@ if __name__ == '__main__':
 
 with gr.Blocks(title="CountGD: Multi-Modal Open-World Counting", theme="soft", head="""<meta name="viewport" content="width=device-width, initial-scale=1, user-scalable=1">""") as demo:
 state = gr.State(value=[AppSteps.JUST_TEXT])
-device = gr.State(device)
 with gr.Tab("Tutorial"):
 with gr.Row():
 with gr.Column():

@@ -383,7 +384,7 @@ if __name__ == '__main__':
 pred_count = gr.Number(label="Predicted Count", visible=False)
 submit_btn = gr.Button("Count", variant="primary", interactive=True)
 
-submit_btn.click(fn=remove_label, inputs=[detected_instances], outputs=[detected_instances]).then(fn=count, inputs=[input_image, input_text, exemplar_image, state …
+submit_btn.click(fn=remove_label, inputs=[detected_instances], outputs=[detected_instances]).then(fn=count, inputs=[input_image, input_text, exemplar_image, state], outputs=[detected_instances, pred_count, submit_btn, step_2, step_3, state])
 exemplar_image.change(check_submit_btn, inputs=[exemplar_image, state], outputs=[submit_btn])
 with gr.Tab("App", visible=True) as main_app:
 

@@ -409,7 +410,7 @@ if __name__ == '__main__':
 submit_btn_main = gr.Button("Count", variant="primary")
 clear_btn_main = gr.ClearButton(variant="secondary")
 gr.Examples(label="Examples: click on a row to load the example. Add visual exemplars by drawing boxes on the loaded \"Visual Exemplar Image.\"", examples=examples, inputs=[input_image_main, input_text_main, exemplar_image_main])
-submit_btn_main.click(fn=remove_label, inputs=[detected_instances_main], outputs=[detected_instances_main]).then(fn=count_main, inputs=[input_image_main, input_text_main, exemplar_image_main …
+submit_btn_main.click(fn=remove_label, inputs=[detected_instances_main], outputs=[detected_instances_main]).then(fn=count_main, inputs=[input_image_main, input_text_main, exemplar_image_main], outputs=[detected_instances_main, pred_count_main])
 clear_btn_main.add([input_image_main, input_text_main, exemplar_image_main, detected_instances_main, pred_count_main])
 