tingwei.wang committed · Commit 7ab7ce6 · 1 Parent(s): db580a6
fix:fix some bug
app.py CHANGED
@@ -6,13 +6,13 @@ import time
 import torch
 import spaces
 
-MODEL_ID = "Qwen/Qwen2-VL-
+MODEL_ID = "Qwen/Qwen2.5-VL-3B-Instruct"
 processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
-model =
+model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     MODEL_ID,
     trust_remote_code=True,
     torch_dtype=torch.bfloat16
-).to("
+).to("cuda").eval()
 
 @spaces.GPU
 def model_inference(input_dict, history):

@@ -78,7 +78,7 @@ examples = [
 
 demo = gr.ChatInterface(
     fn=model_inference,
-    description="# **Qwen2-VL-
+    description="# **Qwen2.5-VL-3B-Instruct**",
     examples=examples,
     textbox=gr.MultimodalTextbox(label="Query Input", file_types=["image"], file_count="multiple"),
     stop_btn="Stop Generation",
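The commit only swaps the checkpoint and the loading code; the body of model_inference is outside this diff. For context, below is a minimal sketch of how such a handler is typically written for the new checkpoint, assuming the standard Qwen2.5-VL chat-template flow from the transformers documentation. The message format, the PIL-based file handling, and the 512-token limit are illustrative assumptions, not the Space's actual code.

# Minimal sketch of a model_inference body for Qwen2.5-VL-3B-Instruct
# (NOT the Space's actual implementation; its function body is not part of this diff).
import torch
from PIL import Image
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration

MODEL_ID = "Qwen/Qwen2.5-VL-3B-Instruct"
processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_ID,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
).to("cuda").eval()

def model_inference(input_dict, history):
    # gr.MultimodalTextbox passes a dict with "text" and "files" (file paths).
    text = input_dict["text"]
    images = [Image.open(path) for path in input_dict.get("files", [])]

    # Build a single user turn and render it with the model's chat template;
    # one image placeholder per attached file, followed by the text prompt.
    messages = [{
        "role": "user",
        "content": [{"type": "image"} for _ in images] + [{"type": "text", "text": text}],
    }]
    prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

    inputs = processor(
        text=[prompt],
        images=images if images else None,
        return_tensors="pt",
        padding=True,
    ).to(model.device)

    with torch.inference_mode():
        generated_ids = model.generate(**inputs, max_new_tokens=512)

    # Drop the prompt tokens and decode only the newly generated continuation.
    trimmed = generated_ids[:, inputs["input_ids"].shape[1]:]
    return processor.batch_decode(trimmed, skip_special_tokens=True)[0]

In the real Space the handler most likely streams partial output (gr.ChatInterface accepts a generator function), but that detail is not touched by this commit.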