Spaces: Running on Zero
Commit: "fix output of inference" (Browse files)
File changed: app.py
@@ -96,12 +96,12 @@ def run_model(target_dir, model) -> dict:
     all_camera_pose = []

     for res in output.ress:
-        all_pts3d.append(res['pts3d_in_other_view'])
-        all_conf.append(res['conf'])
-        all_depth.append(res['depth'])
-        all_depth_conf.append(res['depth_conf'])
-        all_camera_pose.append(res['camera_pose'])
+        all_pts3d.append(res['pts3d_in_other_view'].squeeze(0))
+        all_conf.append(res['conf'].squeeze(0))
+        all_depth.append(res['depth'].squeeze(0))
+        all_depth_conf.append(res['depth_conf'].squeeze(0))
+        all_camera_pose.append(res['camera_pose'].squeeze(0))

     predictions["world_points"] = torch.stack(all_pts3d, dim=0)  # (S, H, W, 3)
     predictions["world_points_conf"] = torch.stack(all_conf, dim=0)  # (S, H, W)
     predictions["depth"] = torch.stack(all_depth, dim=0)  # (S, H, W, 1)