lch01 committed on
Commit
87232d8
·
1 Parent(s): 181e6e9

fix output of inference

Browse files
Files changed (1) hide show
  1. app.py +9 -10
app.py CHANGED
@@ -71,6 +71,9 @@ def run_model(target_dir, model) -> dict:
71
  images = load_and_preprocess_images(image_names).to(device)
72
  print(f"Preprocessed images shape: {images.shape}")
73
 
 
 
 
74
  frames = []
75
  for i in range(images.shape[0]):
76
  image = images[i].unsqueeze(0)
@@ -86,9 +89,7 @@ def run_model(target_dir, model) -> dict:
86
  with torch.no_grad():
87
  with torch.cuda.amp.autocast(dtype=dtype):
88
  output = model.inference(frames)
89
-
90
- predictions = {}
91
-
92
  all_pts3d = []
93
  all_conf = []
94
  all_depth = []
@@ -108,13 +109,11 @@ def run_model(target_dir, model) -> dict:
108
  predictions["depth_conf"] = torch.stack(all_depth_conf, dim=0) # (S, H, W)
109
  predictions["pose_enc"] = torch.stack(all_camera_pose, dim=0) # (S, 9)
110
 
111
- predictions["images"] = images.unsqueeze(0) # (1, S, 3, H, W)
112
-
113
- print("World points shape:", predictions["world_points"].shape)
114
- print("World points confidence shape:", predictions["world_points_conf"].shape)
115
- print("Depth map shape:", predictions["depth"].shape)
116
- print("Depth confidence shape:", predictions["depth_conf"].shape)
117
- print("Pose encoding shape:", predictions["pose_enc"].shape)
118
 
119
  # Convert pose encoding to extrinsic and intrinsic matrices
120
  print("Converting pose encoding to extrinsic and intrinsic matrices...")
 
71
  images = load_and_preprocess_images(image_names).to(device)
72
  print(f"Preprocessed images shape: {images.shape}")
73
 
74
+ predictions = {}
75
+ predictions["images"] = images # (S, 3, H, W)
76
+
77
  frames = []
78
  for i in range(images.shape[0]):
79
  image = images[i].unsqueeze(0)
 
89
  with torch.no_grad():
90
  with torch.cuda.amp.autocast(dtype=dtype):
91
  output = model.inference(frames)
92
+
 
 
93
  all_pts3d = []
94
  all_conf = []
95
  all_depth = []
 
109
  predictions["depth_conf"] = torch.stack(all_depth_conf, dim=0) # (S, H, W)
110
  predictions["pose_enc"] = torch.stack(all_camera_pose, dim=0) # (S, 9)
111
 
112
+ #print("World points shape:", predictions["world_points"].shape)
113
+ #print("World points confidence shape:", predictions["world_points_conf"].shape)
114
+ #print("Depth map shape:", predictions["depth"].shape)
115
+ #print("Depth confidence shape:", predictions["depth_conf"].shape)
116
+ #print("Pose encoding shape:", predictions["pose_enc"].shape)
 
 
117
 
118
  # Convert pose encoding to extrinsic and intrinsic matrices
119
  print("Converting pose encoding to extrinsic and intrinsic matrices...")