Nekshay committed · Commit d5c46bf · verified · 1 Parent(s): d560c2f

Create onnx_runtime_draw_bboxes.py

Files changed (1)
  1. onnx_runtime_draw_bboxes.py +77 -0
onnx_runtime_draw_bboxes.py ADDED
@@ -0,0 +1,77 @@
+ import onnxruntime as ort
+ import numpy as np
+ import cv2
+ import os
+
+ # Load the ONNX model
+ model_path = "model.onnx"
+ session = ort.InferenceSession(model_path, providers=["CPUExecutionProvider"])
+
+ # Folder paths
+ input_folder = "input_images"    # Folder containing images
+ output_folder = "output_images"  # Folder to save processed images
+ os.makedirs(output_folder, exist_ok=True)
+
+ # Define model input shape
+ MODEL_INPUT_SIZE = (320, 320)  # Change based on your model's input size
+
+ # Define class labels (update based on your model)
+ CLASS_NAMES = ["person", "car", "truck", "bicycle", "dog", "cat"]  # Update accordingly
+
+ def preprocess_image(image_path):
+     """Preprocess an image for ONNX model input."""
+     image = cv2.imread(image_path)
+     original_image = image.copy()                   # Keep the original for drawing
+     image = cv2.resize(image, MODEL_INPUT_SIZE)     # Resize to model input size
+     image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # Convert BGR to RGB
+     image = image.astype(np.float32) / 255.0        # Normalize to [0, 1]
+     image = np.transpose(image, (2, 0, 1))          # HWC -> CHW
+     image = np.expand_dims(image, axis=0)           # Add batch dimension (NCHW)
+     return image, original_image
+
+ def postprocess_output(output, orig_image):
+     """Post-process ONNX model output and draw detections."""
+     height, width, _ = orig_image.shape
+
+     # Extract boxes, scores, and class indices (adjust indices/shapes to your model's output format)
+     boxes = output[0]
+     scores = output[1]
+     class_indices = output[2]
+
+     # Draw bounding boxes
+     for i in range(len(scores)):
+         if scores[i] > 0.5:  # Confidence threshold
+             x1, y1, x2, y2 = boxes[i]  # Box assumed as normalized [x1, y1, x2, y2]
+             x1, y1, x2, y2 = int(x1 * width), int(y1 * height), int(x2 * width), int(y2 * height)  # Scale to image size
+
+             label = CLASS_NAMES[int(class_indices[i])]
+             confidence = scores[i]
+
+             # Draw bounding box
+             cv2.rectangle(orig_image, (x1, y1), (x2, y2), (0, 255, 0), 2)
+
+             # Put label text
+             label_text = f"{label}: {confidence:.2f}"
+             cv2.putText(orig_image, label_text, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
+
+     return orig_image
+
+ # Process all images in the input folder
+ for image_name in os.listdir(input_folder):
+     image_path = os.path.join(input_folder, image_name)
+     output_path = os.path.join(output_folder, image_name)
+
+     # Preprocess
+     image_tensor, orig_image = preprocess_image(image_path)
+
+     # Run inference
+     inputs = {session.get_inputs()[0].name: image_tensor}
+     outputs = session.run(None, inputs)
+
+     # Post-process and save the annotated image
+     processed_image = postprocess_output(outputs, orig_image)
+     cv2.imwrite(output_path, processed_image)
+
+     print(f"Processed: {image_name}")
+
+ print("Processing complete! Results saved in", output_folder)