|
| 1 | +import argparse |
| 2 | +import time |
| 3 | + |
| 4 | +import cv2 |
| 5 | +import numpy as np |
| 6 | + |
| 7 | + |
def main(video, device):
    """Benchmark Farneback dense optical flow on CPU or GPU (CUDA).

    Reads *video* frame by frame, computes dense optical flow between
    consecutive (resized, grayscale) frames, renders the flow field as an
    HSV-encoded image, and prints per-stage timing statistics at the end.

    Args:
        video: path to the input video file.
        device: "cpu" for cv2.calcOpticalFlowFarneback; anything else
            (e.g. "gpu") uses the CUDA Farneback implementation.
    """
    # per-stage elapsed times, one entry appended per iteration
    timers = {
        "full pipeline": [],
        "reading": [],
        "pre-process": [],
        "optical flow": [],
        "post-process": [],
    }

    # init video capture with video
    cap = cv2.VideoCapture(video)
    # get default video FPS
    fps = cap.get(cv2.CAP_PROP_FPS)
    # get total number of video frames
    num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # read the first frame
    ret, previous_frame = cap.read()

    # BUG FIX: the original fell through to the processing loop even when
    # the first read failed, crashing later on undefined names
    # (previous_frame / hsv / frame). Bail out early instead.
    if not ret:
        print("Unable to read video: ", video)
        cap.release()
        return

    # resize frame, then convert to gray for the flow computation
    frame = cv2.resize(previous_frame, (960, 540))
    previous_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # HSV image used to visualize the flow field; saturation pinned to max
    hsv = np.zeros_like(frame)
    hsv[..., 1] = 255

    while True:
        # start full pipeline timer
        start_full_time = time.time()

        # --- reading stage ---
        start_read_time = time.time()
        ret, current_frame = cap.read()
        end_read_time = time.time()
        timers["reading"].append(end_read_time - start_read_time)

        # end of stream
        if not ret:
            break

        # --- pre-process stage: resize + grayscale (+ GPU upload) ---
        start_pre_time = time.time()
        frame = cv2.resize(current_frame, (960, 540))
        current_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        if device == "cpu":
            end_pre_time = time.time()
            timers["pre-process"].append(end_pre_time - start_pre_time)

            # --- optical flow stage (CPU) ---
            start_of = time.time()
            # pyr_scale=0.5, levels=3, winsize=15, iterations=3,
            # poly_n=5, poly_sigma=1.2, flags=0
            flow = cv2.calcOpticalFlowFarneback(
                previous_frame, current_frame, None, 0.5, 3, 15, 3, 5, 1.2, 0,
            )
            end_of = time.time()
            timers["optical flow"].append(end_of - start_of)

        else:
            # move both frames to the GPU (counted as pre-processing)
            cu_previous = cv2.cuda_GpuMat()
            cu_current = cv2.cuda_GpuMat()
            cu_previous.upload(previous_frame)
            cu_current.upload(current_frame)

            end_pre_time = time.time()
            timers["pre-process"].append(end_pre_time - start_pre_time)

            # --- optical flow stage (GPU) ---
            start_of = time.time()
            # BUG FIX: create() takes (numLevels, pyrScale, fastPyramids,
            # winSize, numIters, polyN, polySigma, flags). The original
            # passed None as numLevels, shifting every argument; mirror
            # the CPU parameters instead.
            gpu_flow = cv2.cuda_FarnebackOpticalFlow.create(
                3, 0.5, False, 15, 3, 5, 1.2, 0,
            )
            flow = cv2.cuda_FarnebackOpticalFlow.calc(
                gpu_flow, cu_previous, cu_current, None,
            )
            # send result from GPU back to CPU
            flow = flow.download()
            end_of = time.time()
            timers["optical flow"].append(end_of - start_of)

        # --- post-process stage: flow field -> HSV -> BGR image ---
        start_post_time = time.time()
        # convert from cartesian to polar to get magnitude and angle
        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        # hue from flow direction, value from normalized flow magnitude
        hsv[..., 0] = ang * 180 / np.pi / 2
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        # current frame becomes the reference for the next iteration
        previous_frame = current_frame
        end_post_time = time.time()
        timers["post-process"].append(end_post_time - start_post_time)

        # end full pipeline timer
        end_full_time = time.time()
        timers["full pipeline"].append(end_full_time - start_full_time)

        # visualization; ESC quits early
        cv2.imshow("original", frame)
        cv2.imshow("result", rgb)
        k = cv2.waitKey(1)
        if k == 27:
            break

    # release the capture and destroy all windows
    cap.release()
    cv2.destroyAllWindows()

    # print results
    print("Number of frames: ", num_frames)

    # elapsed time at each stage
    print("Elapsed time")
    for stage, seconds in timers.items():
        print("-", stage, ": {:0.2f} seconds".format(sum(seconds)))

    # calculate frames per second
    print("Default video FPS: {:0.2f}".format(fps))

    # BUG FIX: guard the FPS math against division by zero when fewer
    # than two frames were processed (single-frame or unreadable video).
    of_time = sum(timers["optical flow"])
    full_time = sum(timers["full pipeline"])
    if num_frames > 1 and of_time > 0 and full_time > 0:
        of_fps = (num_frames - 1) / of_time
        print("Optical flow FPS: {:0.2f}".format(of_fps))

        full_fps = (num_frames - 1) / full_time
        print("Full pipeline FPS: {:0.2f}".format(full_fps))
| 169 | + |
if __name__ == "__main__":
    # CLI entry point: compare CPU vs GPU optical-flow throughput.
    arg_parser = argparse.ArgumentParser(
        description="OpenCV CPU/GPU Comparison",
    )

    arg_parser.add_argument(
        "--video",
        required=True,
        type=str,
        help="path to .mp4 video file",
    )

    arg_parser.add_argument(
        "--device",
        default="cpu",
        choices=["cpu", "gpu"],
        help="device to inference on",
    )

    # parse script arguments and run the pipeline
    cli_args = arg_parser.parse_args()
    main(cli_args.video, cli_args.device)
0 commit comments