diff --git a/.gitignore b/.gitignore
index fbc3a63..4bc53ac 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
simple_camera
+__pycache__
\ No newline at end of file
diff --git a/README.md b/README.md
index f5962ba..f3bc7a9 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,7 @@ The camera should be installed in the MIPI-CSI Camera Connector on the carrier b
-The new Jetson Nano B01 developer kit has two CSI camera slots. You can use the sensor_mode attribute with nvarguscamerasrc to specify the camera. Valid values are 0 or 1 (the default is 0 if not specified), i.e.
+The new Jetson Nano B01 developer kit has two CSI camera slots. You can use the sensor_id attribute with nvarguscamerasrc to specify the camera. Valid values are 0 or 1 (the default is 0 if not specified), i.e.
```
-nvarguscamerasrc sensor_mode=0
+nvarguscamerasrc sensor_id=0
```
To test the camera:
@@ -14,7 +14,8 @@ To test the camera:
```
# Simple Test
# Ctrl^C to exit
-$ gst-launch-1.0 nvarguscamerasrc ! nvoverlaysink
+# sensor_id selects the camera: 0 or 1 on Jetson Nano B01
+$ gst-launch-1.0 nvarguscamerasrc sensor_id=0 ! nvoverlaysink
# More specific - width, height and framerate are from supported video modes
-# Example also shows sensor_mode paramter to nvarguscamerasrc
+# Example also shows the sensor_mode parameter to nvarguscamerasrc
@@ -50,6 +51,7 @@ The final example is dual_camera.py. This example is for the newer rev B01 of th
$ python3 dual_camera.py
```
+The directory 'instrumented' contains instrumented versions of the examples, which can help when tuning performance and frame rates.
Notes
@@ -58,7 +60,7 @@ You can use v4l2-ctl to determine the camera capabilities. v4l2-ctl is in the v4
$ sudo apt-get install v4l-utils
-For the Raspberry Pi V2 camera the output is (assuming the camera is /dev/video0):
+For the Raspberry Pi V2 camera, the output typically looks like this (assuming the camera is /dev/video0):
```
$ v4l2-ctl --list-formats-ext
diff --git a/instrumented/csi_camera.py b/instrumented/csi_camera.py
new file mode 100644
index 0000000..f4063d8
--- /dev/null
+++ b/instrumented/csi_camera.py
@@ -0,0 +1,145 @@
+# MIT License
+# Copyright (c) 2019,2020 JetsonHacks
+# See license in root folder
+# CSI_Camera is a class which encapsulates an OpenCV VideoCapture element
+# The VideoCapture element is initialized via a GStreamer pipeline
+# The camera is read in a separate thread
+# The class also tracks how many frames are read from the camera;
+# the calling application is responsible for updating frames_displayed
+
+# Let's use a repeating Timer for counting FPS
+import cv2
+import threading
+
+class RepeatTimer(threading.Timer):
+ def run(self):
+ while not self.finished.wait(self.interval):
+ self.function(*self.args, **self.kwargs)
+
+class CSI_Camera:
+
+    def __init__(self):
+ # Initialize instance variables
+ # OpenCV video capture element
+ self.video_capture = None
+ # The last captured image from the camera
+ self.frame = None
+ self.grabbed = False
+ # The thread where the video capture runs
+ self.read_thread = None
+ self.read_lock = threading.Lock()
+ self.running = False
+        self.fps_timer = None
+        self.frames_read = 0
+        self.frames_displayed = 0
+        self.last_frames_read = 0
+        self.last_frames_displayed = 0
+        self._gstreamer_pipeline = None
+
+
+ def open(self, gstreamer_pipeline_string):
+ try:
+ self.video_capture = cv2.VideoCapture(
+ gstreamer_pipeline_string, cv2.CAP_GSTREAMER
+ )
+
+ except RuntimeError:
+ self.video_capture = None
+ print("Unable to open camera")
+ print("Pipeline: " + gstreamer_pipeline_string)
+ return
+ # Grab the first frame to start the video capturing
+ self.grabbed, self.frame = self.video_capture.read()
+
+ def start(self):
+ if self.running:
+ print('Video capturing is already running')
+ return None
+ # create a thread to read the camera image
+        if self.video_capture is not None:
+            self.running = True
+ self.read_thread = threading.Thread(target=self.updateCamera)
+ self.read_thread.start()
+ return self
+
+    def stop(self):
+        self.running = False
+        if self.read_thread is not None:
+            self.read_thread.join()
+
+ def updateCamera(self):
+ # This is the thread to read images from the camera
+ while self.running:
+ try:
+ grabbed, frame = self.video_capture.read()
+ with self.read_lock:
+                    self.grabbed = grabbed
+                    self.frame = frame
+ self.frames_read += 1
+ except RuntimeError:
+ print("Could not read image from camera")
+ # FIX ME - stop and cleanup thread
+ # Something bad happened
+
+
+    def read(self):
+        with self.read_lock:
+            frame = self.frame.copy() if self.frame is not None else None
+            grabbed = self.grabbed
+        return grabbed, frame
+
+    def release(self):
+        if self.video_capture is not None:
+            self.video_capture.release()
+            self.video_capture = None
+        # Stop the FPS timer, if one was started
+        if self.fps_timer is not None:
+            self.fps_timer.cancel()
+            self.fps_timer.join()
+        # Now wait for the read thread to exit
+        if self.read_thread is not None:
+            self.read_thread.join()
+
+    def update_fps_stats(self):
+        self.last_frames_read = self.frames_read
+        self.last_frames_displayed = self.frames_displayed
+        # Start the next measurement cycle
+        self.frames_read = 0
+        self.frames_displayed = 0
+
+    def start_counting_fps(self):
+        self.fps_timer = RepeatTimer(1.0, self.update_fps_stats)
+ self.fps_timer.start()
+
+ @property
+ def gstreamer_pipeline(self):
+ return self._gstreamer_pipeline
+
+    # Currently the frame rate of the CSI camera on the Nano is set through the GStreamer pipeline
+    # Here we directly select sensor_mode 3 (1280x720, 59.9999 fps)
+ def create_gstreamer_pipeline(
+ self,
+ sensor_id=0,
+ sensor_mode=3,
+ display_width=1280,
+ display_height=720,
+ framerate=60,
+ flip_method=0,
+ ):
+ self._gstreamer_pipeline = (
+ "nvarguscamerasrc sensor-id=%d sensor-mode=%d ! "
+ "video/x-raw(memory:NVMM), "
+ "format=(string)NV12, framerate=(fraction)%d/1 ! "
+ "nvvidconv flip-method=%d ! "
+ "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
+ "videoconvert ! "
+ "video/x-raw, format=(string)BGR ! appsink"
+ % (
+ sensor_id,
+ sensor_mode,
+ framerate,
+ flip_method,
+ display_width,
+ display_height,
+ )
+ )
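
For orientation, here is a minimal sketch of how the new CSI_Camera class is meant to be driven, using the same create-pipeline/open/start/read/stop/release sequence as the instrumented examples below (the parameter values are illustrative only):

```python
# Minimal usage sketch for CSI_Camera; parameter values are illustrative.
from csi_camera import CSI_Camera

camera = CSI_Camera()
camera.create_gstreamer_pipeline(
    sensor_id=0,       # first CSI camera
    sensor_mode=3,     # 1280x720, 59.9999 fps
    framerate=30,
    flip_method=0,
    display_width=640,
    display_height=360,
)
camera.open(camera.gstreamer_pipeline)
camera.start()                      # spawns the read thread
camera.start_counting_fps()         # one-second RepeatTimer for FPS stats
try:
    grabbed, frame = camera.read()  # thread-safe copy of the latest frame
finally:
    camera.stop()
    camera.release()
```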
diff --git a/instrumented/dual_camera_fps.py b/instrumented/dual_camera_fps.py
index d368ab6..f4ba40f 100644
--- a/instrumented/dual_camera_fps.py
+++ b/instrumented/dual_camera_fps.py
@@ -10,152 +10,12 @@
# arranged horizontally.
# The camera streams are each read in their own thread, as when done sequentially there
# is a noticeable lag
-# For better performance, the next step would be to experiment with having the window display
-# in a separate thread
import cv2
-import threading
import numpy as np
+from csi_camera import CSI_Camera
-# gstreamer_pipeline returns a GStreamer pipeline for capturing from the CSI camera
-# Flip the image by setting the flip_method (most common values: 0 and 2)
-# display_width and display_height determine the size of each camera pane in the window on the screen
-
-left_camera = None
-right_camera = None
-
-# Let's use a repeating Timer for counting FPS
-class RepeatTimer(threading.Timer):
- def run(self):
- while not self.finished.wait(self.interval):
- self.function(*self.args, **self.kwargs)
-
-
-class CSI_Camera:
-
- def __init__ (self) :
- # Initialize instance variables
- # OpenCV video capture element
- self.video_capture = None
- # The last captured image from the camera
- self.frame = None
- self.grabbed = False
- # The thread where the video capture runs
- self.read_thread = None
- self.read_lock = threading.Lock()
- self.running = False
- self.fps_timer=None
- self.frames_read=0
- self.frames_displayed=0
- self.last_frames_read=0
- self.last_frames_displayed=0
-
-
- def open(self, gstreamer_pipeline_string):
- try:
- self.video_capture = cv2.VideoCapture(
- gstreamer_pipeline_string, cv2.CAP_GSTREAMER
- )
-
- except RuntimeError:
- self.video_capture = None
- print("Unable to open camera")
- print("Pipeline: " + gstreamer_pipeline_string)
- return
- # Grab the first frame to start the video capturing
- self.grabbed, self.frame = self.video_capture.read()
-
- def start(self):
- if self.running:
- print('Video capturing is already running')
- return None
- # create a thread to read the camera image
- if self.video_capture != None:
- self.running=True
- self.read_thread = threading.Thread(target=self.updateCamera)
- self.read_thread.start()
- return self
-
- def stop(self):
- self.running=False
- self.read_thread.join()
-
- def updateCamera(self):
- # This is the thread to read images from the camera
- while self.running:
- try:
- grabbed, frame = self.video_capture.read()
- with self.read_lock:
- self.grabbed=grabbed
- self.frame=frame
- self.frames_read=self.frames_read+1
- except RuntimeError:
- print("Could not read image from camera")
- # FIX ME - stop and cleanup thread
- # Something bad happened
-
-
- def read(self):
- with self.read_lock:
- frame = self.frame.copy()
- grabbed=self.grabbed
- return grabbed, frame
-
- def release(self):
- if self.video_capture != None:
- self.video_capture.release()
- self.video_capture = None
- # Kill the timer
- self.fps_timer.cancel()
- self.fps_timer.join()
- # Now kill the thread
- if self.read_thread != None:
- self.read_thread.join()
-
- def update_fps_stats(self):
- self.last_frames_read=self.frames_read
- self.last_frames_displayed=self.frames_displayed
- # Start the next measurement cycle
- self.frames_read=0
- self.frames_displayed=0
-
- def start_counting_fps(self):
- self.fps_timer=RepeatTimer(1.0,self.update_fps_stats)
- self.fps_timer.start()
-
-
-# Currently there are setting frame rate on CSI Camera on Nano through gstreamer
-# Here we directly select sensor_mode 3 (1280x720, 59.9999 fps)
-def gstreamer_pipeline(
- sensor_id=0,
- sensor_mode=3,
- capture_width=1280,
- capture_height=720,
- display_width=1280,
- display_height=720,
- framerate=60,
- flip_method=0,
-):
- return (
- "nvarguscamerasrc sensor-id=%d sensor-mode=%d ! "
- "video/x-raw(memory:NVMM), "
- "width=(int)%d, height=(int)%d, "
- "format=(string)NV12, framerate=(fraction)%d/1 ! "
- "nvvidconv flip-method=%d ! "
- "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
- "videoconvert ! "
- "video/x-raw, format=(string)BGR ! appsink"
- % (
- sensor_id,
- sensor_mode,
- capture_width,
- capture_height,
- framerate,
- flip_method,
- display_width,
- display_height,
- )
- )
+show_fps = True
# Simple draw label on an image; in our case, the video frame
def draw_label(cv_image, label_text, label_position):
@@ -168,37 +28,48 @@ def draw_label(cv_image, label_text, label_position):
# Read a frame from the camera, and draw the FPS on the image if desired
# Return an image
-def read_camera(camera,display_fps):
- _ , camera_image=camera.read()
+def read_camera(csi_camera,display_fps):
+ _ , camera_image=csi_camera.read()
if display_fps:
- draw_label(camera_image, "Frames Displayed (PS): "+str(camera.last_frames_displayed),(10,20))
- draw_label(camera_image, "Frames Read (PS): "+str(camera.last_frames_read),(10,40))
+ draw_label(camera_image, "Frames Displayed (PS): "+str(csi_camera.last_frames_displayed),(10,20))
+ draw_label(camera_image, "Frames Read (PS): "+str(csi_camera.last_frames_read),(10,40))
return camera_image
+# Good for 1280x720
+DISPLAY_WIDTH=640
+DISPLAY_HEIGHT=360
+# For 1920x1080
+# DISPLAY_WIDTH=960
+# DISPLAY_HEIGHT=540
+
+# 1920x1080, 30 fps
+SENSOR_MODE_1080=2
+# 1280x720, 60 fps
+SENSOR_MODE_720=3
def start_cameras():
left_camera = CSI_Camera()
- left_camera.open(
- gstreamer_pipeline(
+ left_camera.create_gstreamer_pipeline(
sensor_id=0,
- sensor_mode=3,
+ sensor_mode=SENSOR_MODE_720,
+ framerate=30,
flip_method=0,
- display_height=540,
- display_width=960,
- )
+ display_height=DISPLAY_HEIGHT,
+ display_width=DISPLAY_WIDTH,
)
+ left_camera.open(left_camera.gstreamer_pipeline)
left_camera.start()
right_camera = CSI_Camera()
- right_camera.open(
- gstreamer_pipeline(
+ right_camera.create_gstreamer_pipeline(
sensor_id=1,
- sensor_mode=3,
+ sensor_mode=SENSOR_MODE_720,
+ framerate=30,
flip_method=0,
- display_height=540,
- display_width=960,
- )
+ display_height=DISPLAY_HEIGHT,
+ display_width=DISPLAY_WIDTH,
)
+ right_camera.open(right_camera.gstreamer_pipeline)
right_camera.start()
cv2.namedWindow("CSI Cameras", cv2.WINDOW_AUTOSIZE)
@@ -217,17 +88,16 @@ def start_cameras():
left_camera.start_counting_fps()
right_camera.start_counting_fps()
while cv2.getWindowProperty("CSI Cameras", 0) >= 0 :
- left_image=read_camera(left_camera,True)
- right_image=read_camera(right_camera,True)
+ left_image=read_camera(left_camera,show_fps)
+ right_image=read_camera(right_camera,show_fps)
# We place both images side by side to show in the window
camera_images = np.hstack((left_image, right_image))
cv2.imshow("CSI Cameras", camera_images)
- left_camera.frames_displayed=left_camera.frames_displayed+1
- right_camera.frames_displayed=right_camera.frames_displayed+1
+ left_camera.frames_displayed += 1
+ right_camera.frames_displayed += 1
# This also acts as a frame limiter
- keyCode = cv2.waitKey(25) & 0xFF
# Stop the program on the ESC key
- if keyCode == 27:
+ if (cv2.waitKey(5) & 0xFF) == 27:
break
finally:
diff --git a/instrumented/dual_camera_naive.py b/instrumented/dual_camera_naive.py
new file mode 100644
index 0000000..84c5968
--- /dev/null
+++ b/instrumented/dual_camera_naive.py
@@ -0,0 +1,86 @@
+# MIT License
+# Copyright (c) 2019 JetsonHacks
+# See license
+# Using a CSI camera (such as the Raspberry Pi Version 2) connected to a
+# NVIDIA Jetson Nano Developer Kit using OpenCV
+# Drivers for the camera and OpenCV are included in the base image
+
+import cv2
+from timecontext import Timer
+import numpy as np
+
+# gstreamer_pipeline returns a GStreamer pipeline for capturing from the CSI camera
+# Defaults to 1280x720 @ 60fps
+# Flip the image by setting the flip_method (most common values: 0 and 2)
+# display_width and display_height determine the size of the window on the screen
+
+
+def gstreamer_pipeline(
+ sensor_id=0,
+ sensor_mode=3,
+ capture_width=1280,
+ capture_height=720,
+ display_width=1280,
+ display_height=720,
+ framerate=60,
+ flip_method=0,
+):
+ return (
+ "nvarguscamerasrc sensor-id=%d sensor-mode=%d ! "
+ "video/x-raw(memory:NVMM), "
+ "width=(int)%d, height=(int)%d, "
+ "format=(string)NV12, framerate=(fraction)%d/1 ! "
+ "nvvidconv flip-method=%d ! "
+ "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
+ "videoconvert ! "
+ "video/x-raw, format=(string)BGR ! appsink"
+ % (
+ sensor_id,
+ sensor_mode,
+ capture_width,
+ capture_height,
+ framerate,
+ flip_method,
+ display_width,
+ display_height,
+ )
+ )
+
+
+def show_camera():
+ # To flip the image, modify the flip_method parameter (0 and 2 are the most common)
+ print(gstreamer_pipeline(flip_method=0))
+ left_cap = cv2.VideoCapture(
+ gstreamer_pipeline(flip_method=0,display_width=640,display_height=360,framerate=30), cv2.CAP_GSTREAMER)
+ right_cap = cv2.VideoCapture(gstreamer_pipeline(
+ flip_method=0, sensor_id=1,display_width=640,display_height=360,framerate=30), cv2.CAP_GSTREAMER)
+    if left_cap.isOpened() and right_cap.isOpened():
+        cv2.namedWindow("CSI Cameras", cv2.WINDOW_AUTOSIZE)
+        # Window
+        while cv2.getWindowProperty("CSI Cameras", 0) >= 0:
+ with Timer() as context_time:
+ ret_val, left_image = left_cap.read()
+ ret_val, right_image = right_cap.read()
+ # print(context_time.elapsed)
+ # We place both images side by side to show in the window
+ camera_images = np.hstack((left_image, right_image))
+ cv2.imshow("CSI Cameras", camera_images)
+ # cv2.imshow("CSI Camera", left_image)
+ # print(context_time.elapsed)
+
+            # This also acts as a frame limiter
+ keyCode = cv2.waitKey(5) & 0xFF
+ # print(context_time.elapsed)
+ # print("---")
+ # Stop the program on the ESC key
+ if keyCode == 27:
+ break
+        left_cap.release()
+        right_cap.release()
+ cv2.destroyAllWindows()
+ else:
+ print("Unable to open camera")
+
+
+if __name__ == "__main__":
+ show_camera()
diff --git a/instrumented/face_detect_faster.py b/instrumented/face_detect_faster.py
new file mode 100644
index 0000000..f3611b0
--- /dev/null
+++ b/instrumented/face_detect_faster.py
@@ -0,0 +1,104 @@
+# MIT License
+# Copyright (c) 2019 JetsonHacks
+# See LICENSE for OpenCV license and additional information
+
+# https://docs.opencv.org/3.3.1/d7/d8b/tutorial_py_face_detection.html
+# On the Jetson Nano, OpenCV comes preinstalled
+# Data files are in /usr/share/opencv4/haarcascades
+
+import cv2
+import numpy as np
+from csi_camera import CSI_Camera
+
+show_fps = True
+
+# Simple draw label on an image; in our case, the video frame
+def draw_label(cv_image, label_text, label_position):
+ font_face = cv2.FONT_HERSHEY_SIMPLEX
+ scale = 0.5
+ color = (255,255,255)
+    thickness = 1
+    # You can get the size of the string with cv2.getTextSize here
+    cv2.putText(cv_image, label_text, label_position, font_face, scale, color, thickness, cv2.LINE_AA)
+
+# Read a frame from the camera, and draw the FPS on the image if desired
+# Return an image
+def read_camera(csi_camera,display_fps):
+ _ , camera_image=csi_camera.read()
+ if display_fps:
+ draw_label(camera_image, "Frames Displayed (PS): "+str(csi_camera.last_frames_displayed),(10,20))
+ draw_label(camera_image, "Frames Read (PS): "+str(csi_camera.last_frames_read),(10,40))
+ return camera_image
+
+# Good for 1280x720
+DISPLAY_WIDTH=640
+DISPLAY_HEIGHT=360
+# For 1920x1080
+# DISPLAY_WIDTH=960
+# DISPLAY_HEIGHT=540
+
+# 1920x1080, 30 fps
+SENSOR_MODE_1080=2
+# 1280x720, 60 fps
+SENSOR_MODE_720=3
+
+def face_detect():
+ face_cascade = cv2.CascadeClassifier(
+ "/usr/share/opencv4/haarcascades/haarcascade_frontalface_default.xml"
+ )
+ eye_cascade = cv2.CascadeClassifier(
+ "/usr/share/opencv4/haarcascades/haarcascade_eye.xml"
+ )
+ left_camera = CSI_Camera()
+ left_camera.create_gstreamer_pipeline(
+ sensor_id=0,
+ sensor_mode=SENSOR_MODE_720,
+ framerate=30,
+ flip_method=0,
+ display_height=DISPLAY_HEIGHT,
+ display_width=DISPLAY_WIDTH,
+ )
+ left_camera.open(left_camera.gstreamer_pipeline)
+ left_camera.start()
+ cv2.namedWindow("Face Detect", cv2.WINDOW_AUTOSIZE)
+
+    if (left_camera.video_capture is None
+            or not left_camera.video_capture.isOpened()):
+        # Camera did not open, or no camera attached
+        print("Unable to open any cameras")
+        # TODO: Proper Cleanup
+        raise SystemExit(0)
+ try:
+ # Start counting the number of frames read and displayed
+ left_camera.start_counting_fps()
+        while cv2.getWindowProperty("Face Detect", 0) >= 0:
+            img = read_camera(left_camera, False)
+ gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+ faces = face_cascade.detectMultiScale(gray, 1.3, 5)
+
+ for (x, y, w, h) in faces:
+ cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
+ roi_gray = gray[y : y + h, x : x + w]
+ roi_color = img[y : y + h, x : x + w]
+ eyes = eye_cascade.detectMultiScale(roi_gray)
+ for (ex, ey, ew, eh) in eyes:
+ cv2.rectangle(
+ roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2
+ )
+ if show_fps:
+ draw_label(img, "Frames Displayed (PS): "+str(left_camera.last_frames_displayed),(10,20))
+ draw_label(img, "Frames Read (PS): "+str(left_camera.last_frames_read),(10,40))
+ cv2.imshow("Face Detect", img)
+ left_camera.frames_displayed += 1
+ keyCode = cv2.waitKey(5) & 0xFF
+ # Stop the program on the ESC key
+ if keyCode == 27:
+ break
+ finally:
+ left_camera.stop()
+ left_camera.release()
+ cv2.destroyAllWindows()
+
+
+if __name__ == "__main__":
+ face_detect()
diff --git a/instrumented/face_detect_fps.py b/instrumented/face_detect_fps.py
new file mode 100644
index 0000000..9b3c10c
--- /dev/null
+++ b/instrumented/face_detect_fps.py
@@ -0,0 +1,122 @@
+# MIT License
+# Copyright (c) 2019 JetsonHacks
+# See LICENSE for OpenCV license and additional information
+
+# https://docs.opencv.org/3.3.1/d7/d8b/tutorial_py_face_detection.html
+# On the Jetson Nano, OpenCV comes preinstalled
+# Data files are in /usr/share/opencv4/haarcascades
+import numpy as np
+import cv2
+import threading
+from timecontext import Timer
+
+# gstreamer_pipeline returns a GStreamer pipeline for capturing from the CSI camera
+# Defaults to capturing 3280x2464 @ 21 fps; displayed at 820x616
+# Flip the image by setting the flip_method (most common values: 0 and 2)
+# display_width and display_height determine the size of the window on the screen
+
+class RepeatTimer(threading.Timer):
+ def run(self):
+ while not self.finished.wait(self.interval):
+ self.function(*self.args, **self.kwargs)
+
+frames_displayed=0
+fps_timer=None
+
+def update_fps_stats():
+ global frames_displayed
+ print("======")
+ print("FPS: "+str(frames_displayed))
+ frames_displayed=0
+
+def start_counting_fps():
+ global fps_timer
+ print("starting to count fps")
+ fps_timer=RepeatTimer(1.0,update_fps_stats)
+ fps_timer.start()
+
+def gstreamer_pipeline(
+ capture_width=3280,
+ capture_height=2464,
+ display_width=820,
+ display_height=616,
+ framerate=21,
+ flip_method=0,
+):
+ return (
+ "nvarguscamerasrc ! "
+ "video/x-raw(memory:NVMM), "
+ "width=(int)%d, height=(int)%d, "
+ "format=(string)NV12, framerate=(fraction)%d/1 ! "
+ "nvvidconv flip-method=%d ! "
+ "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
+ "videoconvert ! "
+ "video/x-raw, format=(string)BGR ! appsink"
+ % (
+ capture_width,
+ capture_height,
+ framerate,
+ flip_method,
+ display_width,
+ display_height,
+ )
+ )
+
+
+def face_detect():
+ global frames_displayed
+ global fps_timer
+ face_cascade = cv2.CascadeClassifier(
+ "/usr/share/opencv4/haarcascades/haarcascade_frontalface_default.xml"
+ )
+ eye_cascade = cv2.CascadeClassifier(
+ "/usr/share/opencv4/haarcascades/haarcascade_eye.xml"
+ )
+ cap = cv2.VideoCapture(gstreamer_pipeline(), cv2.CAP_GSTREAMER)
+    if cap.isOpened():
+        try:
+            cv2.namedWindow("Face Detect", cv2.WINDOW_AUTOSIZE)
+            # Setup our Frames per second counter
+            start_counting_fps()
+            while cv2.getWindowProperty("Face Detect", 0) >= 0:
+                with Timer() as measure:
+                    ret, img = cap.read()
+                    print("---")
+                    print("Read Cam:" + str(measure.elapsed))
+                    before = measure.elapsed
+                    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+                    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
+                    print("detectMultiScale: " + str(measure.elapsed - before))
+                    before = measure.elapsed
+                    for (x, y, w, h) in faces:
+                        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
+                        roi_gray = gray[y : y + h, x : x + w]
+                        roi_color = img[y : y + h, x : x + w]
+                        eyes = eye_cascade.detectMultiScale(roi_gray)
+                        for (ex, ey, ew, eh) in eyes:
+                            cv2.rectangle(
+                                roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2
+                            )
+                    print("eyeCascade: " + str(measure.elapsed - before))
+                    print(measure.elapsed)
+                    cv2.imshow("Face Detect", img)
+
+                print("Elapsed time: " + str(measure.elapsed))
+                frames_displayed += 1
+                keyCode = cv2.waitKey(5) & 0xFF
+                # Stop the program on the ESC key
+                if keyCode == 27:
+                    break
+        finally:
+            if fps_timer is not None:
+                fps_timer.cancel()
+                fps_timer.join()
+
+        cap.release()
+        cv2.destroyAllWindows()
+    else:
+        print("Unable to open camera")
+
+
+if __name__ == "__main__":
+ face_detect()
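
The FPS counters here and in csi_camera.py rely on the same pattern: threading.Timer normally fires once, but overriding run() to loop on finished.wait(interval) turns it into a repeating timer. A minimal sketch of the pattern:

```python
# Sketch of the RepeatTimer pattern used for the per-second FPS stats.
import threading

class RepeatTimer(threading.Timer):
    def run(self):
        # wait() returns False on each timeout (fire the callback again)
        # and True once cancel() sets the finished event (stop looping)
        while not self.finished.wait(self.interval):
            self.function(*self.args, **self.kwargs)

count = 0

def tick():
    global count
    count += 1

timer = RepeatTimer(1.0, tick)  # calls tick() once per second
timer.start()
# ... measurement runs ...
timer.cancel()  # sets the finished event; the run() loop exits
timer.join()
```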
diff --git a/instrumented/simple_camera.py b/instrumented/simple_camera.py
new file mode 100644
index 0000000..2241af6
--- /dev/null
+++ b/instrumented/simple_camera.py
@@ -0,0 +1,73 @@
+# MIT License
+# Copyright (c) 2019 JetsonHacks
+# See license
+# Using a CSI camera (such as the Raspberry Pi Version 2) connected to a
+# NVIDIA Jetson Nano Developer Kit using OpenCV
+# Drivers for the camera and OpenCV are included in the base image
+
+import cv2
+from timecontext import Timer
+
+# gstreamer_pipeline returns a GStreamer pipeline for capturing from the CSI camera
+# Defaults to 1280x720 @ 60fps
+# Flip the image by setting the flip_method (most common values: 0 and 2)
+# display_width and display_height determine the size of the window on the screen
+
+
+def gstreamer_pipeline(
+ capture_width=1280,
+ capture_height=720,
+ display_width=1280,
+ display_height=720,
+ framerate=60,
+ flip_method=0,
+):
+ return (
+ "nvarguscamerasrc ! "
+ "video/x-raw(memory:NVMM), "
+ "width=(int)%d, height=(int)%d, "
+ "format=(string)NV12, framerate=(fraction)%d/1 ! "
+ "nvvidconv flip-method=%d ! "
+ "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
+ "videoconvert ! "
+ "video/x-raw, format=(string)BGR ! appsink"
+ % (
+ capture_width,
+ capture_height,
+ framerate,
+ flip_method,
+ display_width,
+ display_height,
+ )
+ )
+
+
+def show_camera():
+ # To flip the image, modify the flip_method parameter (0 and 2 are the most common)
+ print(gstreamer_pipeline(flip_method=0))
+ cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)
+ if cap.isOpened():
+        cv2.namedWindow("CSI Camera", cv2.WINDOW_AUTOSIZE)
+ # Window
+ while cv2.getWindowProperty("CSI Camera", 0) >= 0:
+ with Timer() as context_time:
+ ret_val, img = cap.read()
+ print(context_time.elapsed)
+ cv2.imshow("CSI Camera", img)
+ print(context_time.elapsed)
+
+            # This also acts as a frame limiter
+ keyCode = cv2.waitKey(20) & 0xFF
+ print(context_time.elapsed)
+ print("---")
+ # Stop the program on the ESC key
+ if keyCode == 27:
+ break
+ cap.release()
+ cv2.destroyAllWindows()
+ else:
+ print("Unable to open camera")
+
+
+if __name__ == "__main__":
+ show_camera()
diff --git a/instrumented/timecontext.py b/instrumented/timecontext.py
new file mode 100644
index 0000000..9d01fe1
--- /dev/null
+++ b/instrumented/timecontext.py
@@ -0,0 +1,25 @@
+from timeit import default_timer
+
+class Timer:
+    def __init__(self):
+        self.timer = default_timer
+        self.end_time = None
+
+ def __call__(self):
+ return self.timer()
+
+    def __enter__(self):
+        # Record the start time; elapsed can be read while the block is still running
+        self.start_time = self()
+        return self
+
+    def __exit__(self, exc_type, exc_value, exc_traceback):
+        self.end_time = self()
+
+ @property
+    def elapsed(self):
+        if self.end_time is None:
+            return self() - self.start_time
+        else:
+            return self.end_time - self.start_time
+
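
As a usage note for the context manager above: elapsed returns a running total while the block is still open, and a fixed duration once the block has exited. A short sketch:

```python
# Usage sketch for timecontext.Timer.
import time
from timecontext import Timer

with Timer() as t:
    time.sleep(0.1)
    print(t.elapsed)  # inside the block: time since __enter__, still counting
print(t.elapsed)      # after the block: fixed start-to-exit duration
```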