diff --git a/.gitignore b/.gitignore index 4bc53ac..5391d87 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,138 @@ -simple_camera -__pycache__ \ No newline at end of file +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ \ No newline at end of file diff --git a/README.md b/README.md index 8caefcf..8f528d8 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,9 @@ To test the camera: ``` # Simple Test # Ctrl^C to exit +# Jetson Nano A01 +$ gst-launch-1.0 nvarguscamerasrc ! nvoverlaysink + # sensor_id selects the camera: 0 or 1 on Jetson Nano B01 $ gst-launch-1.0 nvarguscamerasrc sensor_id=0 ! 
nvoverlaysink diff --git a/face_detect.ipynb b/face_detect.ipynb new file mode 100644 index 0000000..906f324 --- /dev/null +++ b/face_detect.ipynb @@ -0,0 +1,180 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import cv2\n", + "import IPython.display\n", + "import PIL.Image\n", + "import time\n", + "from io import BytesIO\n", + "import ipywidgets as widgets" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# gstreamer_pipeline returns a GStreamer pipeline for capturing from the CSI camera\n", + "# Defaults to 1280x720 @ 30fps\n", + "# Flip the image by setting the flip_method (most common values: 0 and 2)\n", + "# display_width and display_height determine the size of the window on the screen\n", + "\n", + "\n", + "def gstreamer_pipeline(\n", + " capture_width=3280,\n", + " capture_height=2464,\n", + " display_width=820,\n", + " display_height=616,\n", + " framerate=21,\n", + " flip_method=0,\n", + "):\n", + " return (\n", + " \"nvarguscamerasrc ! \"\n", + " \"video/x-raw(memory:NVMM), \"\n", + " \"width=(int)%d, height=(int)%d, \"\n", + " \"format=(string)NV12, framerate=(fraction)%d/1 ! \"\n", + " \"nvvidconv flip-method=%d ! \"\n", + " \"video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! \"\n", + " \"videoconvert ! \"\n", + " \"video/x-raw, format=(string)BGR ! appsink\"\n", + " % (\n", + " capture_width,\n", + " capture_height,\n", + " framerate,\n", + " flip_method,\n", + " display_width,\n", + " display_height,\n", + " )\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#Use 'jpeg' instead of 'png' (~5 times faster)\n", + "def show_array(a, display_id=None, fmt='jpeg'):\n", + " f = BytesIO()\n", + " PIL.Image.fromarray(a).save(f, fmt)\n", + " obj = IPython.display.Image(data=f.getvalue())\n", + " if display_id is not None:\n", + " IPython.display.update_display(obj, display_id=display_id)\n", + " return display_id\n", + " else:\n", + " return IPython.display.display(obj, display_id=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def face_detect():\n", + " # To flip the image, modify the flip_method parameter (0 and 2 are the most common)\n", + " print(gstreamer_pipeline(flip_method=2))\n", + " face_cascade = cv2.CascadeClassifier(\n", + " \"/usr/share/opencv4/haarcascades/haarcascade_frontalface_default.xml\"\n", + " )\n", + " eye_cascade = cv2.CascadeClassifier(\n", + " \"/usr/share/opencv4/haarcascades/haarcascade_eye.xml\"\n", + " )\n", + " # Video capturing from OpenCV\n", + " video_capture = cv2.VideoCapture(gstreamer_pipeline(flip_method=2), cv2.CAP_GSTREAMER)\n", + " display_id = None\n", + " fps_output = widgets.Output()\n", + " IPython.display.display(fps_output)\n", + " if video_capture.isOpened():\n", + " try:\n", + " while True:\n", + " t1 = time.time()\n", + " \n", + " return_value, frame = video_capture.read()\n", + " \n", + " if not return_value:\n", + " print(f\"return_value: {return_value}\")\n", + " break\n", + " \n", + " # Convert the image from OpenCV BGR format to matplotlib RGB format\n", + " # to display the image\n", + " gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n", + " faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n", + "\n", + " for (x, y, w, h) in faces:\n", + " cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n", + " roi_gray = 
gray[y : y + h, x : x + w]\n", + " roi_color = frame[y : y + h, x : x + w]\n", + " eyes = eye_cascade.detectMultiScale(roi_gray)\n", + " for (ex, ey, ew, eh) in eyes:\n", + " cv2.rectangle(\n", + " roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2\n", + " )\n", + "\n", + " if display_id is not None:\n", + " show_array(frame, display_id)\n", + " else:\n", + " display_handle = show_array(frame)\n", + " display_id = display_handle.display_id\n", + " \n", + " t2 = time.time()\n", + "\n", + " #ref: https://github.com/jupyter-widgets/ipywidgets/issues/1744#issuecomment-335179855\n", + " with fps_output:\n", + " print(f\"display_id: {display_id}\")\n", + " print(f\"{(1/(t2-t1)):.4f} FPS\")\n", + " # Display the frame info until new frame is available\n", + " IPython.display.clear_output(wait=True)\n", + " \n", + " except KeyboardInterrupt as e:\n", + " print(f\"KeyboardInterrupt\")\n", + " except Exception as e:\n", + " print(f\"Exception: {e}\")\n", + " finally:\n", + " # Release the Video Device\n", + " video_capture.release()\n", + " # Message to be displayed after releasing the device\n", + " print(\"Released Video Resource\")\n", + " else:\n", + " print(\"Unable to open camera\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "face_detect()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/instrumented/face_detect_faster.ipynb b/instrumented/face_detect_faster.ipynb new file mode 100644 index 0000000..d3b426d --- /dev/null +++ b/instrumented/face_detect_faster.ipynb @@ -0,0 +1,225 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import cv2\n", + "import numpy as np\n", + "from csi_camera import CSI_Camera\n", + "import IPython.display\n", + "import PIL.Image\n", + "from io import BytesIO\n", + "import ipywidgets as widgets" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_fps = True" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Simple draw label on an image; in our case, the video frame\n", + "def draw_label(cv_image, label_text, label_position):\n", + " font_face = cv2.FONT_HERSHEY_SIMPLEX\n", + " scale = 0.5\n", + " color = (255,255,255)\n", + " # You can get the size of the string with cv2.getTextSize here\n", + " cv2.putText(cv_image, label_text, label_position, font_face, scale, color, 1, cv2.LINE_AA)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Read a frame from the camera, and draw the FPS on the image if desired\n", + "# Return an image\n", + "def read_camera(csi_camera, display_fps):\n", + " _ , camera_image=csi_camera.read()\n", + " if display_fps:\n", + " draw_label(camera_image, \"Frames Displayed (PS): \"+str(csi_camera.last_frames_displayed),(10,20))\n", + " draw_label(camera_image, \"Frames Read (PS): \"+str(csi_camera.last_frames_read),(10,40))\n", + " return camera_image" + ] + }, + { + "cell_type": "code", + "execution_count": 
null, + "metadata": {}, + "outputs": [], + "source": [ + "#Use 'jpeg' instead of 'png' (~5 times faster)\n", + "def show_array_IPython():\n", + " display_id = None\n", + " def wrapper(array: np.ndarray, fmt='jpeg'):\n", + " nonlocal display_id\n", + " f = BytesIO()\n", + " PIL.Image.fromarray(array).save(f, fmt)\n", + " obj = IPython.display.Image(data=f.getvalue())\n", + " if display_id is not None:\n", + " IPython.display.update_display(obj, display_id=display_id)\n", + " else:\n", + " display_id = IPython.display.display(obj, display_id=True).display_id\n", + " return wrapper" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# def outter():\n", + "# id = None\n", + "# def inner(new_id = None):\n", + "# nonlocal id\n", + "# if new_id is not None:\n", + "# id = new_id\n", + "# return id\n", + "# return inner\n", + "# o1 = outter()\n", + "# o2 = outter()\n", + "# o1(\"aaaa\")\n", + "# o1()\n", + "# o2()\n", + "# o2(\"bbbbb\")\n", + "# o2()\n", + "# o1()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Good for 1280x720\n", + "DISPLAY_WIDTH=640\n", + "DISPLAY_HEIGHT=360\n", + "# For 1920x1080\n", + "# DISPLAY_WIDTH=960\n", + "# DISPLAY_HEIGHT=540\n", + "\n", + "# 1920x1080, 30 fps\n", + "SENSOR_MODE_1080=2\n", + "# 1280x720, 60 fps\n", + "SENSOR_MODE_720=3" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def face_detect():\n", + " face_cascade = cv2.CascadeClassifier(\n", + " \"/usr/share/opencv4/haarcascades/haarcascade_frontalface_default.xml\"\n", + " )\n", + " eye_cascade = cv2.CascadeClassifier(\n", + " \"/usr/share/opencv4/haarcascades/haarcascade_eye.xml\"\n", + " )\n", + " left_camera = CSI_Camera()\n", + " left_camera.create_gstreamer_pipeline(\n", + " sensor_id=0,\n", + " sensor_mode=SENSOR_MODE_720,\n", + " framerate=60,\n", + " flip_method=2,\n", + " display_height=DISPLAY_HEIGHT,\n", + " display_width=DISPLAY_WIDTH,\n", + " )\n", + " left_camera.open(left_camera.gstreamer_pipeline)\n", + " left_camera.start()\n", + "\n", + " if (\n", + " not left_camera.video_capture.isOpened()\n", + " ):\n", + " # Cameras did not open, or no camera attached\n", + "\n", + " print(\"Unable to open any cameras\")\n", + " # TODO: Proper Cleanup\n", + " SystemExit(0)\n", + " try:\n", + " # Start counting the number of frames read and displayed\n", + " left_camera.start_counting_fps()\n", + " show = show_array_IPython()\n", + " while True:\n", + " img = read_camera(left_camera,False)\n", + " # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n", + " \n", + " #--- Start Face Detection ---#\n", + " gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n", + " faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n", + "\n", + " for (x, y, w, h) in faces:\n", + " cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n", + " roi_gray = gray[y : y + h, x : x + w]\n", + " roi_color = img[y : y + h, x : x + w]\n", + " eyes = eye_cascade.detectMultiScale(roi_gray)\n", + " for (ex, ey, ew, eh) in eyes:\n", + " cv2.rectangle(\n", + " roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2\n", + " )\n", + " #--- End Face Detection ---#\n", + " \n", + " if show_fps:\n", + " draw_label(img, \"Frames Displayed (PS): \"+str(left_camera.last_frames_displayed),(10,20))\n", + " draw_label(img, \"Frames Read (PS): \"+str(left_camera.last_frames_read),(10,40))\n", + " # cv2.imshow(\"Face Detect\", img)\n", + " show(img)\n", + " 
left_camera.frames_displayed += 1\n", + " except KeyboardInterrupt as e:\n", + " print(f\"KeyboardInterrupt\")\n", + " except Exception as e:\n", + " print(f\"Exception: {e}\")\n", + " finally:\n", + " left_camera.stop()\n", + " left_camera.release()\n", + " print(\"Released Video Resource\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "face_detect()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/simple_camera.ipynb b/simple_camera.ipynb new file mode 100644 index 0000000..95ada6f --- /dev/null +++ b/simple_camera.ipynb @@ -0,0 +1,157 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import cv2\n", + "import IPython.display\n", + "import PIL.Image\n", + "import time\n", + "from io import BytesIO\n", + "import ipywidgets as widgets" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def gstreamer_pipeline(\n", + " capture_width=1280,\n", + " capture_height=720,\n", + " display_width=1280,\n", + " display_height=720,\n", + " framerate=60,\n", + " flip_method=0,\n", + "):\n", + " return (\n", + " \"nvarguscamerasrc ! \"\n", + " \"video/x-raw(memory:NVMM), \"\n", + " \"width=(int)%d, height=(int)%d, \"\n", + " \"format=(string)NV12, framerate=(fraction)%d/1 ! \"\n", + " \"nvvidconv flip-method=%d ! \"\n", + " \"video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! \"\n", + " \"videoconvert ! \"\n", + " \"video/x-raw, format=(string)BGR ! 
appsink\"\n", + " % (\n", + " capture_width,\n", + " capture_height,\n", + " framerate,\n", + " flip_method,\n", + " display_width,\n", + " display_height,\n", + " )\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#Use 'jpeg' instead of 'png' (~5 times faster)\n", + "def show_array(a, display_id=None, fmt='jpeg'):\n", + " f = BytesIO()\n", + " PIL.Image.fromarray(a).save(f, fmt)\n", + " obj = IPython.display.Image(data=f.getvalue())\n", + " if display_id is not None:\n", + " IPython.display.update_display(obj, display_id=display_id)\n", + " return display_id\n", + " else:\n", + " return IPython.display.display(obj, display_id=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def show_camera():\n", + " # To flip the image, modify the flip_method parameter (0 and 2 are the most common)\n", + " print(gstreamer_pipeline(flip_method=0))\n", + " # Video capturing from OpenCV\n", + " video_capture = cv2.VideoCapture(gstreamer_pipeline(flip_method=2), cv2.CAP_GSTREAMER)\n", + " display_id = None\n", + " fps_output = widgets.Output()\n", + " IPython.display.display(fps_output)\n", + " if video_capture.isOpened():\n", + " try:\n", + " while True:\n", + " t1 = time.time()\n", + " \n", + " return_value, frame = video_capture.read()\n", + " \n", + " if not return_value:\n", + " print(f\"return_value: {return_value}\")\n", + " break\n", + " \n", + " # Convert the image from OpenCV BGR format to matplotlib RGB format\n", + " # to display the image\n", + " frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n", + " \n", + " if display_id is not None:\n", + " show_array(frame, display_id)\n", + " else:\n", + " display_handle = show_array(frame)\n", + " display_id = display_handle.display_id\n", + " \n", + " t2 = time.time()\n", + "\n", + " #ref: https://github.com/jupyter-widgets/ipywidgets/issues/1744#issuecomment-335179855\n", + " with fps_output:\n", + " print(f\"display_id: {display_id}\")\n", + " print(f\"{(1/(t2-t1)):.4f} FPS\")\n", + " # Display the frame info until new frame is available\n", + " IPython.display.clear_output(wait=True)\n", + " \n", + " except KeyboardInterrupt as e:\n", + " print(f\"KeyboardInterrupt\")\n", + " except Exception as e:\n", + " print(f\"Exception: {e}\")\n", + " finally:\n", + " # Release the Video Device\n", + " video_capture.release()\n", + " # Message to be displayed after releasing the device\n", + " print(\"Released Video Resource\")\n", + " else:\n", + " print(\"Unable to open camera\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_camera()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +}