diff --git a/.github/workflows/workflow-validate.yaml b/.github/workflows/workflow-validate.yaml index e3eb45f42a..1057becf1d 100644 --- a/.github/workflows/workflow-validate.yaml +++ b/.github/workflows/workflow-validate.yaml @@ -17,7 +17,7 @@ jobs: - uses: actions/checkout@v4 - name: Run spell check run: | - pip install codespell + pip install codespell==2.3.0 codespell -I "scripts/resources/spell-check-ignore-list.txt" --skip="*.svg,*.dxf,*.pdf" ./content/ lint: diff --git a/content/hardware/01.mkr/01.boards/mkr-wan-1300/compatibility.yml b/content/hardware/01.mkr/01.boards/mkr-wan-1300/compatibility.yml index 1c8a8ce208..03bb6e1d97 100644 --- a/content/hardware/01.mkr/01.boards/mkr-wan-1300/compatibility.yml +++ b/content/hardware/01.mkr/01.boards/mkr-wan-1300/compatibility.yml @@ -8,7 +8,6 @@ hardware: carriers: ~ shields: - mkr-485-shield - - mkr-can-shield - mkr-env-shield - mkr-eth-shield - mkr-imu-shield diff --git a/content/hardware/01.mkr/01.boards/mkr-wan-1310/compatibility.yml b/content/hardware/01.mkr/01.boards/mkr-wan-1310/compatibility.yml index 1c8a8ce208..03bb6e1d97 100644 --- a/content/hardware/01.mkr/01.boards/mkr-wan-1310/compatibility.yml +++ b/content/hardware/01.mkr/01.boards/mkr-wan-1310/compatibility.yml @@ -8,7 +8,6 @@ hardware: carriers: ~ shields: - mkr-485-shield - - mkr-can-shield - mkr-env-shield - mkr-eth-shield - mkr-imu-shield diff --git a/content/hardware/01.mkr/02.shields/mkr-can-shield/compatibility.yml b/content/hardware/01.mkr/02.shields/mkr-can-shield/compatibility.yml index b9daef6671..0668458676 100644 --- a/content/hardware/01.mkr/02.shields/mkr-can-shield/compatibility.yml +++ b/content/hardware/01.mkr/02.shields/mkr-can-shield/compatibility.yml @@ -7,8 +7,6 @@ hardware: - mkr-1000-wifi - mkr-wifi-1010 - mkr-fox-1200 - - mkr-wan-1300 - - mkr-wan-1310 - mkr-gsm-1400 - mkr-nb-1500 - mkr-vidor-4000 diff --git a/content/hardware/02.hero/boards/uno-r4-minima/tech-specs.yml b/content/hardware/02.hero/boards/uno-r4-minima/tech-specs.yml index 9b6994fe04..8a13a89add 100644 --- a/content/hardware/02.hero/boards/uno-r4-minima/tech-specs.yml +++ b/content/hardware/02.hero/boards/uno-r4-minima/tech-specs.yml @@ -1,6 +1,6 @@ Board: Name: Arduino® UNO R4 Minima - SKU: ABX00080 + SKU: ABX00080 / ABX00080_CN / ABX00080_IN Microcontroller: Renesas RA4M1 (Arm® Cortex®-M4) USB: USB-C®: Programming Port diff --git a/content/hardware/02.hero/boards/uno-r4-wifi/tech-specs.yml b/content/hardware/02.hero/boards/uno-r4-wifi/tech-specs.yml index a16acd9463..a6c3f105e2 100644 --- a/content/hardware/02.hero/boards/uno-r4-wifi/tech-specs.yml +++ b/content/hardware/02.hero/boards/uno-r4-wifi/tech-specs.yml @@ -1,6 +1,6 @@ Board: Name: Arduino® UNO R4 WiFi - SKU: ABX00087 + SKU: ABX00087 / ABX00087_CN / ABX00087_IN Microcontroller: Renesas RA4M1 (Arm® Cortex®-M4) Radio Module: ESP32-S3-MINI-1-N8 USB: @@ -15,7 +15,7 @@ Communication: UART: Yes, 1x I2C: Yes, 1x SPI: Yes, 1x - CAN: Yes 1 CAN Bus + CAN: Yes 1x CAN Bus Power: Circuit operating voltage: 5 V (ESP32-S3 is 3.3 V) Input voltage (VIN): 6-24 V diff --git a/content/hardware/03.nano/boards/nano-esp32/certifications/Arduino_ABX00083-CERT_RoHS_with or without headers.pdf b/content/hardware/03.nano/boards/nano-esp32/certifications/Arduino_ABX00083-CERT_RoHS_with or without headers.pdf index df083547bc..4e7041d933 100644 Binary files a/content/hardware/03.nano/boards/nano-esp32/certifications/Arduino_ABX00083-CERT_RoHS_with or without headers.pdf and 
b/content/hardware/03.nano/boards/nano-esp32/certifications/Arduino_ABX00083-CERT_RoHS_with or without headers.pdf differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/essentials.md b/content/hardware/04.pro/shields/portenta-vision-shield/essentials.md index be06ebc99b..18cecffafc 100644 --- a/content/hardware/04.pro/shields/portenta-vision-shield/essentials.md +++ b/content/hardware/04.pro/shields/portenta-vision-shield/essentials.md @@ -4,10 +4,12 @@ productsLibrariesMap: --- - - A quick guide to installing your shield with OpenMV IDE. + + A full guide to the basics of the Vision Shield. - + + A quick guide to testing your shield with Arduino IDE. + diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/product.md b/content/hardware/04.pro/shields/portenta-vision-shield/product.md index 67e91bed8c..64cdb15ec4 100644 --- a/content/hardware/04.pro/shields/portenta-vision-shield/product.md +++ b/content/hardware/04.pro/shields/portenta-vision-shield/product.md @@ -1,9 +1,10 @@ --- title: Portenta Vision Shield url_shop: https://store.arduino.cc/portenta-vision-shield -url_guide: /tutorials/portenta-vision-shield/getting-started-camera -primary_button_url: /tutorials/portenta-vision-shield/getting-started-camera -primary_button_title: Get Started + +url_guide: /tutorials/portenta-vision-shield/user-manual +primary_button_url: /tutorials/portenta-vision-shield/user-manual +primary_button_title: User Manual secondary_button_url: /tutorials/portenta-vision-shield/things-network-openmv secondary_button_title: TTN OpenMV Guide core: arduino:mbed_portenta diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/antenna.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/antenna.png new file mode 100644 index 0000000000..39ff577dfd Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/antenna.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/arch-bottom.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/arch-bottom.png new file mode 100644 index 0000000000..51b4d8b481 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/arch-bottom.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/arch-top-c.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/arch-top-c.png new file mode 100644 index 0000000000..630150fa76 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/arch-top-c.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/bar-codes.gif b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/bar-codes.gif new file mode 100644 index 0000000000..415e2d227f Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/bar-codes.gif differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/camera.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/camera.png new file mode 100644 index 0000000000..701dca8d63 Binary files /dev/null and 
b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/camera.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/click-connect.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/click-connect.png new file mode 100644 index 0000000000..9667833ad5 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/click-connect.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/cmd-connected.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/cmd-connected.png new file mode 100644 index 0000000000..d26599aa86 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/cmd-connected.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/cmd.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/cmd.png new file mode 100644 index 0000000000..2f1809015f Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/cmd.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/create-pro.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/create-pro.png new file mode 100644 index 0000000000..fdf2fa59a2 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/create-pro.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ei-landing.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ei-landing.png new file mode 100644 index 0000000000..0646e8f059 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ei-landing.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ethernet-connect.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ethernet-connect.png new file mode 100644 index 0000000000..1621b54400 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ethernet-connect.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ethernet-pinout.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ethernet-pinout.png new file mode 100644 index 0000000000..46988b3617 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ethernet-pinout.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/face.gif b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/face.gif new file mode 100644 index 0000000000..5e7e23975f Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/face.gif differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/fft.gif b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/fft.gif new file mode 100644 index 0000000000..bf17395147 
Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/fft.gif differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/first-connect.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/first-connect.png new file mode 100644 index 0000000000..0e6b43dd41 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/first-connect.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/first-open.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/first-open.png new file mode 100644 index 0000000000..c569b7cd35 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/first-open.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/flashing.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/flashing.png new file mode 100644 index 0000000000..90f488114c Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/flashing.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/gain.gif b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/gain.gif new file mode 100644 index 0000000000..55cb28e115 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/gain.gif differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/h7_vision-shield.mp4 b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/h7_vision-shield.mp4 new file mode 100644 index 0000000000..6f9b35787f Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/h7_vision-shield.mp4 differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/helloworld.gif b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/helloworld.gif new file mode 100644 index 0000000000..22cf15067a Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/helloworld.gif differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/lora.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/lora.png new file mode 100644 index 0000000000..d2645f6ee5 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/lora.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/microphones.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/microphones.png new file mode 100644 index 0000000000..c476874a67 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/microphones.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ml-inference-2.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ml-inference-2.png new file mode 100644 index 
0000000000..6cff6525d4 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ml-inference-2.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ml-tools-data.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ml-tools-data.png new file mode 100644 index 0000000000..696df8cefc Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ml-tools-data.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ml-tools-upload.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ml-tools-upload.png new file mode 100644 index 0000000000..9101fc778a Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ml-tools-upload.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ml-tools.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ml-tools.png new file mode 100644 index 0000000000..9b0a65e810 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ml-tools.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/model-speech.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/model-speech.png new file mode 100644 index 0000000000..f820a8e521 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/model-speech.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ntp.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ntp.png new file mode 100644 index 0000000000..d3dbe507e2 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ntp.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/openmv-down.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/openmv-down.png new file mode 100644 index 0000000000..78670e9876 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/openmv-down.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/person-detect-4.gif b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/person-detect-4.gif new file mode 100644 index 0000000000..30a0dd5d69 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/person-detect-4.gif differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/qr.gif b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/qr.gif new file mode 100644 index 0000000000..e125d8bd6b Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/qr.gif differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ready-connected.png 
b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ready-connected.png new file mode 100644 index 0000000000..a2dd31e3e1 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/ready-connected.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/resolutions-2.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/resolutions-2.png new file mode 100644 index 0000000000..66ac7e2113 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/resolutions-2.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/snapshot.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/snapshot.png new file mode 100644 index 0000000000..9256663055 Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/snapshot.png differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/video-ani.gif b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/video-ani.gif new file mode 100644 index 0000000000..1b1e24ba8b Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/assets/video-ani.gif differ diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/content.md b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/content.md new file mode 100644 index 0000000000..d17f1f6fb3 --- /dev/null +++ b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/content.md @@ -0,0 +1,998 @@ +--- +title: 'Portenta Vision Shield User Manual' +difficulty: beginner +compatible-products: [portenta-vision-shield] +description: 'Learn about the hardware and software features of the Arduino® Portenta Vision Shield.' +tags: + - Camera + - Sensors + - Machine Learning +author: 'Christopher Méndez' +hardware: + - hardware/04.pro/shields/portenta-vision-shield +software: + - ide-v1 + - ide-v2 + - web-editor + - iot-cloud +--- + +## Overview + +This user manual will guide you through a practical journey covering the most interesting features of the Arduino Portenta Vision Shield. With this user manual, you will learn how to set up, configure and use this Arduino shield. + +## Hardware and Software Requirements +### Hardware Requirements + +- [Portenta Vision Shield Ethernet](https://store.arduino.cc/collections/shields-carriers/products/arduino-portenta-vision-shield-ethernet) (x1) or [Portenta Vision Shield LoRa®](https://store.arduino.cc/collections/shields-carriers/products/arduino-portenta-vision-shield-lora%C2%AE) (x1) +- [Portenta H7](https://store.arduino.cc/products/portenta-h7) (x1) or [Portenta C33](https://store.arduino.cc/products/portenta-c33) (x1) +- [USB-C® cable](https://store.arduino.cc/products/usb-cable2in1-type-c) (x1) + +### Software Requirements + +- [OpenMV IDE](https://openmv.io/pages/download) +- [Arduino IDE 1.8.10+](https://www.arduino.cc/en/software), [Arduino IDE 2.0+](https://www.arduino.cc/en/software), or [Arduino Web Editor](https://create.arduino.cc/editor) +- To create custom Machine Learning models, the Machine Learning Tools add-on integrated into the [Arduino Cloud](https://create.arduino.cc/iot/) is needed.
In case you do not have an Arduino Cloud account, you will need to create one first. + +## Product Overview + +The Arduino Portenta Vision Shield is an add-on board providing machine vision capabilities and additional connectivity to the Portenta family of Arduino boards, designed to meet the needs of industrial automation. The Portenta Vision Shield connects via a high-density connector to the Portenta boards with minimal hardware and software setup. + +***The Portenta Vision Shield has two hardware revisions, distinguished only by the onboard camera sensor; all other features of the shield remain identical across both revisions.*** + +- The Portenta Vision Shield (**Rev 1**) includes the **HM01B0** (1/11" 320 x 320 VGA 60FPS) CMOS camera module. +- The Portenta Vision Shield (**Rev 2**) includes the **HM0360** (1/6" 640 x 480 VGA 60FPS) CMOS camera module. + +The included camera module has been pre-configured to work with the OpenMV libraries provided by Arduino. Based on the specific application requirements, the Portenta Vision Shield is available in two configurations with either Ethernet or LoRa® connectivity. + +### Board Architecture Overview + +The Portenta Vision Shield LoRa® brings industry-rated features to your Portenta. This hardware add-on will let you run embedded computer vision applications, connect wirelessly via LoRa® to the Arduino Cloud or your own infrastructure, and activate your system upon the detection of sound events. + +![Vision Shield main components (top view)](assets/arch-top-c.png) +![Vision Shield main components (bottom view)](assets/arch-bottom.png) + +Here is an overview of the board's main components, as shown in the images above: + +- **Power Regulator**: the Portenta H7/C33 supplies 3.3 V power to the LoRa® module (ASX00026 only), Ethernet communication (ASX00021 only), Micro SD slot and dual microphones via the 3.3 V output of the high-density connectors. An onboard LDO regulator supplies a 2.8 V output (300 mA) for the camera module. + +- **Camera**: the Himax HM01B0 (320x320) and HM0360 (640x480) modules are very low-power cameras with a maximum of 60 FPS depending on the operating mode. Video data is transferred over a configurable 8-bit interface with support for frame and line synchronization. The module delivered with the Portenta Vision Shield is the monochrome version. Configuration is achieved via an I2C connection with the compatible Portenta boards' microcontrollers. + + Himax modules offer very low-power image acquisition and provide the possibility to perform motion detection without main processor interaction. The “Always-on” operation provides the ability to turn on the main processor when movement is detected with minimal power consumption. + + ***The Portenta C33 is not compatible with the camera of the Portenta Vision Shield.*** + +- **Digital Microphones**: the dual MP34DT05 digital MEMS microphones are omnidirectional and operate via a capacitive sensing element +with a high (64 dB) signal-to-noise ratio. The microphones have been configured to provide separate left and right audio over a single PDM stream. + + The sensing element, capable of detecting acoustic waves, is manufactured using a specialized silicon micromachining process dedicated to producing audio sensors. + +- **Micro SD Card Slot**: a Micro SD card slot is available under the Portenta Vision Shield board.
Available libraries allow reading and +writing to FAT16/32 formatted cards + +- **Ethernet (ASX00021 Only)**: ethernet connector allows connecting to 10/100 Base TX networks using the Ethernet PHY available on the Portenta +board. + +- **LoRa® Module (ASX00026 Only)**: LoRa® connectivity is provided by the Murata CMWX1ZZABZ module. This module contains an STM32L0 processor +along with a Semtech SX1276 Radio. The processor is running on Arduino open-source firmware based on Semtech code. + +### Shield Environment Setup + +Connect the Vision Shield with a Portenta H7 through their High-Density connectors and verify they are correctly aligned. + +
+ +
+ +#### OpenMV IDE Setup + +Before you can start programming MicroPython scripts for the Vision Shield, you need to download and install the OpenMV IDE. + +Open the [OpenMV](https://openmv.io/pages/download) download page in your browser, download the latest version available for your operating system, and follow the instructions of the installer. + +![OpenMV Download Page](assets/openmv-down.png) + +Open the **OpenMV IDE** and connect the Portenta H7 to your computer via the USB cable if you have not done so yet. + +![The OpenMV IDE after starting it](assets/first-open.png) + +Click on the "connect" symbol at the bottom of the left toolbar. + +![Click the connect button to attach the Portenta H7 to the OpenMV IDE](assets/click-connect.png) + +If your Portenta H7 does not have the latest firmware, a pop-up will ask you to install it. Your board will enter in DFU mode and its green LED will start fading. + +Select `Install the latest release firmware`. This will install the latest OpenMV firmware on the H7. You can leave the option of erasing the internal file system unselected and click `OK` + +![Install the latest version of the OpenMV firmware](assets/first-connect.png) + +Portenta H7's green LED will start flashing while the OpenMV firmware is being uploaded to the board. A loading bar will start showing you the flashing progress. + +Wait until the green LED stops flashing and fading. You will see a message saying `DFU firmware update complete!` when the process is done. + +![Installing firmware on H7 board in OpenMV](assets/flashing.png) + +The board will start flashing its blue LED when it is ready to be connected. After confirming the completion dialog, the Portenta H7 should already be connected to the OpenMV IDE, otherwise, click the "connect" button (plug symbol) once again (the blue blinking should stop). + +![When the H7 is successfully connected a green play button appears](assets/ready-connected.png) + +While using the Portenta H7 with OpenMV, the RGB LED of the board can be used to inform the user about its current status. Some of the most important ones are the following: + +🟢 **Blinking Green:** Your Portenta H7 onboard bootloader is running. The onboard bootloader runs for a few seconds when your H7 is powered via USB to allow OpenMV IDE to reprogram your Portenta. + +🔵 **Blinking Blue:** Your Portenta H7 is running the default __main.py__ script onboard. + +If you overwrite the __main.py__ script on your Portenta H7, then it will run whatever code you loaded on it instead. + +***If the LED is blinking blue but OpenMV IDE cannot connect to your Portenta H7, please make sure you are connecting your Portenta H7 to your PC with a USB cable that supplies both data and power.*** + +⚪ **Blinking White:** Your Portenta H7 firmware is panicking because of a hardware failure. Please check that your Vision Shield's camera module is installed securely. + +***If you tap the Portenta H7 reset button once, the board resets. 
If you tap it twice, the board enters Device Firmware Upgrade (DFU) mode and its green LED starts blinking and fading.*** + +### Pinout + +![Vision Shield simple pinout](assets/ethernet-pinout.png) + +The full pinout is available and downloadable as PDF from the link below: + +- [Vision Shield full pinout](https://docs.arduino.cc/resources/pinouts/ABX00051-full-pinout.pdf) + +### Datasheet + +The complete datasheet is available and downloadable as PDF from the link below: + +- [Vision Shield datasheet](https://docs.arduino.cc/resources/datasheets/ASX00021-ASX00026-datasheet.pdf) + +### Schematics + +The complete schematics are available and downloadable as PDF from the links below: + +- [Vision Shield - Ethernet schematics](https://docs.arduino.cc/resources/schematics/ASX00021-schematics.pdf) +- [Vision Shield - LoRa® schematics](https://docs.arduino.cc/resources/schematics/ASX00026-schematics.pdf) + +### STEP Files + +The complete STEP files are available and downloadable from the link below: + +- [Vision Shield STEP files](https://docs.arduino.cc/static/c1c3c72a51d20228fe415ac8717615f6/visionShields-step.zip) + +## First Use + +### Hello World Example + +When working with camera modules, the classic `Hello World` example is not an LED blink but the simplest sketch that captures an image. We will use this example to verify the board's connection to the IDEs and that the Vision Shield itself is working as expected. + +The following example script can be found in **File > Examples > HelloWorld > helloworld.py** in the OpenMV IDE. + +```python +import sensor +import time + +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) +sensor.skip_frames(time=2000) # Wait for settings take effect. +clock = time.clock() # Create a clock object to track the FPS. + +while True: + clock.tick() # Update the FPS clock. + img = sensor.snapshot() # Take a picture and return the image. + print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected + # to the IDE. The FPS should increase once disconnected. +``` + +![Camera streaming demo](assets/helloworld.gif) + +From the above example script, we can highlight the main functions: + +- `sensor.set_pixformat()` lets you set the pixel format for the camera sensor. The Vision Shield is compatible with these: `sensor.GRAYSCALE` and `sensor.BAYER`. + + To set the pixel format to any of the supported ones, just pass it as the `set_pixformat` function argument. + +- `sensor.set_framesize()` lets you define the image frame size in terms of pixels. [Here](https://docs.openmv.io/library/omv.sensor.html#sensor.set_framesize) you can find all the different options. + + ![Different resolutions examples](assets/resolutions-2.png) + + Here are some tested settings for your camera setup: + + | **Resolution** | **Setting** | **Compatibility** | **Note** | + | :------------: | :---------------: | :---------------: | :--------------------: | + | 320 x 240 | `sensor.QVGA` | HM01B0 and HM0360 | | + | 320 x 320 | `sensor.B320X320` | HM01B0 | Full sensor resolution | + | 640 x 480 | `sensor.VGA` | HM0360 | Full sensor resolution | + +- `sensor.snapshot()` lets you take a picture and return the image so you can save it, stream it or process it. + +## Camera + +The Portenta Vision Shield's main feature is its onboard camera, based on the HM01B0 or HM0360 ultra-low-power CMOS image sensor.
It is perfect for Machine Learning applications such as object detection, image classification, machine/computer vision, robotics, IoT, and more. + +![Onboard camera sensor](assets/camera.png) + +### HM01B0 Camera Features + +- Ultra-Low-Power Image Sensor designed for always-on vision devices and applications +- High-sensitivity 3.6 μm BrightSense™ pixel technology +- Window, vertical flip and horizontal mirror readout +- Programmable black level calibration target, frame size, frame rate, exposure, analog gain (up to 8x) and digital gain (up to 4x) +- Automatic exposure and gain control loop with support for 50 Hz / 60 Hz flicker avoidance +- Motion Detection circuit with programmable ROI and detection threshold with digital output to serve as an interrupt + +**Supported Resolutions** + +- QQVGA (160x120) at 15, 30, and 60 FPS +- QVGA (320x240) at 15, 30 and 60 FPS +- B320X320 (320x320) at 15, 30 and 45 FPS + +**Power Consumption** + +- < 1.1 mW QQVGA resolution at 30 FPS +- < 2 mW QVGA resolution at 30 FPS +- < 4 mW QVGA resolution at 60 FPS + +### HM0360 Camera Features + +- Ultra-Low-Power, high sensitivity, low noise VGA sensor +- On-chip auto exposure / gain and zone detection +- Automatic wake and sleep operation with programmable event interrupt to host processor +- Pre-metered exposure provides a well-exposed first frame, even after an extended sleep (blanking) period +- Embedded line provides metadata such as frame count, AE statistics, zone trigger and other interrupt event information + +**Supported Resolutions** + +- QQVGA (160x120) at 15, 30, and 60 FPS +- QVGA (320x240) at 15, 30 and 60 FPS +- VGA (640x480) at 15, 30 and 60 FPS + +**Power Consumption** + +- 140 µA QVGA resolution at 2 FPS +- 3.2 mA QVGA resolution at 60 FPS +- 7.8 mA VGA resolution at 60 FPS + +The Vision Shield is primarily intended to be used with the OpenMV MicroPython ecosystem, so this IDE is recommended for machine vision applications. + +### Snapshot Example + +The example code below lets you take a picture and save it on the Portenta H7 local storage or in a Micro SD card as `example.jpg`. + +```python +import sensor +import time +import machine + +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) +sensor.skip_frames(time=2000) # Wait for settings take effect. + +led = machine.LED("LED_BLUE") + +start = time.ticks_ms() +while time.ticks_diff(time.ticks_ms(), start) < 3000: + sensor.snapshot() + led.toggle() + +led.off() + +img = sensor.snapshot() +img.save("example.jpg") # or "example.bmp" (or others) + +raise (Exception("Please reset the camera to see the new file.")) +``` + +***If a Micro SD card is inserted into the Vision Shield, the snapshot will be stored there.*** + +After the snapshot is taken, reset the board by pressing the reset button, and the image will be on the board storage drive. + +![Snapshot saved in H7 local storage](assets/snapshot.png) + +### Video Recording Example + +The example code below lets you record a video and save it on the Portenta H7 local storage or in a Micro SD card as `example.mjpeg`. + +```python +import sensor +import time +import mjpeg +import machine + +sensor.reset() # Reset and initialize the sensor.
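+# Note: the Vision Shield camera module is monochrome, so GRAYSCALE is its native pixel format (BAYER is also supported).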
+sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) +sensor.skip_frames(time=2000) # Wait for settings take effect. + +led = machine.LED("LED_RED") + +led.on() +m = mjpeg.Mjpeg("example.mjpeg") + +clock = time.clock() # Create a clock object to track the FPS. +for i in range(50): + clock.tick() + m.add_frame(sensor.snapshot()) + print(clock.fps()) + +m.close() +led.off() + +raise (Exception("Please reset the camera to see the new file.")) +``` +We recommend you use [VLC](https://www.videolan.org/vlc/) to play the video. + +![Video saved in local storage](assets/video-ani.gif) + +### Sensor Control + +There are several functions that allow us to configure the behavior of the camera sensor and adapt it to our needs. + +**Gain**: the gain is related to the sensor sensitivity and affects how bright or dark the final image will be. + +With the following functions, you can control the camera gain: + +```python +sensor.set_auto_gain(True, gain_db_ceiling=16.0) # True = auto gain enabled, with a max limited to gain_db_ceiling parameter. +sensor.set_auto_gain(False, gain_db=8.0) # False = auto gain disabled, fixed to gain_db parameter. +``` +![Auto Gain example](assets/gain.gif) + +**Orientation**: flip the image captured to meet your application's needs. + +With the following functions, you can control the image orientation: + +```python +sensor.set_hmirror(True) # Enable horizontal mirror | undo the mirror if False +sensor.set_vflip(True) # Enable the vertical flip | undo the flip if False +``` + +You can find complete `Sensor Control` examples in **File > Examples > Camera > Sensor Control** of the OpenMV IDE. + +### Bar and QR Codes + +The Vision Shield is ideal for production line inspections, in these examples, we are going to be locating and reading bar codes and QR codes. + +#### Bar Codes + +This example code can be found in **File > Examples > Barcodes** in the OpenMV IDE. + +```python +import sensor +import image +import time +import math + +sensor.reset() +sensor.set_pixformat(sensor.GRAYSCALE) +sensor.set_framesize(sensor.QVGA) # High Res! +sensor.set_windowing((640, 80)) # V Res of 80 == less work (40 for 2X the speed). +sensor.skip_frames(time=2000) +sensor.set_auto_gain(False) # must turn this off to prevent image washout... +sensor.set_auto_whitebal(False) # must turn this off to prevent image washout... +clock = time.clock() + +# Barcode detection can run at the full 640x480 resolution of your OpenMV Cam's. 
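+# The barcode_name() helper below maps the numeric type value returned by find_barcodes() to a readable label.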
+ +def barcode_name(code): + if code.type() == image.EAN2: + return "EAN2" + if code.type() == image.EAN5: + return "EAN5" + if code.type() == image.EAN8: + return "EAN8" + if code.type() == image.UPCE: + return "UPCE" + if code.type() == image.ISBN10: + return "ISBN10" + if code.type() == image.UPCA: + return "UPCA" + if code.type() == image.EAN13: + return "EAN13" + if code.type() == image.ISBN13: + return "ISBN13" + if code.type() == image.I25: + return "I25" + if code.type() == image.DATABAR: + return "DATABAR" + if code.type() == image.DATABAR_EXP: + return "DATABAR_EXP" + if code.type() == image.CODABAR: + return "CODABAR" + if code.type() == image.CODE39: + return "CODE39" + if code.type() == image.PDF417: + return "PDF417" + if code.type() == image.CODE93: + return "CODE93" + if code.type() == image.CODE128: + return "CODE128" + + +while True: + clock.tick() + img = sensor.snapshot() + codes = img.find_barcodes() + for code in codes: + img.draw_rectangle(code.rect()) + print_args = ( + barcode_name(code), + code.payload(), + (180 * code.rotation()) / math.pi, + code.quality(), + clock.fps(), + ) + print( + 'Barcode %s, Payload "%s", rotation %f (degrees), quality %d, FPS %f' + % print_args + ) + if not codes: + print("FPS %f" % clock.fps()) +``` + +The format, payload, orientation and quality will be printed out in the Serial Monitor when a bar code becomes readable. + +![Bar codes reading](assets/bar-codes.gif) + +#### QR Codes + +This example code can be found in **File > Examples > Barcodes** in the OpenMV IDE. + +```python +import sensor +import time + +sensor.reset() +sensor.set_pixformat(sensor.GRAYSCALE) +sensor.set_framesize(sensor.QVGA) +sensor.skip_frames(time=2000) +sensor.set_auto_gain(False) # must turn this off to prevent image washout... +clock = time.clock() + +while True: + clock.tick() + img = sensor.snapshot() + img.lens_corr(1.8) # strength of 1.8 is good for the 2.8mm lens. + for code in img.find_qrcodes(): + img.draw_rectangle(code.rect(), color=(255, 255, 0)) + print(code) + print(clock.fps()) +``` +The coordinates, size, and payload will be printed out in the Serial Monitor when a QR code becomes readable. + +![QR codes reading](assets/qr.gif) + +### Face Tracking + +You can track faces using the built-in FOMO face detection model. This example can be found in **File > Examples > Machine Learning > TensorFlow > tf_object_detection.py**. + +This script will draw a circle on each detected face and will print their coordinates in the Serial Monitor. + +```python +import sensor +import time +import ml +from ml.utils import NMS +import math +import image + +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) +sensor.set_windowing((240, 240)) # Set 240x240 window. +sensor.skip_frames(time=2000) # Let the camera adjust. + +min_confidence = 0.4 +threshold_list = [(math.ceil(min_confidence * 255), 255)] + +# Load built-in FOMO face detection model +model = ml.Model("fomo_face_detection") +print(model) + +# Alternatively, models can be loaded from the filesystem storage. +# model = ml.Model('.tflite', load_to_fb=True) +# labels = [line.rstrip('\n') for line in open("labels.txt")] + +colors = [ # Add more colors if you are detecting more than 7 types of classes at once. 
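+    # Each tuple below is an (R, G, B) color used to draw the detections of the corresponding class index.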
+ (255, 0, 0), + (0, 255, 0), + (255, 255, 0), + (0, 0, 255), + (255, 0, 255), + (0, 255, 255), + (255, 255, 255), +] + + +# FOMO outputs an image per class where each pixel in the image is the centroid of the trained +# object. So, we will get those output images and then run find_blobs() on them to extract the +# centroids. We will also run get_stats() on the detected blobs to determine their score. +# The Non-Max-Supression (NMS) object then filters out overlapping detections and maps their +# position in the output image back to the original input image. The function then returns a +# list per class which each contain a list of (rect, score) tuples representing the detected +# objects. +def fomo_post_process(model, inputs, outputs): + n, oh, ow, oc = model.output_shape[0] + nms = NMS(ow, oh, inputs[0].roi) + for i in range(oc): + img = image.Image(outputs[0][0, :, :, i] * 255) + blobs = img.find_blobs( + threshold_list, x_stride=1, area_threshold=1, pixels_threshold=1 + ) + for b in blobs: + rect = b.rect() + x, y, w, h = rect + score = ( + img.get_statistics(thresholds=threshold_list, roi=rect).l_mean() / 255.0 + ) + nms.add_bounding_box(x, y, x + w, y + h, score, i) + return nms.get_bounding_boxes() + + +clock = time.clock() +while True: + clock.tick() + + img = sensor.snapshot() + + for i, detection_list in enumerate(model.predict([img], callback=fomo_post_process)): + if i == 0: + continue # background class + if len(detection_list) == 0: + continue # no detections for this class? + + print("********** %s **********" % model.labels[i]) + for (x, y, w, h), score in detection_list: + center_x = math.floor(x + (w / 2)) + center_y = math.floor(y + (h / 2)) + print(f"x {center_x}\ty {center_y}\tscore {score}") + img.draw_circle((center_x, center_y, 12), color=colors[i]) + + print(clock.fps(), "fps", end="\n") +``` +![Face tracking example running](assets/face.gif) + +You can load different **Machine Learning** models for detecting other objects, for example, persons. + +Download the `.tflite` and `.txt` files from this [repository](https://github.com/openmv/tensorflow-lib/tree/master/libtf/models) and copy them to the Portenta H7 local storage. + +Use the following example script to run the **person detection** model. + +```python +import sensor +import time +import ml + +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE) +sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) +sensor.set_windowing((240, 240)) # Set 240x240 window. +sensor.skip_frames(time=2000) # Let the camera adjust. + +model = ml.Model('person_detection.tflite', load_to_fb=True) +labels = [line.rstrip('\n') for line in open("person_detection.txt")] +sorted_labels = sorted(labels, reverse=False) + +clock = time.clock() +while True: + clock.tick() + + img = sensor.snapshot() + + sorted_list = sorted( + zip(sorted_labels, model.predict([img])[0].flatten().tolist()), key=lambda x: x[1] + ) + for i in range(len(sorted_labels)): + print("%s = %f" % (sorted_list[i][0], sorted_list[i][1])) + + print(clock.fps(), "fps") +``` + +When a person is in the field of view of the camera, you should see the inference result for `person` rising above 70% of certainty. + +![Person detection example running](assets/person-detect-4.gif) + +## Microphone + +The Portenta Vision Shield features two omnidirectional microphones, based on the MP34DT05 ultra-compact, low-power, and digital MEMS microphone. 
+ +![Vision Shield omnidirectional microphones](assets/microphones.png) + +**Features:** +- AOP = 122.5 dB SPL +- 64 dB signal-to-noise ratio +- Omnidirectional sensitivity +- –26 dBFS ± 1 dB sensitivity + +### FFT Example + +You can analyze frequencies present in sounds alongside their harmonic features using this example. + +By measuring the sound level on each microphone we can easily know from where the sound is coming, an interesting capability for robotics and AIoT applications. + +```python +import image +import audio +from ulab import numpy as np +from ulab import utils + +CHANNELS = 2 +SIZE = 512 // (2 * CHANNELS) + +raw_buf = None +fb = image.Image(SIZE + 50, SIZE, image.RGB565, copy_to_fb=True) +audio.init(channels=CHANNELS, frequency=16000, gain_db=24, highpass=0.9883) + + +def audio_callback(buf): + # NOTE: do Not call any function that allocates memory. + global raw_buf + if raw_buf is None: + raw_buf = buf + + +# Start audio streaming +audio.start_streaming(audio_callback) + + +def draw_fft(img, fft_buf): + fft_buf = (fft_buf / max(fft_buf)) * SIZE + fft_buf = np.log10(fft_buf + 1) * 20 + color = (222, 241, 84) + for i in range(0, SIZE): + img.draw_line(i, SIZE, i, SIZE - int(fft_buf[i]), color, 1) + + +def draw_audio_bar(img, level, offset): + blk_size = SIZE // 10 + color = (214, 238, 240) + blk_space = blk_size // 4 + for i in range(0, int(round(level / 10))): + fb.draw_rectangle( + SIZE + offset, + SIZE - ((i + 1) * blk_size) + blk_space, + 20, + blk_size - blk_space, + color, + 1, + True, + ) + + +while True: + if raw_buf is not None: + pcm_buf = np.frombuffer(raw_buf, dtype=np.int16) + raw_buf = None + + if CHANNELS == 1: + fft_buf = utils.spectrogram(pcm_buf) + l_lvl = int((np.mean(abs(pcm_buf[1::2])) / 32768) * 100) + else: + fft_buf = utils.spectrogram(pcm_buf[0::2]) + l_lvl = int((np.mean(abs(pcm_buf[1::2])) / 32768) * 100) + r_lvl = int((np.mean(abs(pcm_buf[0::2])) / 32768) * 100) + + fb.clear() + draw_fft(fb, fft_buf) + draw_audio_bar(fb, l_lvl, 0) + if CHANNELS == 2: + draw_audio_bar(fb, r_lvl, 25) + fb.flush() + +# Stop streaming +audio.stop_streaming() +``` + +With this script running you will be able to see the Fast Fourier Transform result in the image viewport. Also, the sound level on each microphone channel. + +![FFT example running](assets/fft.gif) + +### Speech Recognition Example + +You can easily implement sound/voice recognition applications using Machine Learning on the edge, this means that the Portenta H7 plus the Vision Shield can run these algorithms locally. + +Use the following script to run the example. It can also be found on **File > Examples > Audio > micro_speech.py** in the OpenMV IDE. + +```python +import time +from ml.apps import MicroSpeech + + +def callback(label, scores): + print(f'\nHeard: "{label}" @{time.ticks_ms()}ms Scores: {scores}') + + +# By default, the MicroSpeech object uses the built-in audio preprocessor (float) and the +# micro speech module for audio preprocessing and speech recognition, respectively. The +# user can override both by passing two models: +# MicroSpeech(preprocessor=ml.Model(...), micro_speech=ml.Model(...), labels=["label",...]) +speech = MicroSpeech() + +# Starts the audio streaming and processes incoming audio to recognize speech commands. +# If a callback is passed, listen() will loop forever and call the callback when a keyword +# is detected. Alternatively, `listen()` can be called with a timeout (in ms), and it +# returns if the timeout expires before detecting a keyword. 
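+# threshold=0.70 below means a keyword must score at least 70% confidence before the callback is called.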
+speech.listen(callback=callback, threshold=0.70) +``` + +In the example above, you can notice that no model is defined explicitly; this is because it will use the default built-in model, pre-trained to recognize the **yes** and **no** keywords. + +You can run the script and say the keywords; if one is recognized, the *Serial Terminal* will print the heard word and the inference scores. + +#### Custom Speech Recognition Model + +You can also run custom speech recognition models easily. To show you how, we are going to replicate the **yes** and **no** example, but this time using a `.tflite` model file. + +First, download the `.tflite` [model](https://raw.githubusercontent.com/iabdalkader/microspeech-yesno-model/main/model.tflite) and copy it to the H7 local storage. + +![Speech recognition model directory](assets/model-speech.png) + +Copy and paste the following script, based on the original example: + +```python +import time +import ml +from ml.apps import MicroSpeech + +labels = ["Silence", "Unknown", "Yes", "No"] + +def callback(label, scores): + print(f'\nHeard: "{label}" @{time.ticks_ms()}ms Scores: {scores}') + +speech = MicroSpeech(micro_speech=ml.Model('model.tflite', load_to_fb=True), labels=labels) + +speech.listen(callback=callback, threshold=0.70) +``` + +As you can see, there are some differences from the original example, of which we can highlight the following: + +- The `ml` module was imported. +- A labels list was created including the model labels in a specific order. +- The `MicroSpeech()` function has been populated with the model and labels list as arguments. + +Now, just say `yes` or `no` and you will see the inference result in the OpenMV Serial Terminal, just as with the original example. + +![Speech recognition example](assets/ml-inference-2.png) + +***If you want to create a custom model `.tflite` file, you can do it with your own keywords or sounds using [Edge Impulse](https://docs.edgeimpulse.com/docs/edge-ai-hardware/mcu/arduino-portenta-h7).*** + +## Machine Learning Tool + +The main features of the Portenta Vision Shield are the audio and video capabilities. This makes it a perfect option for a nearly endless range of machine learning applications. + +Creating this type of application has never been easier thanks to our Machine Learning Tool powered by Edge Impulse®, where we can easily create __Audio__, __Motion__, __Proximity__ and __Image__ processing models in a __No-Code__ environment. + +The first step to start creating awesome artificial intelligence and machine learning projects is to create an [Arduino Cloud](https://cloud.arduino.cc/home/) account. + +There you will find a dedicated integration called __Machine Learning Tools__. + +![Machine Learning Tools on Arduino Cloud](assets/ml-tools.png) + +Once in, create a new project and give it a name. + +![Creating a new project](assets/create-pro.png) + +Enter your newly created project and the landing page will look like the following: + +![Vision Shield project page](assets/ei-landing.png) + +### Edge Impulse® Environment Setup + +Now, it is time to set up the __Edge Impulse®__ environment on your PC. For this, follow [these](https://docs.edgeimpulse.com/docs/tools/edge-impulse-cli/cli-installation) instructions to install the __Edge Impulse CLI__.
+ +***For Windows users: make sure to install [Visual Studio Community](https://visualstudio.microsoft.com/downloads/) and [Visual Studio Build Tools](https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2022).*** + +- Download and install the latest __Arduino CLI__ from [here](https://arduino.github.io/arduino-cli/0.35/installation/). ([Video Guide for Windows](https://www.youtube.com/watch?v=1jMWsFER-Bc)) + +- Download the [latest Edge Impulse® firmware](https://cdn.edgeimpulse.com/firmware/arduino-portenta-h7.zip) for the Portenta H7, and unzip the file. + +- Open the flash script for your operating system (`flash_windows.bat`, `flash_mac.command` or `flash_linux.sh`) to flash the firmware. + +- To test if the __Edge Impulse CLI__ was installed correctly, open the __Command Prompt__ or your favorite terminal and run: + + `edge-impulse-daemon` + + If everything goes okay, you should be asked for your Edge Impulse account credentials. + + ![Edge Impulse Daemon](assets/cmd.png) + +- Enter your account username or e-mail address and your password. +- Select the project you have created on the Arduino ML Tools, it will be listed. +- Give your device a name and wait for it to connect to the platform. + + ![H7 + Vision Shield correctly connected to ML Tools](assets/cmd-connected.png) + +### Uploading Sensor Data + +The first thing to start developing a machine learning project is to create a _dataset_ for your model. This means, uploading _data_ to your model from the Vision Shield sensors. + +To upload data from your Vision Shield on the Machine Learning Tools platform, navigate to __Data Acquisition__. + +![Data Acquisition section](assets/ml-tools-upload.png) + +In this section, you will be able to select the Vision Shield onboard sensors individually. + +This is the supported sensors list: +- Built-in microphone +- Camera (320x240) +- Camera (160x160) +- Camera (128x96) + +![Samples uploaded using the Vision Shield](assets/ml-tools-data.png) + +Now you know how to start with our __Machine Learning Tools__ creating your dataset from scratch, you can get inspired by some of our ML projects listed below: + +- [Image Classification with Edge Impulse®](https://docs.arduino.cc/tutorials/portenta-vision-shield/custom-machine-learning-model) (Article). + +## Ethernet (ASX00021) + +The **Portenta Vision Shield - Ethernet** gives you the possibility of connecting your Portenta H7 board to the internet using a wired connection. + +![Ethernet cable connected](assets/ethernet-connect.png) + +First, connect the Vision Shield - Ethernet to the Portenta H7. Now connect the USB-C® cable to the Portenta H7 and your computer. Lastly, connect the Ethernet cable to the Portenta Vision Shield's Ethernet port and your router or modem. + +Now you are ready to test the connectivity with the following MicroPython script. This example lets you know if an Ethernet cable is connected successfully to the shield. + +```python +import network +import time + +lan = network.LAN() + +# Make sure Eth is not in low-power mode. +lan.config(low_power=False) + +# Delay for auto negotiation +time.sleep(3.0) + +while True: + print("Cable is", "connected." if lan.status() else "disconnected.") + time.sleep(1.0) +``` + If the physical connection is detected, in the OpenMV Serial Monitor, you will see the following message: + + `Cable is connected.` + +Once the connection is confirmed, we can try to connect to the internet using the example script below. 
+ +This example lets you gather the current time from an NTP server. + +```python +import network +import socket +import struct +import time + +TIMESTAMP = 2208988800 + (3600*4) # (3600*4) is used to set the Time Zone (UTC-4) + +if time.gmtime(0)[0] == 2000: + TIMESTAMP += 946684800 + +# Create new socket +client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + +# Get addr info via DNS +addr = socket.getaddrinfo("pool.ntp.org", 123)[0][4] + +# Send query +client.sendto("\x1b" + 47 * "\0", addr) +data, address = client.recvfrom(1024) + +# Print time +t = struct.unpack(">IIIIIIIIIIII", data)[10] - TIMESTAMP +print("Year:%d Month:%d Day:%d Time: %d:%d:%d" % (time.localtime(t)[0:6])) +``` +Run the script and the current date and time will be printed in the OpenMV IDE Serial Monitor. + +![Ethernet connection example script](assets/ntp.png) + +***If you want to learn more, check the other Ethernet examples in the OpenMV IDE.*** + +## LoRa® (ASX00026) + +The **Vision Shield - LoRa®** can extend our project connectivity by leveraging it LoRa® module for long-range communication in remote areas with a lack of internet access. Powered by the Murata CMWX1ZZABZ module which contains an STM32L0 processor along with a Semtech SX1276 Radio. + +![LoRa® antenna connection](assets/antenna.png) + +To test the LoRa® connectivity, first, connect the Vision Shield - LoRa® to the Portenta H7. Attach the LoRa® antenna to its respective connector. Now connect the USB-C® cable to the Portenta H7 and your computer. + +***Follow this [guide](https://docs.arduino.cc/tutorials/portenta-vision-shield/things-network-openmv) to learn how to set up and create your __end device__ on The Things Network.*** + +Important hardware LoRa® configurations are listed below: + +| **Setting** | **Compatibility** | +| :-----------------: | :---------------: | +| LoRaWAN MAC Version | V1.0.2 | +| Class | A or C | + +The following MicroPython script lets you connect to The Things Network using LoRaWAN® and send a `Hello World` message to it. + +```python +from lora import * + +lora = Lora(band=BAND_AU915, poll_ms=60000, debug=False) + +print("Firmware:", lora.get_fw_version()) +print("Device EUI:", lora.get_device_eui()) +print("Data Rate:", lora.get_datarate()) +print("Join Status:", lora.get_join_status()) + +# Example keys for connecting to the backend +appEui = "*****************" # now called JoinEUI +appKey = "*****************************" + +try: + lora.join_OTAA(appEui, appKey) + # Or ABP: + # lora.join_ABP(devAddr, nwkSKey, appSKey, timeout=5000) +# You can catch individual errors like timeout, rx etc... +except LoraErrorTimeout as e: + print("Something went wrong; are you indoor? Move near a window and retry") + print("ErrorTimeout:", e) +except LoraErrorParam as e: + print("ErrorParam:", e) + +print("Connected.") +lora.set_port(3) + +try: + if lora.send_data("HeLoRA world!", True): + print("Message confirmed.") + else: + print("Message wasn't confirmed") + +except LoraErrorTimeout as e: + print("ErrorTimeout:", e) + +# Read downlink messages +while True: + if lora.available(): + data = lora.receive_data() + if data: + print("Port: " + data["port"]) + print("Data: " + data["data"]) + lora.poll() + sleep_ms(1000) +``` + +Find the frequency used in your country for **The Things Network** on this [list](https://www.thethingsnetwork.org/docs/lorawan/frequencies-by-country/) and modify the parameter in the script within the following function. 
+
+## LoRa® (ASX00026)
+
+The **Vision Shield - LoRa®** can extend your project's connectivity by leveraging its LoRa® module for long-range communication in remote areas without internet access. It is powered by the Murata CMWX1ZZABZ module, which contains an STM32L0 processor along with a Semtech SX1276 radio.
+
+![LoRa® antenna connection](assets/antenna.png)
+
+To test the LoRa® connectivity, first connect the Vision Shield - LoRa® to the Portenta H7 and attach the LoRa® antenna to its respective connector. Now connect the USB-C® cable to the Portenta H7 and your computer.
+
+***Follow this [guide](https://docs.arduino.cc/tutorials/portenta-vision-shield/things-network-openmv) to learn how to set up and create your __end device__ on The Things Network.***
+
+Important hardware LoRa® configurations are listed below:
+
+| **Setting**         | **Compatibility** |
+| :-----------------: | :---------------: |
+| LoRaWAN MAC Version | V1.0.2            |
+| Class               | A or C            |
+
+The following MicroPython script lets you connect to The Things Network using LoRaWAN® and send a `Hello World` message to it.
+
+```python
+from lora import *
+from time import sleep_ms  # needed for the sleep_ms() call in the receive loop below
+
+lora = Lora(band=BAND_AU915, poll_ms=60000, debug=False)
+
+print("Firmware:", lora.get_fw_version())
+print("Device EUI:", lora.get_device_eui())
+print("Data Rate:", lora.get_datarate())
+print("Join Status:", lora.get_join_status())
+
+# Example keys for connecting to the backend
+appEui = "*****************" # now called JoinEUI
+appKey = "*****************************"
+
+try:
+    lora.join_OTAA(appEui, appKey)
+    # Or ABP:
+    # lora.join_ABP(devAddr, nwkSKey, appSKey, timeout=5000)
+# You can catch individual errors like timeout, rx etc...
+except LoraErrorTimeout as e:
+    print("Something went wrong; are you indoors? Move near a window and retry")
+    print("ErrorTimeout:", e)
+except LoraErrorParam as e:
+    print("ErrorParam:", e)
+
+print("Connected.")
+lora.set_port(3)
+
+try:
+    if lora.send_data("HeLoRA world!", True):
+        print("Message confirmed.")
+    else:
+        print("Message wasn't confirmed")
+
+except LoraErrorTimeout as e:
+    print("ErrorTimeout:", e)
+
+# Read downlink messages
+while True:
+    if lora.available():
+        data = lora.receive_data()
+        if data:
+            print("Port: " + data["port"])
+            print("Data: " + data["data"])
+    lora.poll()
+    sleep_ms(1000)
+```
+
+Find the frequency used in your country for **The Things Network** on this [list](https://www.thethingsnetwork.org/docs/lorawan/frequencies-by-country/) and modify the `band` parameter passed to the `Lora()` constructor accordingly.
+
+```python
+lora = Lora(band=BAND_AU915, poll_ms=60000, debug=False) # change the band to yours, e.g. BAND_US915
+```
+
+Define your application `appEui` and `appKey` in the MicroPython script so the messages are correctly authenticated by the network server.
+
+```python
+appEui = "*****************" # now called JoinEUI
+appKey = "*****************************"
+```
+
+After configuring your credentials and frequency band, you can run the script. You must be in an area with LoRaWAN® coverage; if not, the code will print an alert advising you to move near a window.
+
+![LoRaWAN® uplink received on TTN](assets/lora.png)
+
+***You can set up your own LoRaWAN® network using our [LoRa® gateways](https://www.arduino.cc/pro/lora-gateways/).***
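+
+If your application only needs to report a value periodically, the same `lora` object can be reused in a simple uplink loop. The sketch below is a minimal pattern assembled only from the calls shown in the script above; the payload string and the roughly five-minute interval are arbitrary examples, and the second argument of `send_data()` is assumed to request a confirmed uplink, as the original example suggests. Keep payloads short and respect your region's duty-cycle limits:
+
+```python
+from time import sleep_ms
+
+counter = 0
+
+while True:
+    counter += 1
+    try:
+        if lora.send_data("count:%d" % counter, True):
+            print("Uplink %d confirmed." % counter)
+    except LoraErrorTimeout as e:
+        print("ErrorTimeout:", e)
+
+    # Poll for downlink messages between uplinks (~5 minutes)
+    for _ in range(300):
+        if lora.available():
+            data = lora.receive_data()
+            if data:
+                print("Port: " + data["port"])
+                print("Data: " + data["data"])
+        lora.poll()
+        sleep_ms(1000)
+```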
+
+## Support
+
+If you encounter any issues or have questions while working with the Vision Shield, we provide various support resources to help you find answers and solutions.
+
+### Help Center
+
+Explore our [Help Center](https://support.arduino.cc/hc/en-us), which offers a comprehensive collection of articles and guides for the Vision Shield. The Arduino Help Center is designed to provide in-depth technical assistance and help you make the most of your device.
+
+- [Vision Shield Help Center page](https://support.arduino.cc/hc/en-us/sections/360004767859-Portenta-Family)
+
+### Forum
+
+Join our community forum to connect with other Portenta Vision Shield users, share your experiences, and ask questions. The forum is an excellent place to learn from others, discuss issues, and discover new ideas and projects related to the Vision Shield.
+
+- [Vision Shield category in the Arduino Forum](https://forum.arduino.cc/c/hardware/portenta/portenta-vision-shield/177)
+
+### Contact Us
+
+Please get in touch with our support team if you need personalized assistance or have questions not covered by the help and support resources described before. We're happy to help you with any issues or inquiries about the Vision Shield.
+
+- [Contact us page](https://www.arduino.cc/en/contact-us/)
+
diff --git a/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/hero-banner.png b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/hero-banner.png
new file mode 100644
index 0000000000..f077b919f8
Binary files /dev/null and b/content/hardware/04.pro/shields/portenta-vision-shield/tutorials/user-manual/hero-banner.png differ
diff --git a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/1000-Hz.png b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/1000-Hz.png
new file mode 100644
index 0000000000..710e9d4b79
Binary files /dev/null and b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/1000-Hz.png differ
diff --git a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/500-Hz.png b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/500-Hz.png
new file mode 100644
index 0000000000..f6dce8f95f
Binary files /dev/null and b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/500-Hz.png differ
diff --git a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/adc-input.svg b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/adc-input.svg
new file mode 100644
index 0000000000..9c01902d33
--- /dev/null
+++ b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/adc-input.svg
@@ -0,0 +1,878 @@
+[878-line SVG source omitted: ADC input wiring diagram]
diff --git a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/alexa.png b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/alexa.png
new file mode 100644
index 0000000000..a7f3af554a
Binary files /dev/null and
b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/alexa.png differ
diff --git a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/digital-io-mosfet.svg b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/digital-io-mosfet.svg
new file mode 100644
index 0000000000..ecb8823014
--- /dev/null
+++ b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/digital-io-mosfet.svg
@@ -0,0 +1,916 @@
+[916-line SVG source omitted: digital I/O MOSFET driver wiring diagram]
diff --git a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/dsp-install.png b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/dsp-install.png
new file mode 100644
index 0000000000..5783932098
Binary files /dev/null and b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/dsp-install.png differ
diff --git a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/edge-impulse.png b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/edge-impulse.png
new file mode 100644
index 0000000000..8d87d61ab8
Binary files /dev/null and b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/edge-impulse.png differ
diff --git a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/fw_update.png b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/fw_update.png
new file mode 100644
index 0000000000..b59eb42a36
Binary files /dev/null and b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/fw_update.png differ
diff --git a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/imu_gyro.gif
b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/imu_gyro.gif new file mode 100644 index 0000000000..42f36415c3 Binary files /dev/null and b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/imu_gyro.gif differ diff --git a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/ndp.png b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/ndp.png new file mode 100644 index 0000000000..1d7df26958 Binary files /dev/null and b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/ndp.png differ diff --git a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/nicla-voice-pinout.png b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/nicla-voice-pinout.png index c1bdffcd9a..b2076ef4b6 100644 Binary files a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/nicla-voice-pinout.png and b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/nicla-voice-pinout.png differ diff --git a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/nv_acc_gyro_test.rar b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/nv_acc_gyro_test.rar deleted file mode 100644 index 97890cde80..0000000000 Binary files a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/nv_acc_gyro_test.rar and /dev/null differ diff --git a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/nv_acc_gyro_test.zip b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/nv_acc_gyro_test.zip new file mode 100644 index 0000000000..198d2a6a1f Binary files /dev/null and b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/nv_acc_gyro_test.zip differ diff --git a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/user-manual-1.png b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/user-manual-1.png deleted file mode 100644 index 0d06b2ab5e..0000000000 Binary files a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/user-manual-1.png and /dev/null differ diff --git a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/user-manual-14.gif b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/user-manual-14.gif deleted file mode 100644 index 5dadceed69..0000000000 Binary files a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/user-manual-14.gif and /dev/null differ diff --git a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/user-manual-4.png b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/user-manual-4.png deleted file mode 100644 index 47acb89f0a..0000000000 Binary files a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/assets/user-manual-4.png and /dev/null differ diff --git a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/content.md b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/content.md index 8f2f89d09d..50999eda55 100644 --- a/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/content.md +++ b/content/hardware/06.nicla/boards/nicla-voice/tutorials/user-manual/content.md @@ -8,7 +8,7 @@ tags: - Cheat sheet - RGB - Communication -author: 'José Bagur and Benjamin Dannegård' +author: 'José Bagur, Benjamin Dannegård and Christopher Méndez' hardware: - 
hardware/06.nicla/boards/nicla-voice software: @@ -61,7 +61,7 @@ Here is an overview of the board's architecture's main components shown in the i The **Arduino Mbed OS Nicla Boards** core contains the libraries and examples you need to work with the board's components, such as its IMU, magnetometer, and onboard microphone. To install the core for Nicla boards, navigate to **Tools > Board > Boards Manager** or click the Boards Manager icon in the left tab of the IDE. In the Boards Manager tab, search for `nicla` and install the latest `Arduino Mbed OS Nicla Boards` version. -![Installing the Arduino Mbed OS Nicla Boards core in the Arduino IDE bootloader](assets/user-manual-1.png) +![Installing the Arduino Mbed OS Nicla Boards core in the Arduino IDE bootloader](assets/dsp-install.png) ### Pinout @@ -128,52 +128,61 @@ nicla::disableCharging(); ### NDP120 Processor Firmware Update +![NDP Syntiant AI Processor](assets/ndp.png) + It is recommended to update the NDP120 processor firmware and the built-in speech recognition model to the latest release. Follow these three steps to complete the update process: -1. Upload the `Syntiant_upload_fw_ymodem` sketch. This sketch can be found in the board's built-in examples by navigating to **File -> Examples -> NDP -> Syntiant_upload_fw_ymodem**. **Remember to select the board in the Arduino IDE first before navigating to the examples**. -2. After uploading the sketch, **format your board's external Flash memory** before uploading the updated NDP120 processor firmwares files. You can do this by navigating to the Arduino IDE Serial Monitor and typing `F` and then Enter. -3. Extract [this .zip file](assets/nicla_voice_uploader_and_firmwares.zip), which contains the compiled uploaders for various operating systems, and the updated NDP120 processor firmware and speech recognition model, in a known location on your computer. -4. Open a new terminal in the location where the .zip file was extracted and execute the following command: +1. Select the Nicla Voice board in the Arduino IDE so you can have access to the board built-in examples. +2. Upload the `Syntiant_upload_fw_ymodem` sketch. This sketch can be found in the board's built-in examples by navigating to **File > Examples > NDP > Syntiant_upload_fw_ymodem**. +3. After uploading the sketch, **format your board's external Flash memory** before uploading the updated NDP120 processor firmware files. You can do this by navigating to the Arduino IDE Serial Monitor and typing `F` and then `Enter`. +4. Extract [this .zip file](assets/nicla_voice_uploader_and_firmwares.zip), which contains the compiled uploaders for various operating systems, and the updated NDP120 processor firmware and speech recognition model, in a known location on your computer. +5. Open a new terminal in the location where the .zip file was extracted and execute the following command: ```bash - syntiant-uploader send -m "Y" -w "Y" -p $portName $filename + syntiant-uploader- send -m "Y" -w "Y" -p $portName $filename ``` - Replace `portName` and `filename` with the relevant information. Three different files must be uploaded to the board by executing the following three commands, for example in Windows the commands are the following: + Replace `portName` and `filename` with the relevant information. 
Three different files must be uploaded to the board by executing the following three commands, for example on **Windows** the commands are the following: ```bash - ./syntiant-uploader send -m "Y" -w "Y" -p COM6 mcu_fw_120_v91.synpkg + ./syntiant-uploader-win send -m "Y" -w "Y" -p COM12 mcu_fw_120_v91.synpkg ``` ```bash - ./syntiant-uploader send -m "Y" -w "Y" -p COM6 dsp_firmware_v91.synpkg + ./syntiant-uploader-win send -m "Y" -w "Y" -p COM12 dsp_firmware_v91.synpkg ``` ```bash - ./syntiant-uploader send -m "Y" -w "Y" -p COM6 model_name.synpkg + ./syntiant-uploader-win send -m "Y" -w "Y" -p COM12 alexa_334_NDP120_B0_v11_v91.synpkg ``` + ***As we are using Windows, the command used is `syntiant-uploader-win`, use your OS respective one.*** + Ensure all executed commands return a `filename sent successful` message in the console, as shown in the image below. - ![Uploader feedback messages](assets/user-manual-4.png) + ![Uploader feedback messages](assets/fw_update.png) After uploading the three files, your board's firmware is updated to the latest release and ready to be used. #### External Memory Format -

Your board NDP120 processor files (firmware and models) are stored in your board's external Flash memory. It is recommended to **format your Nicla Voice external Flash memory** every time you are going to update the processor firmware or when you are going to update/add models to the external Flash memory. Follow these steps to perform the external memory format process: -1. Upload the `Syntiant_upload_fw_ymodem` sketch. This sketch can be found in the board's built-in examples by navigating to **File -> Examples -> NDP -> Syntiant_upload_fw_ymodem**. **Remember to select the board in the Arduino IDE first before navigating to the examples**. -2. After uploading the sketch, navigate to the IDE's Serial Monitor, type `F`, and press `Enter`. Your board's external memory should be formatted now, you can confirm this by typing an `L` and pressing `Enter`. +1. Select the Nicla Voice board in the Arduino IDE so you can have access to the board built-in examples. +2. Upload the `Syntiant_upload_fw_ymodem` sketch. This sketch can be found in the board's built-in examples by navigating to **File > Examples > NDP > Syntiant_upload_fw_ymodem**. +3. After uploading the sketch, navigate to the IDE's Serial Monitor, type `F`, and press `Enter`. Your board's external memory should be formatted now, you can confirm this by typing an `L` and pressing `Enter`. After completing this process, you can upload NDP processor's firmware and model files to your board's external memory without issues as explained before. ### Built-in Speech Recognition Example -The speech recognition example can be found in the board's built-in examples by navigating to **File -> Examples -> NDP -> AlexaDemo**. After successfully updating the NDP120 processor firmware and the speech recognition model to the latest release, we can upload the speech recognition example to the Nicla Voice. To test the example, say "Alexa"; this should make the onboard LED of the Nicla Voice blink blue if the keyword "Alexa" is recognized. If there is no response from the board, try speaking from a closer proximity or louder. You should also see in the Serial Monitor if the word "Alexa" was detected, as shown in the image below: +The speech recognition example can be found in the board's built-in examples by navigating to **File > Examples > NDP > AlexaDemo**. + +![Alexa speech recognition example](assets/alexa.png) + +After successfully updating the NDP120 processor firmware and the speech recognition model to the latest release, we can upload the speech recognition example to the Nicla Voice. To test the example, say "Alexa"; this should make the onboard LED of the Nicla Voice blink blue if the keyword "Alexa" is recognized. If there is no response from the board, try speaking from a closer proximity or louder. You should also see in the Serial Monitor if the word "Alexa" was detected, as shown in the image below: ![AlexaDemo example feedback in the Arduino IDE Serial Monitor](assets/user-manual-5.png) @@ -192,31 +201,44 @@ The Nicla Voice has **two analog input pins**, mapped as follows: | `ADC1`/`P0_02` | `A0` | | `ADC2`/`P0_30` | `A1` | -Both pins can be used through the built-in functions of the Arduino programming language. The example code shown below reads the voltage value from a potentiometer connected to `A0` and displays it on the IDE Serial Monitor: +Both pins can be used through the built-in functions of the Arduino programming language. 
+ +Nicla boards ADC can be configured to 8, 10 or 12 bits defining the argument of the following function respectively (default is 10 bits): + +```arduino +analogReadResolution(12); // ADC resolution set to 12 bits (0-4095) +``` + +***The Nicla boards ADC reference voltage is fixed to 1.8V, this means that it will map the ADC range from 0 to 1.8 volts.*** + +The example code shown below reads the voltage value from a potentiometer connected to `A0` and displays it on the IDE Serial Monitor. To understand how to properly connect a potentiometer to the Nicla Voice pins, take the following image as a reference: + +![ADC input example wiring](assets/adc-input.svg) ```arduino -// Define the potentiometer pin and variable to store its value -int potentiometerPin = A0; -int potentiometerValue = 0; +#include "Nicla_System.h" + +int sensorPin = A0; // select the input pin for the potentiometer +int sensorValue = 0; // variable to store the value coming from the sensor void setup() { - // Initialize Serial communication - Serial1.begin(9600); + + analogReadResolution(12); // ADC bits configuration + nicla::begin(); // Nicla peripherals initialization, this enables the VDDIO_EXT 3.3V output. + Serial.begin(115200); // Serial initialization } void loop() { - // Read the voltage value from the potentiometer - potentiometerValue = analogRead(potentiometerPin); - - // Print the potentiometer voltage value to the Serial Monitor - Serial1.print("- Potentiometer voltage value: "); - Serial1.println(potentiometerValue); - - // Wait for 1000 milliseconds + // read the value from the sensor: + sensorValue = analogRead(sensorPin); + // print the value + Serial.println(sensorValue); delay(1000); } ``` +***The ADC inputs support 3.3V even when the ADC reference is 1.8V. In this perspective, the ADC will not sense any change from 1.8V and above.*** + ### Digital Pins The Nicla Voice has **twelve digital pins**, mapped as follows: @@ -238,7 +260,11 @@ The Nicla Voice has **twelve digital pins**, mapped as follows: Notice that analog pins `A0` and `A1` (`P0_02` and `P0_30`) can also be used as digital pins. Please, refer to the [board pinout section](#pins) of the user manual to find them on the board. -The digital pins of the Nicla Voice can be used as inputs or outputs through the built-in functions of the Arduino programming language. The configuration of a digital pin is done in the `setup()` function with the built-in function `pinMode()` as shown below: +The digital pins of the Nicla Voice can be used as inputs or outputs through the built-in functions of the Arduino programming language. + +***The Nicla Voice digital I/O's are low power, so to drive output devices like LEDs, resistive loads, buzzers, etc, it is recommended to use a MOSFET driver or a buffer to guarantee the required current flow. 
Learn more about the Nicla I/O's considerations [here](https://docs.arduino.cc/learn/hardware/nicla-form-factor).*** + +The configuration of a digital pin is done in the `setup()` function with the built-in function `pinMode()` as shown below: ```arduino // Pin configured as an input @@ -270,7 +296,11 @@ digitalWrite(pin, LOW); The example code shown below uses digital pin `3` to control an LED and reads the state of a button connected to digital pin `2`: +![Digital I/O example wiring](assets/digital-io-mosfet.svg) + ```arduino +#include "Nicla_System.h" + // Define button and LED pin int buttonPin = 2; int ledPin = 3; @@ -279,6 +309,9 @@ int ledPin = 3; int buttonState = 0; void setup() { + + nicla::begin(); // Nicla peripherals initialization, this enables the VDDIO_EXT 3.3V output. + // Configure button and LED pins pinMode(buttonPin, INPUT_PULLUP); pinMode(ledPin, OUTPUT); @@ -307,42 +340,88 @@ void loop() { ``` ### PWM Pins -Most digital and analog pins of the Nicla Voice can be used as PWM (Pulse Width Modulation) pins. This functionality of the Nicla Voice pins can be used with the built-in function `analogWrite()` as shown below: +Most digital and analog pins of the Nicla Voice can be used as PWM (Pulse Width Modulation) pins. This functionality of the Nicla Voice pins can be used with the built-in function `analogWrite()` as shown below: ```arduino analogWrite(pin, value); ``` -The example code shown below uses digital pin `9` PWM functionality to control the brightness of an LED connected to it: +By default, the output resolution is 8 bits, so the output value should be between 0 and 255. To set a greater resolution, do it using the built-in function `analogWriteResolution` as shown below: ```arduino -// Define the LED pin, brightness, and fade amount variables -int ledPin = 9; -int brightness = 0; -int fadeAmount = 5; +analogWriteResolution(bits); +``` + +Here is a complete example code that outputs a 50% duty-cycle PWM signal through pin 3 of the Nicla Voice: + +```arduino +#include "Nicla_System.h" + +#define out_pwm 3 // Nicla Voice pin 3 void setup() { - // Configure the LED pin as an output - pinMode(ledPin, OUTPUT); + // put your setup code here, to run once: + nicla::begin(); // Nicla peripherals initialization, this enables the VDDIO_EXT 3.3V output. + analogWriteResolution(12); + analogWrite(out_pwm, 2048); } void loop() { - // Set the brightness of the LED - analogWrite(ledPin, brightness); + // put your main code here, to run repeatedly: +} +``` - // Update the brightness value - brightness += fadeAmount; +Using this function has some limitations, for example, the PWM signal frequency is fixed at 500 Hz, and this could not be ideal for every application. - // Reverse the fade direction when reaching the limits - if (brightness <= 0 || brightness >= 255) { - fadeAmount = -fadeAmount; - } +![PWM output signal using analogWrite()](assets/500-Hz.png) + +If you need to work with a higher frequency PWM signal, you must do it by working with the PWM peripheral at a lower level as shown in the example code below: + +```arduino +#include "Nicla_System.h" +#include "nrfx_pwm.h" + +static nrfx_pwm_t pwm1 = NRFX_PWM_INSTANCE(0); + +static uint16_t /*const*/ seq1_values[] = {0}; + +static nrf_pwm_sequence_t seq1 = { + .values = { .p_common = seq1_values }, + .length = NRF_PWM_VALUES_LENGTH(seq1_values), + .repeats = 0, + .end_delay = 0 +}; + +void setup() { + + nicla::begin(); // Nicla peripherals initialization, this enables the VDDIO_EXT 3.3V output. 
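+
+  // Note: the PWM configuration below sets both the output frequency and the duty cycle:
+  // frequency = base_clock / top_value (1 MHz / 1000 = 1 kHz here), and
+  // duty cycle = seq1_values[0] / top_value (500 / 1000 = 50% here).
+  // For example, keeping base_clock at 1 MHz, a top_value of 500 with
+  // seq1_values[0] = 125 would give a 2 kHz signal at a 25% duty cycle.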
+ nrfx_pwm_config_t config1 = { + .output_pins = { + 32 + 23, // Nicla Voice pin 3 = pin P0_23 in the ANNAB112 MCU + }, + .irq_priority = APP_IRQ_PRIORITY_LOWEST, + .base_clock = NRF_PWM_CLK_1MHz, // 1 us period + .count_mode = NRF_PWM_MODE_UP, + .top_value = 1000, // PWM counter limit, this will set the final output frequency 1MHz / 1000 = 1KHz + .load_mode = NRF_PWM_LOAD_COMMON, + .step_mode = NRF_PWM_STEP_AUTO, + }; + + nrfx_pwm_init(&pwm1, &config1, NULL); + + (*seq1_values) = 500; // this variable sets the signal duty cycle, for a 50% we are using 500. (1000 / 500 = 1/2) + (void)nrfx_pwm_simple_playback(&pwm1, &seq1, 1, NRFX_PWM_FLAG_LOOP); +} + +void loop() { - // Wait for 30 milliseconds - delay(30); } ``` +The code above results in a 1KHz square waveform with a 50% duty cycle as in the image below. The frequency is defined by the `.base_clock` and `.top_value` variables, and the duty cycle by the `seq1_values` variable. + +![PWM output signal using the PWM at a lower level](assets/1000-Hz.png) + ## Onboard Sensors The Nicla Voice boards come with various onboard sensors that allow you to capture and process environmental and motion data via a high-performance microphone, a 6-axis IMU, and a 3-axis magnetometer. The onboard sensors can be used for developing various applications, such as voice recognition, gesture control, and environmental monitoring. @@ -357,7 +436,7 @@ An external PDM microphone can be connected to the board via an onboard Zero Ins ![Nicla Voice onboard ZIF connector](assets/user-manual-10.png) -The example code shown below captures audio from the onboard microphone of the Nicla Voice, compresses the audio using the G722 codec, and streams the compressed audio data to the serial port. The example can be found in the board's built-in examples by navigating to **File -> Examples -> NDP -> Record_and_stream**. +The example code shown below captures audio from the onboard microphone of the Nicla Voice, compresses the audio using the G722 codec, and streams the compressed audio data to the serial port. The example can be found in the board's built-in examples by navigating to **File > Examples > NDP > Record_and_stream**. Keep in mind that this example code requires the following libraries: @@ -460,99 +539,71 @@ In the `loop()` function: - The length of the extracted audio data is stored in the `len` variable. - The extracted audio data is passed to the G722 encoder, which compresses the audio and sends it to the serial port. -To extract the audio data on a computer, you will need to set up the serial port as raw and dump the data to a file (e.g., test.g722). Then, you can open the file with a software like [Audacity](https://www.audacityteam.org/) to play back the audio. +To extract the audio data on a **Linux computer**, you will need to set up the serial port as raw: + +```bash +stty -F /dev/ttyACM0 115200 raw +``` +Dump the data to a file (e.g., test.g722): + +```bash +cat /dev/ttyACM0 > test.g722 +``` +Then, you can open the file with a software like [Audacity](https://www.audacityteam.org/) to play back the audio. #### Machine Learning and Audio Analysis You can use the Nicla Voice and the [Machine Learning Tools](https://cloud.arduino.cc/machine-learning-tools/) of the Arduino Cloud to create your own audio analysis Machine Learning models. Check out this [tutorial](https://docs.arduino.cc/tutorials/nicla-voice/getting-started-ml) and start with Machine Learning with the Nicla Voice. 
-### IMU and Magnetometer +![Edge Impulse Studio for ML and AI](assets/edge-impulse.png) -The Nicla Voice features an advanced IMU and a magnetometer, which allows the board to sense motion, orientation, and magnetic fields. The IMU on the Nicla Voice board is the BMI270 from Bosch®. It consists of a 3-axis accelerometer and a 3-axis gyroscope. They can provide information about the board's motion, orientation, and rotation in a 3D space. The BMI270 is designed for wearables and offers low power consumption and high performance, making it suitable for various applications, such as gesture recognition, motion tracking, or stabilization. +Learn more about the Nicla Voice integration with Edge Impulse [here](https://docs.edgeimpulse.com/docs/edge-ai-hardware/mcu-+-ai-accelerators/arduino-nicla-voice). -![Nicla Voice onboard IMU](assets/user-manual-11.png) +### IMU -The onboard magnetometer of the Nicla Voice can be used to determine the board's orientation relative to Earth's magnetic field, which is helpful for compass applications, navigation, or detecting the presence of nearby magnetic objects. The magnetometer on the Nicla Voice board is the BMM150, also from Bosch®. It is a 3-axis sensor that measures the strength and direction of magnetic fields surrounding the board. +The Nicla Voice features an advanced IMU and a magnetometer, which allows the board to sense motion, orientation, and magnetic fields. The IMU on the Nicla Voice board is the BMI270 from Bosch®. It consists of a 3-axis accelerometer and a 3-axis gyroscope. They can provide information about the board's motion, orientation, and rotation in a 3D space. The BMI270 is designed for wearables and offers low power consumption and high performance, making it suitable for various applications, such as gesture recognition, motion tracking, or stabilization. -![Nicla Voice onboard magnetometer](assets/user-manual-12.png) +![Nicla Voice onboard IMU](assets/user-manual-11.png) #### Accelerometer and Gyroscope Data -The example sketch below shows how to get acceleration (m/s2) and angular velocity (in °/s) data from the onboard IMU and streams it to the Arduino IDE Serial Monitor and Serial Plotter. The sketch needs the `BMI270_Init.h` header file to be in the same directory as the sketch. You can download the example sketch and the header files [here](assets/nv_acc_gyro_test.rar). - -```arduino -/** - Nicla Voice accelerometer and gyroscope test sketch - Name: nv_acc_gyro_test.ino - Purpose: Sketch tests onboard accelerometer and gyroscope (BMI270) +The example sketch below shows how to get acceleration (m/s2) and angular velocity (in °/s) data from the onboard IMU and streams it to the Arduino IDE Serial Monitor and Serial Plotter. The sketch needs the `BMI270_Init.h` header file to be in the same directory as the sketch. You can download the example sketch and the header files [here](assets/nv_acc_gyro_test.zip). 
- @author Arduino PRO Content Team - @version 1.0 22/05/23 -*/ +***For this example to work you must update the NDP processor, see the details on this [section](#ndp120-processor-firmware-update).*** +```arduino #include "NDP.h" #include "BMI270_Init.h" // Named constants -#define READ_START_ADDRESS 0x0C -#define READ_BYTE_COUNT 16 -#define SENSOR_DATA_LENGTH 16 +#define READ_START_ADDRESS 0x0C +#define READ_BYTE_COUNT 16 +#define SENSOR_DATA_LENGTH 16 // Accelerometer range is set to +/-2g -// Raw accelerometer data is represented as a signed 16-bit integer -// Raw accelerometer data can be converted to acceleration in m/s^2 units using the following scale factor: -#define ACCEL_SCALE_FACTOR ((2.0 / 32767.0) * 9.8) +// Raw accelerometer data is represented as a signed 16-bit integer +// Raw accelerometer data can be converted to acceleration in m/s^2 units using the following scale factor: +#define ACCEL_SCALE_FACTOR ((2.0 / 32767.0) * 9.8) // Gyroscope has a sensitivity of 16.4 LSB/dps -#define GYRO_SCALE_FACTOR (1 / 16.4) - -/** - Turns on and off the onboard blue LED. - - @param label to be printed on the Serial Monitor. -*/ -void ledBlueOn(char* label) { - nicla::leds.begin(); - nicla::leds.setColor(blue); - delay(200); - nicla::leds.setColor(off); - Serial.println(label); - nicla::leds.end(); -} - -/** - Turns on and off the onboard green LED. -*/ -void ledGreenOn() { - nicla::leds.begin(); - nicla::leds.setColor(green); - delay(200); - nicla::leds.setColor(off); - nicla::leds.end(); -} - -/** - Blinks onboard red LED periodically every 200 ms. -*/ -void ledRedBlink() { - while (1) { - nicla::leds.begin(); - nicla::leds.setColor(red); - delay(200); - nicla::leds.setColor(off); - delay(200); - nicla::leds.end(); - } -} +#define GYRO_SCALE_FACTOR (1 / 16.4) // Macros for checking the sensor status. -#define CHECK_STATUS(s) do {if (s) {Serial.print("SPI access error in line "); Serial.println(__LINE__); for(;;);}} while (0) +#define CHECK_STATUS(s) \ + do { \ + if (s) { \ + Serial.print("SPI access error in line "); \ + Serial.println(__LINE__); \ + for (;;) \ + ; \ + } \ + } while (0) void setup() { int status; uint8_t __attribute__((aligned(4))) sensor_data[SENSOR_DATA_LENGTH]; - // Initiate Serial communication for debugging and monitoring. + // Initiate Serial communication for debugging and monitoring. Serial.begin(115200); // Initialize Nicla Voice board's system functions. @@ -562,19 +613,10 @@ void setup() { nicla::disableLDO(); nicla::leds.begin(); - // Set up error and event handlers: - // - In case of error, the red LED will blink. - // - In case of match, the blue LED will turn on. - // - In case of any event, the green LED will turn on. - NDP.onError(ledRedBlink); - NDP.onMatch(ledBlueOn); - NDP.onEvent(ledGreenOn); - // NDP processor initialization with firmwares and models Serial.println("- NDP processor initialization..."); NDP.begin("mcu_fw_120_v91.synpkg"); NDP.load("dsp_firmware_v91.synpkg"); - NDP.load("ei_model.synpkg"); Serial.println("- NDP processor initialization done!"); // Set the BMI270 sensor in SPI mode, then read sensor data. @@ -603,7 +645,7 @@ void setup() { status = NDP.sensorBMI270Write(0x59, 0x00); CHECK_STATUS(status); - // Sensor configuration. + // Sensor configuration. 
Serial.println("- BMI270 initialization starting..."); status = NDP.sensorBMI270Write(0x5E, sizeof(bmi270_maximum_fifo_config_file), (uint8_t*)bmi270_maximum_fifo_config_file); CHECK_STATUS(status); @@ -618,13 +660,13 @@ void setup() { // Configure the device to normal power mode with both accelerometer and gyroscope operational. // Set the accelerometer and gyroscope settings such as measurement range and data rate. - status = NDP.sensorBMI270Write(0x7D, 0x0E); // Normal power mode + status = NDP.sensorBMI270Write(0x7D, 0x0E); // Normal power mode CHECK_STATUS(status); status = NDP.sensorBMI270Write(0x40, 0xA8); // Accelerometer configuration. CHECK_STATUS(status); status = NDP.sensorBMI270Write(0x41, 0x00); // Set the accelerometer range to +/- 2g. CHECK_STATUS(status); - status = NDP.sensorBMI270Write(0x42, 0xA9); // Gyroscope configuration. + status = NDP.sensorBMI270Write(0x42, 0xA9); // Gyroscope configuration. CHECK_STATUS(status); status = NDP.sensorBMI270Write(0x43, 0x00); // Set the gyroscope range to +/- 2000 dps. CHECK_STATUS(status); @@ -658,7 +700,7 @@ void loop() { y_gyr_raw = (0x0000 | sensor_data[8] | sensor_data[9] << 8); z_gyr_raw = (0x0000 | sensor_data[10] | sensor_data[11] << 8); - // Convert raw accelerometer data to acceleration expressed in m/s^2. + // Convert raw accelerometer data to acceleration expressed in m/s^2. x_acc = x_acc_raw * ACCEL_SCALE_FACTOR; y_acc = y_acc_raw * ACCEL_SCALE_FACTOR; z_acc = z_acc_raw * ACCEL_SCALE_FACTOR; @@ -667,8 +709,8 @@ void loop() { x_gyr = x_gyr_raw * GYRO_SCALE_FACTOR; y_gyr = y_gyr_raw * GYRO_SCALE_FACTOR; z_gyr = z_gyr_raw * GYRO_SCALE_FACTOR; - - // Print accelerometer data (expressed in m/s^2). + + // Print accelerometer data (expressed in m/s^2). Serial.print("x_acc:"); Serial.print(x_acc); Serial.print(","); @@ -688,7 +730,7 @@ void loop() { Serial.print("z_gyr:"); Serial.println(z_gyr); - delay(1000); + delay(10); } ``` @@ -699,22 +741,18 @@ First, the necessary libraries are included: - `NDP.h` and `BMI270_Init.h` for the Nicla Voice board's basic functions and the IMU control. - Macros are defined for checking the status of the IMU; these macros allow the sketch to detect and handle sensor errors. -Next, user functions `ledBlueOn()`, `ledGreenOn()`, and `ledRedBlink()` definition: - -- These functions allow the onboard LEDs to flash specific colors to indicate different states: blue for a successful match, green for an event, and red to indicate an error. - Next, in the `setup()` function: -- The serial communication is initialized at a baud rate of 115200. +- The serial communication is initialized at a baud rate of `115200`. - The Nicla Voice board is initialized, and the LDO regulator (used for putting the board into power-saving mode) is disabled to avoid communication problems with the IMU. - Error and event handlers are initialized. -- NDP processor is initialized; this process includes populating the external Flash memory of the board with the NDP processor's internal microcontroller firmware (`mcu_fw_120_v91.synpkg`), the NDP processor's internal DSP firmware (`dsp_firmware_v91.synpkg`), and the Machine Learning model (`ei_model.synpkg`). +- NDP processor is initialized; this process includes populating the external Flash memory of the board with the NDP processor's internal microcontroller firmware (`mcu_fw_120_v91.synpkg`), and the NDP processor's internal DSP firmware (`dsp_firmware_v91.synpkg`). 
- The BMI270 sensor is initialized; this includes a software reset, loading the sensor configuration, and setting it into normal power mode with the accelerometer and gyroscope operational. Finally, in the `loop()` function: - Memory is allocated for the sensor data; data is then read from the sensor and stored in this allocated space. -- Raw sensor data is then parsed and extracted into raw accelerometer and gyroscope data. This data is represented as 16-bit signed integers ranging from -32 768 to 32 767. +- Raw sensor data is then parsed and extracted into raw accelerometer and gyroscope data. This data is represented as 16-bit signed integers ranging from -32768 to 32767. - Raw sensor data is converted into understandable and standard unit measurements; for the accelerometer, data is converted to meters per second squared, and for the gyroscope, data is converted to degrees per second. - Converted accelerometer and gyroscope data are printed on the Serial Monitor, allowing the user to observe sensor data in real-time. @@ -722,75 +760,36 @@ After uploading the example code, you should see accelerometer and gyroscope dat ![Nicla Voice onboard IMU data on the IDE's Serial Monitor](assets/user-manual-13.png) -Let's use also the Arduino IDE Serial Plotter to test the example IMU sketch; let's start visualizing only accelerometer data. To do so, comment the gyroscope data output as shown below: - -```arduino - // Print accelerometer data (expressed in meters per second squared). - Serial.print("x_acc:"); - Serial.print(x_acc); - Serial.print(","); - Serial.print("y_acc:"); - Serial.print(y_acc); - Serial.print(","); - Serial.print("z_acc:"); - Serial.println(z_acc); +Let's use also the Arduino IDE Serial Plotter to test the example IMU sketch, open the IDE's Serial Plotter by navigating to **Tools > Serial Plotter**. After a while, you should see a real-time graph showing data from the board's onboard accelerometer and gyroscope, as shown below: - /* Print gyroscope data (expressed in degrees per second). - Serial.print("x_gyr:"); - Serial.print(x_gyr); - Serial.print(","); - Serial.print("y_gyr:"); - Serial.print(y_gyr); - Serial.print(","); - Serial.print("z_gyr:"); - Serial.println(z_gyr); */ -``` +![Nicla Voice onboard IMU data on the IDE's Serial Plotter](assets/imu_gyro.gif) -Upload the example sketch again and open the IDE's Serial Plotter by navigating to **Tools > Serial Plotter**. After a while, you should see a real-time graph showing data from the board's onboard accelerometer, as shown below (move the board): +When the board is not moving, you should see acceleration measurements close to zero on the x and y-axis, while the z-axis will be close to 1g (approximately 9.81 m/s2), the gyroscope measurements on the three-axis will stay close to zero. +#### IMU and Machine Learning -![Nicla Voice onboard accelerometer data on the IDE's Serial Plotter](assets/user-manual-14.gif) - -When the board is not moving, you should see acceleration measurements close to zero on the x and y-axis, while the z-axis will be close to 1g (approximately 9.81 m/s2). If you want to visualize gyroscope readings, uncomment the gyroscope data output and comment on the accelerometer data output; when the board is not moving, you should see gyroscope measurements on the three-axis close to zero. - -#### Magnetometer Data +The example code below demonstrates how to use the Nicla Voice board to perform Machine Learning inference on IMU data. 
The code sets up event indicators using the onboard RGB LED and sends IMU data to the NDP processor for inference. The example can be found in the board's built-in examples by navigating to **File > Examples > NDP > IMUDemo**. -The example code below shows how to get raw magnetic field and Hall resistance data from the onboard magnetometer and stream it to the Arduino IDE Serial Monitor and Serial Plotter. You can download the example sketch [here](assets/nv_mag_test.rar). ```arduino -/** - Nicla Voice magnetometer test sketch - Name: nv_mag_test.ino - Purpose: Sketch tests onboard magnetometer (BMM150) - - @author Arduino PRO Content Team - @version 1.0 30/05/23 -*/ - #include "NDP.h" -// Named constants -#define READ_START_ADDRESS 0x42 -#define READ_BYTE_COUNT 8 -#define SENSOR_DATA_LENGTH 16 +// Set to 'true' for the lowest power consumption mode, 'false' otherwise +const bool lowestPower = false; -/** - Turns on and off the onboard blue LED. - - @param label to be printed on the Serial Monitor. -*/ +// Function to turn on the blue LED and print a label to the serial monitor if not in the lowest power mode void ledBlueOn(char* label) { nicla::leds.begin(); nicla::leds.setColor(blue); delay(200); nicla::leds.setColor(off); - Serial.println(label); + if (!lowestPower) { + Serial.println(label); + } nicla::leds.end(); } -/** - Turns on and off the onboard green LED. -*/ +// Function to turn on the green LED briefly void ledGreenOn() { nicla::leds.begin(); nicla::leds.setColor(green); @@ -799,6 +798,94 @@ void ledGreenOn() { nicla::leds.end(); } +// Function to make the red LED blink continuously +void ledRedBlink() { + while (1) { + nicla::leds.begin(); + nicla::leds.setColor(red); + delay(200); + nicla::leds.setColor(off); + delay(200); + nicla::leds.end(); + } +} + +void setup() { + Serial.begin(115200); + nicla::begin(); + nicla::disableLDO(); + nicla::leds.begin(); + + // Register event handlers for error, match, and event + NDP.onError(ledRedBlink); + NDP.onMatch(ledBlueOn); + NDP.onEvent(ledGreenOn); + + // Load Edge Impulse model and related firmware + Serial.println("Loading synpackages"); + NDP.begin("mcu_fw_120_v91.synpkg"); + NDP.load("dsp_firmware_v91.synpkg"); + NDP.load("ei_model_imu.synpkg"); + Serial.println("packages loaded"); + + NDP.getInfo(); + NDP.configureInferenceThreshold(1088); + NDP.interrupts(); + + // Enter the lowest power mode, if set + nicla::leds.end(); + if (lowestPower) { + NRF_UART0->ENABLE = 0; + } +} + +// Predefined IMU data for testing +extern "C" const unsigned char data_opensset_bin[]; +extern "C" const unsigned char data_circ_bin[]; +extern "C" const unsigned int data_opensset_bin_len; +extern "C" const unsigned int data_circ_bin_len; + +void loop() { + // Send openset data (no match expected) + Serial.println("Sending openset data... (no match expected)"); + NDP.sendData((uint8_t*)data_opensset_bin, data_opensset_bin_len); + delay(1000); + + // Send circular IMU data (match expected) + Serial.println("Sending circular IMU data.... (match expected)"); + NDP.sendData((uint8_t*)data_circ_bin, data_circ_bin_len); + delay(5000); +} +``` + +In the example code above, a Machine Learning model is loaded into the Nicla Voice board, and predefined IMU data is sent to the Machine Learning model for inferencing. 
Depending on the result, the board will light its built-in RGB LED with different colors: + +- If the model matches the input data with a known motion pattern, the built-in RGB LED is turned blue, and the event label is printed to the IDE's Serial Monitor. +- If an error occurs, the built-in RGB LED will blink red continuously. +- While an event is recognized, the built-in RGB LED is turned on green. + +To learn more about your Nicla Voice board Machine Learning capabilities, check out the following tutorial and learn how to create a simple motion detection application: + +- [Motion Detection with Nicla Voice and Machine Learning Tools](https://docs.arduino.cc/tutorials/nicla-voice/motion-detection-ml) + +### Magnetometer + +The onboard magnetometer of the Nicla Voice can be used to determine the board's orientation relative to Earth's magnetic field, which is helpful for compass applications, navigation, or detecting the presence of nearby magnetic objects. The magnetometer on the Nicla Voice board is the BMM150, also from Bosch®. It is a 3-axis sensor that measures the strength and direction of magnetic fields surrounding the board. + +![Nicla Voice onboard magnetometer](assets/user-manual-12.png) + +#### Magnetometer Data + +The example code below shows how to get raw magnetic field and Hall resistance data from the onboard magnetometer and stream it to the Arduino IDE Serial Monitor and Serial Plotter. You can download the example sketch [here](assets/nv_mag_test.rar). + +```arduino +#include "NDP.h" + +// Named constants +#define READ_START_ADDRESS 0x42 +#define READ_BYTE_COUNT 8 +#define SENSOR_DATA_LENGTH 16 + /** Blinks onboard red LED periodically every 200 ms. */ @@ -814,19 +901,21 @@ void ledRedBlink() { } // Macro definition used for checking the sensor status, print error if SPI access fails. -#define CHECK_STATUS(s) do {\ - if (s) {\ - Serial.print("SPI access error in line ");\ - Serial.println(__LINE__);\ - for(;;);\ - }\ -} while (0) +#define CHECK_STATUS(s) \ + do { \ + if (s) { \ + Serial.print("SPI access error in line "); \ + Serial.println(__LINE__); \ + for (;;) \ + ; \ + } \ + } while (0) void setup() { int status; uint8_t __attribute__((aligned(4))) sensor_data[SENSOR_DATA_LENGTH]; - // Initiate Serial communication for debugging and monitoring. + // Initiate Serial communication for debugging and monitoring. Serial.begin(115200); // Initialize Nicla Voice board's system functions. @@ -838,17 +927,13 @@ void setup() { // Set up error and event handlers: // - In case of error, the red LED will blink. - // - In case of match, the blue LED will turn on. - // - In case of any event, the green LED will turn on. + NDP.onError(ledRedBlink); - NDP.onMatch(ledBlueOn); - NDP.onEvent(ledGreenOn); // NDP processor initialization with firmwares and models. Serial.println("- NDP processor initialization..."); NDP.begin("mcu_fw_120_v91.synpkg"); NDP.load("dsp_firmware_v91.synpkg"); - NDP.load("ei_model.synpkg"); Serial.println("- NDP processor initialization done!"); // Enable power control bit @@ -856,7 +941,7 @@ void setup() { CHECK_STATUS(status); delay(20); - // Read power control byte + // Read power control byte status = NDP.sensorBMM150Read(0x4B, 1, sensor_data); CHECK_STATUS(status); @@ -871,10 +956,10 @@ void setup() { void loop() { // Allocate space for raw sensor data. - uint8_t __attribute__((aligned(4))) sensor_data[SENSOR_DATA_LENGTH]; + uint8_t __attribute__((aligned(4))) sensor_data[SENSOR_DATA_LENGTH]; // Declare variables for magnetometer data. 
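+  // Note: per the BMM150 datasheet, the X and Y channels are 13-bit, the Z channel is
+  // 15-bit and the Hall resistance channel is 14-bit, which is why the bit shifts used
+  // to assemble the raw values below differ from axis to axis.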
- int16_t x_mag_raw, y_mag_raw, z_mag_raw, hall_raw ; + int16_t x_mag_raw, y_mag_raw, z_mag_raw, hall_raw; float x_mag, y_mag, z_mag; // Read operation status variable. @@ -891,6 +976,7 @@ void loop() { // The sensor data is read into an array of bytes (8-bit values). Each measurement from the magnetometer consists // of two bytes, hence the bit shifting and bitwise OR operations to combine these two bytes into one 16-bit value. // Data for each axis (X, Y, Z) of the magnetometer is extracted from the array. + x_mag_raw = (0x0000 | sensor_data[0] >> 3 | sensor_data[1] << 5); y_mag_raw = (0x0000 | sensor_data[2] >> 3 | sensor_data[3] << 5); z_mag_raw = (0x0000 | sensor_data[4] >> 1 | sensor_data[5] << 7); @@ -909,7 +995,7 @@ void loop() { Serial.print("hall_raw:"); Serial.println(hall_raw); - delay(1000); + delay(100); } ``` @@ -920,16 +1006,16 @@ First, the necessary libraries are included: - `NDP.h` for the Nicla Voice board's basic functions and the magnetometer control. - Macros are defined for checking the status of the magnetometer; these macros allow the sketch to detect and handle sensor errors. -Next, user functions `ledBlueOn()`, `ledGreenOn()`, and `ledRedBlink()` definition: +Next, user function `ledRedBlink()` definition: -- These functions allow the onboard LEDs to flash specific colors to indicate different states: blue for a successful match, green for an event, and red to indicate an error. +- These functions allow the onboard LEDs to flash red to indicate an error. Next, in the `setup()` function: - The serial communication is initialized at a baud rate of 115200. - The Nicla Voice board is initialized, and the LDO regulator (used for putting the board into power-saving mode) is disabled to avoid communication problems with the magnetometer. - Error and event handlers are initialized. -- NDP processor is initialized; this process includes populating the external Flash memory of the board with the NDP processor's internal microcontroller firmware (`mcu_fw_120_v91.synpkg`), the NDP processor's internal DSP firmware (`dsp_firmware_v91.synpkg`), and the Machine Learning model (`ei_model.synpkg`). +- NDP processor is initialized; this process includes populating the external Flash memory of the board with the NDP processor's internal microcontroller firmware (`mcu_fw_120_v91.synpkg`) and the NDP processor's internal DSP firmware (`dsp_firmware_v91.synpkg`). - The BMM150 sensor is initialized; this includes setting it into normal operation with an output data rate (ODR) of 10 Hz. Finally, in the `loop()` function: @@ -946,108 +1032,6 @@ Now open the IDE's Serial Plotter by navigating to **Tools > Serial Plotter**. A ![Nicla Voice onboard raw magnetometer data on the IDE's Serial Plotter](assets/user-manual-16.gif) -#### IMU and Machine Learning - -The example code below demonstrates how to use the Nicla Voice board to perform Machine Learning inference on IMU data. The code sets up event indicators using the onboard RGB LED and sends IMU data to the NDP processor for inference. The example can be found in the board's built-in examples by navigating to **File > Examples > NDP > IMUDemo**. 
- - -```arduino -#include "NDP.h" - -// Set to 'true' for the lowest power consumption mode, 'false' otherwise -const bool lowestPower = false; - -// Function to turn on the blue LED and print a label to the serial monitor if not in the lowest power mode -void ledBlueOn(char* label) { - nicla::leds.begin(); - nicla::leds.setColor(blue); - delay(200); - nicla::leds.setColor(off); - if (!lowestPower) { - Serial.println(label); - } - nicla::leds.end(); -} - -// Function to turn on the green LED briefly -void ledGreenOn() { - nicla::leds.begin(); - nicla::leds.setColor(green); - delay(200); - nicla::leds.setColor(off); - nicla::leds.end(); -} - -// Function to make the red LED blink continuously -void ledRedBlink() { - while (1) { - nicla::leds.begin(); - nicla::leds.setColor(red); - delay(200); - nicla::leds.setColor(off); - delay(200); - nicla::leds.end(); - } -} - -void setup() { - Serial.begin(115200); - nicla::begin(); - nicla::disableLDO(); - nicla::leds.begin(); - - // Register event handlers for error, match, and event - NDP.onError(ledRedBlink); - NDP.onMatch(ledBlueOn); - NDP.onEvent(ledGreenOn); - - // Load Edge Impulse model and related firmware - Serial.println("Loading synpackages"); - NDP.begin("mcu_fw_120_v91.synpkg"); - NDP.load("dsp_firmware_v91.synpkg"); - NDP.load("ei_model_imu.synpkg"); - Serial.println("packages loaded"); - - NDP.getInfo(); - NDP.configureInferenceThreshold(1088); - NDP.interrupts(); - - // Enter the lowest power mode, if set - nicla::leds.end(); - if (lowestPower) { - NRF_UART0->ENABLE = 0; - } -} - -// Predefined IMU data for testing -extern "C" const unsigned char data_opensset_bin[]; -extern "C" const unsigned char data_circ_bin[]; -extern "C" const unsigned int data_opensset_bin_len; -extern "C" const unsigned int data_circ_bin_len; - -void loop() { - // Send openset data (no match expected) - Serial.println("Sending openset data... (no match expected)"); - NDP.sendData((uint8_t*)data_opensset_bin, data_opensset_bin_len); - delay(1000); - - // Send circular IMU data (match expected) - Serial.println("Sending circular IMU data.... (match expected)"); - NDP.sendData((uint8_t*)data_circ_bin, data_circ_bin_len); - delay(5000); -} -``` - -In the example code above, a Machine Learning model is loaded into the Nicla Voice board, and predefined IMU data is sent to the Machine Learning model for inferencing. Depending on the result, the board will light its built-in RGB LED with different colors: - -- If the model matches the input data with a known motion pattern, the built-in RGB LED is turned blue, and the event label is printed to the IDE's Serial Monitor. -- If an error occurs, the built-in RGB LED will blink red continuously. -- While an event is recognized, the built-in RGB LED is turned on green. - -To learn more about your Nicla Voice board Machine Learning capabilities, check out the following tutorial and learn how to create a simple motion detection application: - -- [Motion Detection with Nicla Voice and Machine Learning Tools](https://docs.arduino.cc/tutorials/nicla-voice/motion-detection-ml) - ## Actuators ### RGB LED @@ -1198,9 +1182,9 @@ digitalWrite(SS, HIGH); The Nicla Voice supports I2C communication, which allows data transmission between the board and other I2C-compatible devices. 
The pins used in the Nicla Voice for the I2C communication protocol are the following: | **Microcontroller Pin** | **Arduino Pin Mapping** | -|:-----------------------:|:-----------------------:| -| `P0_23` | `SCL` or `3` | -| `P0_22` | `SDA` or `4` | +| :---------------------: | :---------------------: | +| `P0_23` | `I2C_SCL` or `3` | +| `P0_22` | `I2C_SDA` or `4` | Please, refer to the [board pinout section](#pins) of the user manual to find them on the board. The I2C pins are also available through the onboard ESLOV connector of the Nicla Voice. @@ -1265,9 +1249,9 @@ while (Wire.available()) { The pins used in the Nicla Voice for the UART communication protocol are the following: | **Microcontroller Pin** | **Arduino Pin Mapping** | -|:-----------------------:|:-----------------------:| -| `P0_09` | `TX` or `1` | -| `P0_20` | `RX` or `2` | +| :---------------------: | :---------------------: | +| `P0_09` | `SERIAL1_TX` or `1` | +| `P0_20` | `SERIAL1_RX` or `2` | Please, refer to the [board pinout section](#board-pinout) of the user manual to find them on the board.