diff --git a/README.md b/README.md
index f4ee4f2..156f229 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,25 @@
# edgeimpulse_ros
-ROS2 wrapper for Edge Impulse
+ROS2 wrapper for Edge Impulse on Linux.
-## How to install
+
+## 1. Topics
+
+- `/detection/input/image`, input image topic to analyze
+- `/detection/output/image`, output image annotated with bounding boxes
+- `/detection/output/info`, VisionInfo message with metadata about the loaded model
+- `/detection/output/results`, detection results as a vision_msgs Detection2DArray
+
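+A minimal sketch of a node consuming the results topic is shown below (targeting Foxy, where `Detection2D.results` holds `ObjectHypothesisWithPose` entries and the label is carried in `id`; the node name is illustrative):
+
+```python
+import rclpy
+from rclpy.node import Node
+from vision_msgs.msg import Detection2DArray
+
+
+class ResultsListener(Node):
+    def __init__(self):
+        super().__init__('results_listener')
+        # subscribe to the detections published by edgeimpulse_ros
+        self.sub = self.create_subscription(
+            Detection2DArray, '/detection/output/results', self.callback, 1)
+
+    def callback(self, msg):
+        for detection in msg.detections:
+            for hyp in detection.results:
+                # label and confidence as filled by the wrapper
+                self.get_logger().info(f'{hyp.id}: {hyp.score:.2f}')
+
+
+def main():
+    rclpy.init()
+    rclpy.spin(ResultsListener())
+
+
+if __name__ == '__main__':
+    main()
+```
+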
+## 2. Parameters
+
+- `frame_id` (**string**), _"base_link"_, frame id of the output topics
+- `model.filepath` (**string**), _""_, absolute path to the .eim model file
+- `show.overlay` (**bool**), _true_, draw bounding boxes on the output image
+- `show.labels` (**bool**), _true_, show labels on the bounding boxes
+- `show.classification_info` (**bool**), _true_, show the confidence score (0-1) of the prediction
+
+
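+The parameters can also be set from a launch file; a minimal sketch, assuming an illustrative model path and input topic (package and executable names are the ones used in "How to run" below):
+
+```python
+from launch import LaunchDescription
+from launch_ros.actions import Node
+
+
+def generate_launch_description():
+    return LaunchDescription([
+        Node(
+            package='edgeimpulse_ros',
+            executable='image_classification',
+            parameters=[{
+                'frame_id': 'base_link',
+                'model.filepath': '/home/user/model.eim',  # illustrative path to your .eim file
+                'show.overlay': True,
+                'show.labels': True,
+                'show.classification_info': True,
+            }],
+            remappings=[('/detection/input/image', '/your_image_topic')],
+        ),
+    ])
+```
+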
+## 3. How to install
1. install edge_impulse_linux:
`pip3 install edge_impulse_linux`
@@ -30,12 +47,23 @@ ROS2 wrapper for Edge Impulse
`source install/setup.bash`
-## How to run
+## 4. How to run
Launch the node:
`ros2 run edgeimpulse_ros image_classification --ros-args -p model.filepath:="" -r /detection/input/image:="/your_image_topic"`
-`
+## 5. Models
+
+Some prebuilt models are available here: [https://github.com/gbr1/edgeimpulse_example_models](https://github.com/gbr1/edgeimpulse_example_models)
+
+## 6. Known issues
+
+- this wrapper works on Foxy; Galactic and Humble support is coming soon (incompatibility with vision_msgs from ros-perception)
+- if you use a classification model, the results topic is empty
+- you cannot change the color of the bounding boxes (coming soon)
+- other model types (IMU- and sound-based ML) are unavailable
+
***Copyright © 2022 Giovanni di Dio Bruno - gbr1.github.io***
diff --git a/edgeimpulse_ros/__pycache__/image_classification.cpython-38.pyc b/edgeimpulse_ros/__pycache__/image_classification.cpython-38.pyc
index aa85919..59650d8 100644
Binary files a/edgeimpulse_ros/__pycache__/image_classification.cpython-38.pyc and b/edgeimpulse_ros/__pycache__/image_classification.cpython-38.pyc differ
diff --git a/edgeimpulse_ros/image_classification.py b/edgeimpulse_ros/image_classification.py
index 3a65bbf..424103c 100644
--- a/edgeimpulse_ros/image_classification.py
+++ b/edgeimpulse_ros/image_classification.py
@@ -13,6 +13,7 @@
# limitations under the License.
from unittest import result
from .submodules import device_patches
import cv2
@@ -27,7 +28,7 @@
from vision_msgs.msg import Detection2DArray
from vision_msgs.msg import Detection2D
from vision_msgs.msg import ObjectHypothesisWithPose
-#from vision_msgs.msg import VisionInfo
+from vision_msgs.msg import VisionInfo
import os
import time
@@ -44,16 +45,23 @@ def __init__(self):
self.occupied = False
self.img = None
+ self.info_msg = VisionInfo()
self.cv_bridge = CvBridge()
super().__init__('ei_image_classifier_node')
self.init_parameters()
self.ei_classifier = self.EI_Classifier(self.modelfile, self.get_logger())
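+        # describe the loaded Edge Impulse model in the VisionInfo message (model type, project name/owner, deploy version)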
+ self.info_msg.header.frame_id = self.frame_id
+ self.info_msg.method = self.ei_classifier.model_info['model_parameters']['model_type']
+ self.info_msg.database_location = self.ei_classifier.model_info['project']['name']+' / '+self.ei_classifier.model_info['project']['owner']
+ self.info_msg.database_version = self.ei_classifier.model_info['project']['deploy_version']
+
self.timer_parameter = self.create_timer(2,self.parameters_callback)
self.image_publisher = self.create_publisher(Image,'/detection/output/image',1)
self.results_publisher = self.create_publisher(Detection2DArray,'/detection/output/results',1)
+ self.info_publisher = self.create_publisher(VisionInfo, '/detection/output/info',1)
self.timer_classify = self.create_timer(0.01,self.classify_callback)
self.timer_classify.cancel()
@@ -137,7 +145,7 @@ def classify_callback(self):
# object with hypthothesis
obj_hyp = ObjectHypothesisWithPose()
- #obj_hyp.id =
+ obj_hyp.id = bb['label'] #str(self.ei_classifier.labels.index(bb['label']))
obj_hyp.score = bb['value']
obj_hyp.pose.pose.position.x = float(bb['x'])
obj_hyp.pose.pose.position.y = float(bb['y'])
@@ -166,6 +174,8 @@ def classify_callback(self):
# publish message
self.image_publisher.publish(self.cv_bridge.cv2_to_imgmsg(cropped,"bgr8"))
self.results_publisher.publish(results_msg)
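+            # timestamp and publish the VisionInfo message alongside the results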
+ self.info_msg.header.stamp = time_now
+ self.info_publisher.publish(self.info_msg)
self.occupied= False
self.timer_classify.cancel()