Commit
Added inference node.
vangeliq committed Oct 19, 2024
1 parent 0c6d18b commit 4c09bc5
Showing 5 changed files with 176 additions and 9 deletions.
37 changes: 30 additions & 7 deletions workspace_python/ros2_ws/src/python_workspace/README.md
@@ -1,11 +1,34 @@
Running the camera node:
ros2 run python_workspace camera_node --ros-args -p static_image_path:='/home/user/Desktop/ROS/Models/Maize Model/sample_maize_images' -p loop:=-1 -p frame_rate:=10 -p model_dimensions:=[640,640]

Running the picture node:
`ros2 run python_workspace picture_node --ros-args -p static_image_path:='./../assets/maize' -p loop:=-1 -p frame_rate:=1`
### List of Nodes

| Node Type | Subscribes To | Publishes To | Example command |
|-------------------|----------------|-----------------------------------|------|
| Picture Node | `/picture/command` | `/input_image` | `ros2 run python_workspace picture_node --ros-args -p static_image_path:='./../assets/maize' -p loop:=-1 -p frame_rate:=1`|
| Inference Node | `/input_image` | - `/bounding_box_coordinates` <br> - `/output_img` | `ros2 run python_workspace inference_node --ros-args -p weights_path:='./ros2_ws/src/python_workspace/python_workspace/scripts/yolo11n.pt'`|

ros2 run python_workspace jetson_node

ros2 run python_workspace extermination_node
### List of Topics and Data Types

| Topic Name | Description | Data Type |
|-----------------------------|--------------------------------------|--------------------|
| `/input_image` | Input image for processing | `sensor_msgs/Image`|
| `/bounding_box_coordinates` | Coordinates of detected objects (xyxy) | `std_msgs/Float32MultiArray` |
| `/output_img` | Processed output image | `sensor_msgs/Image`|
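
The annotated frames on `/output_img` can be inspected with a standard viewer such as `rqt_image_view` (if installed). Below is a minimal sketch of a downstream subscriber for `/bounding_box_coordinates` (the node name `box_listener` is hypothetical); it assumes the coordinates arrive as a flat list of floats in groups of four (`x1, y1, x2, y2`), which is how the inference node publishes them.

```python
# Minimal sketch of a downstream consumer (node name "box_listener" is hypothetical).
import rclpy
from rclpy.node import Node
from std_msgs.msg import Float32MultiArray


class BoxListener(Node):
    def __init__(self):
        super().__init__('box_listener')
        self.create_subscription(
            Float32MultiArray, 'bounding_box_coordinates', self.on_boxes, 10)

    def on_boxes(self, msg):
        data = list(msg.data)
        # Every four consecutive floats form one box: x1, y1, x2, y2
        boxes = [data[i:i + 4] for i in range(0, len(data), 4)]
        self.get_logger().info(f'received {len(boxes)} boxes: {boxes}')


def main(args=None):
    rclpy.init(args=args)
    node = BoxListener()
    rclpy.spin(node)
    node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
```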



### Other commands
#### Running the camera node:
`ros2 run python_workspace camera_node --ros-args -p static_image_path:='/home/user/Desktop/ROS/Models/Maize Model/sample_maize_images' -p loop:=-1 -p frame_rate:=10 -p model_dimensions:=[640,640]`

#### Running the picture node:

`ros2 run python_workspace picture_node --ros-args -p static_image_path:='./../assets/maize' -p loop:=-1 -p frame_rate:=1`

#### Running the inference node:

The weights path (a `.pt` file) is interpreted relative to the `ROS/workspace_python/ros2_ws` directory: the node joins relative paths onto the current working directory.

```bash
ros2 run python_workspace inference_node --ros-args -p weights_path:='./ros2_ws/src/python_workspace/python_workspace/scripts/yolo11n.pt'
```
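
The node also declares a `precision` parameter (`'fp32'` by default, with `'fp16'` listed as an alternative). It is currently stored but not yet acted on (see the TODO in `inference_node.py`), and can be set the same way as the other parameters, e.g. `-p precision:='fp16'`.
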
65 changes: 65 additions & 0 deletions workspace_python/ros2_ws/src/python_workspace/python_workspace/inference_node.py
@@ -0,0 +1,65 @@
import os
from cv_bridge import CvBridge

import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from std_msgs.msg import Int32MultiArray, Float32MultiArray

from .scripts.utils import ModelInference

class InferenceNode(Node):
    def __init__(self):
        super().__init__('inference_node')

        self.declare_parameter('weights_path', './src/python_workspace/python_workspace/scripts/best.onnx')
        self.declare_parameter('precision', 'fp32')  # fp32, fp16  # TODO: do something with strip_weights and precision

        # Resolve a relative weights path against the current working directory
        self.weights_path = self.get_parameter('weights_path').get_parameter_value().string_value
        if not os.path.isabs(self.weights_path):
            self.weights_path = os.path.join(os.getcwd(), self.weights_path)

        self.precision = self.get_parameter('precision').get_parameter_value().string_value

        # Instantiate the model here
        self.model = ModelInference(self.weights_path, self.precision)

        self.bridge = CvBridge()

        self.subscription = self.create_subscription(Image, 'input_image', self.image_callback, 10)

        # Publishers for the bounding boxes and the annotated output image
        self.box_publisher = self.create_publisher(Float32MultiArray, 'bounding_box_coordinates', 10)
        self.output_image_publisher = self.create_publisher(Image, 'output_img', 10)

    def image_callback(self, msg):
        opencv_img = self.bridge.imgmsg_to_cv2(msg, desired_encoding='passthrough')
        output_img, named_classes, confidences, boxes = self.model.inference(opencv_img)

        # Publish the flattened xyxy box coordinates (empty if nothing was detected)
        box_msg = Float32MultiArray()
        box_msg.data = boxes if len(boxes) != 0 else []
        self.box_publisher.publish(box_msg)

        # Convert the annotated output image to a ROS 2 image message and publish it
        output_image_msg = self.bridge.cv2_to_imgmsg(output_img, encoding='rgb8')
        self.output_image_publisher.publish(output_image_msg)


def main(args=None):
    rclpy.init(args=args)
    inference_node = InferenceNode()
    rclpy.spin(inference_node)
    inference_node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
0 changes: 0 additions & 0 deletions workspace_python/ros2_ws/src/python_workspace/python_workspace/scripts/__init__.py
Empty file.
76 changes: 76 additions & 0 deletions workspace_python/ros2_ws/src/python_workspace/python_workspace/scripts/utils.py
@@ -0,0 +1,76 @@
import cv2
import time, os
import tensorrt as trt
import pycuda.driver as cuda
import cupy as cp
import numpy as np
from sensor_msgs.msg import Image
from ultralytics import YOLO
from ultralytics.engine.results import Results


#utility class for the python workspace


class ModelInference:
    """
    Utility class that performs inference using a YOLO model.
    """
    def __init__(self, weights_path, precision):

        # Initialize instance variables
        self.precision = precision

        if not os.path.exists(weights_path):
            print(f"weights file not found at {weights_path}")
            raise FileNotFoundError(f"weights file not found at {weights_path}")

        self.yolo = YOLO(weights_path)

    def preprocess(self, image: np.ndarray) -> np.ndarray:
        """
        Takes in a numpy image array and returns it unchanged.
        No preprocessing is needed for YOLO.
        """
        return image

    def inference(self, image_array: np.ndarray):
        # https://docs.ultralytics.com/modes/predict/#inference-arguments
        """
        Perform inference on the image array.
        Returns the annotated image, the predicted class names,
        their confidences and the flattened bounding boxes.
        """
        height, width, _ = image_array.shape
        size = max(height, width)
        results = self.yolo.predict(image_array, imgsz=size)  # we are only predicting one image

        if results[0] is not None:
            out_img, named_classes, confidences, boxes = self.postprocess(results[0])
            return out_img, named_classes, confidences, boxes

        return None, [], [], []

    def postprocess(self, result: Results):
        """
        Postprocess the Results object returned by prediction.
        Returns the image with bounding boxes drawn, the class names,
        the confidences and the flattened xyxy bounding boxes.
        Since YOLO draws the boxes for us, we only need to convert the BGR image to RGB.
        """
        out_img = cv2.cvtColor(result.plot(), cv2.COLOR_BGR2RGB)  # convert BGR to RGB

        class_mapping = self.yolo.model.names
        named_classes = [class_mapping[int(i)] for i in result.boxes.cls]
        confidences = result.boxes.conf
        print(f"Bounding boxes (xyxy format): {result.boxes.xyxy}")
        boxes = result.boxes.xyxy.flatten().tolist()

        return out_img, named_classes, confidences, boxes

    def print_info(self):
        print(self.yolo.info())
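
A minimal standalone sketch of exercising `ModelInference` outside ROS (the file names `sample.jpg`/`annotated.jpg` and the weights path are placeholders, and the import path assumes the script sits next to `utils.py`):

```python
# Standalone sketch: run ModelInference on a single image without ROS.
# 'sample.jpg', 'annotated.jpg' and the weights path are placeholders.
import cv2
from utils import ModelInference  # adjust the import to wherever utils.py lives

model = ModelInference('yolo11n.pt', precision='fp32')
img = cv2.imread('sample.jpg')  # OpenCV loads images as BGR
out_img, names, confidences, boxes = model.inference(img)
print(names)   # detected class names
print(boxes)   # flat [x1, y1, x2, y2, ...] list, as published by the inference node
# postprocess() returns an RGB image, so convert back to BGR before saving with OpenCV
cv2.imwrite('annotated.jpg', cv2.cvtColor(out_img, cv2.COLOR_RGB2BGR))
```
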
7 changes: 5 additions & 2 deletions workspace_python/ros2_ws/src/python_workspace/setup.py
@@ -1,13 +1,14 @@
import os
from glob import glob
from setuptools import setup
from setuptools import setup, find_packages

package_name = 'python_workspace'
subfolder = f'{package_name}/scripts'

setup(
name=package_name,
version='0.0.0',
packages=[package_name],
packages=[package_name,subfolder],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
@@ -24,8 +25,10 @@
'video_node = python_workspace.video_node:main',
'camera_node = python_workspace.zed_camera_node:main',
'picture_node = python_workspace.picture_node:main',
'inference_node = python_workspace.inference_node:main',
'jetson_node = python_workspace.jetson_node:main',
'extermination_node = python_workspace.extermination_node:main'
],
},

)
