diff --git a/cpp_wip/cuda_stream_manager.cpp b/cpp_wip/cuda_stream_manager.cpp new file mode 100644 index 0000000..742f27e --- /dev/null +++ b/cpp_wip/cuda_stream_manager.cpp @@ -0,0 +1,41 @@ +// cuda_stream_manager.hpp +#pragma once +#include <cuda_runtime.h> +#include <memory> + +class CudaStreamManager { +public: + CudaStreamManager() { + // Create a single CUDA stream + cudaStreamCreate(&stream_); + + // Create CUDA events + cudaEventCreate(&preprocess_done_); + cudaEventCreate(&inference_done_); + } + + ~CudaStreamManager() { + // Destroy CUDA stream and events + cudaStreamDestroy(stream_); + cudaEventDestroy(preprocess_done_); + cudaEventDestroy(inference_done_); + } + + cudaStream_t getStream() const { + return stream_; + } + + cudaEvent_t& getPreprocessEvent() { + return preprocess_done_; + } + + cudaEvent_t& getInferenceEvent() { + return inference_done_; + } + +private: + cudaStream_t stream_; + cudaEvent_t preprocess_done_, inference_done_; +}; + +using CudaStreamManagerPtr = std::shared_ptr<CudaStreamManager>; diff --git a/cpp_wip/preprocessing_node.cpp b/cpp_wip/preprocessing_node.cpp new file mode 100644 index 0000000..add1426 --- /dev/null +++ b/cpp_wip/preprocessing_node.cpp @@ -0,0 +1,23 @@ +// preprocessing_node.cpp +#include <rclcpp/rclcpp.hpp> +#include "cuda_stream_manager.hpp" + +class PreprocessingNode : public rclcpp::Node { +public: + PreprocessingNode(const CudaStreamManagerPtr& cuda_manager) + : Node("preprocessing_node"), cuda_manager_(cuda_manager) {} + + void preprocess() { + // Perform GPU preprocessing here using cuda_manager_->getStream() + + // Signal that preprocessing is done + cudaEventRecord(cuda_manager_->getPreprocessEvent(), cuda_manager_->getStream()); + } + +private: + CudaStreamManagerPtr cuda_manager_; +}; + +// Register as a composable node +#include "rclcpp_components/register_node_macro.hpp" +RCLCPP_COMPONENTS_REGISTER_NODE(PreprocessingNode) diff --git a/python_wip/Stream_E2E.py b/python_wip/Stream_E2E.py index 6384ff9..6660970 100644 --- a/python_wip/Stream_E2E.py +++ b/python_wip/Stream_E2E.py @@ -158,4 +158,66 @@ def postprocess_callback(self, msg): # Clean up the IPC memory cuda.ipc_close_mem_handle(d_output) -# this uses unified memory... \ No newline at end of file +# this uses unified memory... 
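# A minimal sketch of what the "unified memory" note above could mean in practice, using CuPy's
# managed-memory allocator (illustrative only; the buffer name and sizes below are made up, not part of the original script):
import cupy as cp

# Route CuPy allocations through cudaMallocManaged so the same buffer is
# addressable from host and device without explicit uploads/downloads.
cp.cuda.set_allocator(cp.cuda.malloc_managed)

unified_buf = cp.zeros((1024, 1024), dtype=cp.float32)  # lives in managed (unified) memory
unified_buf += 1.0                                       # GPU kernel updates it in place
host_copy = cp.asnumpy(unified_buf)                      # copy out only when a plain NumPy array is needed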
+ +import cv2 + +stream = cv2.cuda_Stream() +gpu_image = cv2.cuda_GpuMat() +gpu_image.upload(image, stream=stream) + +# Recolor the image on the GPU +gpu_image = cv2.cuda.cvtColor(gpu_image, cv2.COLOR_BGR2GRAY, stream=stream) + +# Perform additional operations like resize +resized_image = cv2.cuda.resize(gpu_image, (640, 480), stream=stream) + +# All operations happen within the GPU stream + + +import cupy as cp +stream = cp.cuda.Stream() + +with stream: + # Create a CuPy array (on GPU) + gpu_image = cp.array(np_image) # np_image is the original CPU image + + # Normalize the image in-place + cp.subtract(gpu_image, 128, out=gpu_image) # In-place operation + + # Transpose the image + gpu_image = gpu_image.transpose((1, 0, 2)) # Change image axes (returns a view, no data copy) + + +# interoperability: +import cv2 +import cupy as cp + +# Create a CUDA stream +stream = cv2.cuda_Stream() + +# Allocate a GPU Mat in OpenCV +gpu_image = cv2.cuda_GpuMat() + +# Upload image to GPU in the stream +gpu_image.upload(image, stream=stream) + +# Get GPU pointer and wrap it as a CuPy array (no CPU-GPU copy) +ptr = gpu_image.cudaPtr() +mem = cp.cuda.UnownedMemory(ptr, image.nbytes, gpu_image) # wrap the existing device allocation without copying or taking ownership +gpu_image_cupy = cp.ndarray(image.shape, cp.uint8, cp.cuda.MemoryPointer(mem, 0)) # assumes the GpuMat rows are contiguous (no pitch padding) + +# Perform CuPy operations (like normalization; this produces a new float array) +gpu_image_cupy = gpu_image_cupy / 255.0 + +# retrieving CUDA stream handle from OpenCV CUDA stream object: +stream = cv2.cuda.Stream() # OpenCV CUDA stream +cuda_stream = stream.cudaPtr() # Extract the CUDA stream handle + +# pass it to TensorRT just like you would with a normal CUDA stream. +# (PyCUDA's memcpy_*_async helpers expect a pycuda.driver.Stream object rather than a raw handle, +# so the copies below go through the CUDA runtime API exposed by CuPy instead.) +import math +import cupy as cp + +# Assuming d_input_ptr, self.d_input, self.d_output, self.input_shape, self.output_shape, and self.exec_context are already defined. +nbytes_in = math.prod(self.input_shape) * cp.dtype(cp.float32).itemsize +cp.cuda.runtime.memcpyAsync(int(self.d_input), int(d_input_ptr), nbytes_in, cp.cuda.runtime.memcpyDeviceToDevice, cuda_stream) # Copy input data to the memory allocated for TensorRT +self.exec_context.execute_async_v2(bindings=[int(self.d_input), int(self.d_output)], stream_handle=cuda_stream) # Execute inference asynchronously using the OpenCV CUDA stream handle +output = cp.empty(self.output_shape, dtype=cp.float32) +cp.cuda.runtime.memcpyAsync(int(output.data.ptr), int(self.d_output), output.nbytes, cp.cuda.runtime.memcpyDeviceToDevice, cuda_stream) # Copy output to a CuPy array +cp.cuda.runtime.streamSynchronize(cuda_stream) # Synchronize the stream before using `output` diff --git a/python_wip/cuda_download_1.py b/python_wip/cuda_download_1.py new file mode 100644 index 0000000..7304b3d --- /dev/null +++ b/python_wip/cuda_download_1.py @@ -0,0 +1,42 @@ +import pyzed.sl as sl + +# Create a ZED Camera object +zed = sl.Camera() + +# Create InitParameters object and set configuration parameters +init_params = sl.InitParameters() +init_params.camera_resolution = sl.RESOLUTION.HD720 # Set resolution +init_params.depth_mode = sl.DEPTH_MODE.ULTRA # Set depth mode + +# Open the camera +status = zed.open(init_params) +if status != sl.ERROR_CODE.SUCCESS: + print(f"Camera failed to open: {status}") + exit(1) + +# Create a Mat object for the image (GPU memory type) +image_gpu = sl.Mat(zed.get_camera_information().camera_resolution.width, + zed.get_camera_information().camera_resolution.height, + sl.MAT_TYPE.U8_C4, sl.MEM.GPU) + +# Capture an image frame +runtime_params = sl.RuntimeParameters() + +if zed.grab(runtime_params) == sl.ERROR_CODE.SUCCESS: + # Retrieve image directly into GPU memory + zed.retrieve_image(image_gpu, sl.VIEW.LEFT, sl.MEM.GPU) + + # Now `image_gpu` holds the image in GPU memory + print("Image captured and stored in CUDA memory") + +# Close the camera +zed.close() + +# Create a 
CPU Mat to store the image +image_cpu = sl.Mat() + +# Copy image from GPU to CPU +image_gpu.copy_to(image_cpu) + +# Save the image (this is in CPU memory now) +image_cpu.write("image_from_cuda.png") diff --git a/python_wip/cuda_stream_inference.py b/python_wip/cuda_stream_inference.py new file mode 100644 index 0000000..222c394 --- /dev/null +++ b/python_wip/cuda_stream_inference.py @@ -0,0 +1,51 @@ +# inference_node.py +import rclpy +from rclpy.node import Node +from cuda_manager import CudaStreamManager +import pycuda.driver as cuda +import numpy as np + +class InferenceNode(Node): + def __init__(self, cuda_manager): + super().__init__('inference_node') + self.cuda_manager = cuda_manager + + def infer(self): + self.get_logger().info("Waiting for preprocessing to complete...") + self.cuda_manager.get_preprocess_event().synchronize() + self.get_logger().info("Starting inference on GPU...") + + # Simulate inference on GPU + data = np.random.randn(1024, 1024).astype(np.float32) + gpu_data = cuda.mem_alloc(data.nbytes) + cuda.memcpy_htod_async(gpu_data, data, self.cuda_manager.get_stream()) + + # Signal inference completion + self.cuda_manager.get_inference_event().record(self.cuda_manager.get_stream()) + self.get_logger().info("Inference complete.") + +# post processing: +# postprocessing_node.py +import rclpy +from rclpy.node import Node +from cuda_manager import CudaStreamManager +import pycuda.driver as cuda +import numpy as np + +class PostprocessingNode(Node): + def __init__(self, cuda_manager): + super().__init__('postprocessing_node') + self.cuda_manager = cuda_manager + + def postprocess(self): + self.get_logger().info("Waiting for inference to complete...") + self.cuda_manager.get_inference_event().synchronize() + self.get_logger().info("Starting postprocessing on GPU...") + + # Simulate postprocessing on GPU + data = np.random.randn(1024, 1024).astype(np.float32) + gpu_data = cuda.mem_alloc(data.nbytes) + cuda.memcpy_htod_async(gpu_data, data, self.cuda_manager.get_stream()) + + # Assume postprocessing is complete + self.get_logger().info("Postprocessing complete.") diff --git a/python_wip/function_tracing.py b/python_wip/function_tracing.py new file mode 100644 index 0000000..890f028 --- /dev/null +++ b/python_wip/function_tracing.py @@ -0,0 +1,38 @@ +import time +from functools import wraps +import rclpy +from rclpy.node import Node +from std_msgs.msg import String + +def trace(func): + @wraps(func) + def wrapper(*args, **kwargs): + start_time = time.time() + result = func(*args, **kwargs) + end_time = time.time() + print(f'Function {func.__name__} took {end_time - start_time} seconds') + return result + return wrapper + +class MinimalPublisher(Node): + def __init__(self): + super().__init__('minimal_publisher') + self.publisher_ = self.create_publisher(String, 'topic', 10) + self.timer = self.create_timer(0.5, self.timer_callback) + + @trace + def timer_callback(self): + msg = String() + msg.data = 'Hello World: %s' % time.time() + self.publisher_.publish(msg) + self.get_logger().info('Publishing: "%s"' % msg.data) + +def main(args=None): + rclpy.init(args=args) + minimal_publisher = MinimalPublisher() + rclpy.spin(minimal_publisher) + minimal_publisher.destroy_node() + rclpy.shutdown() + +if __name__ == '__main__': + main() diff --git a/python_wip/inference_comparison.py b/python_wip/inference_comparison.py index dfe6d76..95ad560 100644 --- a/python_wip/inference_comparison.py +++ b/python_wip/inference_comparison.py @@ -252,4 +252,37 @@ def infer_with_torch_trt(self): 
output_torch_trt, torch_trt_time = comparison.infer_with_torch_trt() print(f"Torch2TRT Inference Time: {torch_trt_time:.6f} seconds") -## compare bounding box output to the expected from the file... \ No newline at end of file +## compare bounding box output to the expected from the file... + +# Warmup Phase: +# Each engine runs a warmup phase with 10 inference passes using random input. This ensures the timing reflects actual performance after the engine is "warmed up." +# Buffer Allocation: +# For TensorRT-based models (trt_normal and trt_stripped), buffers for input and output tensors are allocated using CUDA. Host-pinned memory is allocated to enable efficient asynchronous execution. +# Inference Timing: +# For each inference method (normal TensorRT, stripped TensorRT, and torch2trt), the inference time is measured after warmup. The random input is transferred to the device, and then the output is copied back to the host after inference. +# Execution Context: +# Each TensorRT-based model (trt_normal, trt_stripped) uses an execution context created from the engine. +# torch2trt: +# torch2trt models are loaded using TRTModule, and inference is performed using PyTorch tensors. + +# torch trt? +import torch +import torch_tensorrt as torch_trt + +# Sample PyTorch model (ResNet18 in this case) +model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet18', pretrained=True) +model.eval().cuda() + +# Example input tensor +input_data = torch.randn((1, 3, 224, 224)).cuda() + +# Convert the PyTorch model to a Torch-TensorRT optimized model +trt_model = torch_trt.compile(model, + inputs=[torch_trt.Input(input_data.shape)], + enabled_precisions={torch.float, torch.half}) # Use FP32 and FP16 + +# Run inference +with torch.no_grad(): + output = trt_model(input_data) + +print(output) diff --git a/python_wip/integrated_serial.py b/python_wip/integrated_serial.py new file mode 100644 index 0000000..6c22f20 --- /dev/null +++ b/python_wip/integrated_serial.py @@ -0,0 +1,38 @@ +import rclpy +from rclpy.node import Node +import serial +import time +from std_msgs.msg import String # Example message type + +class ArduinoSerialNode(Node): + def __init__(self): + super().__init__('arduino_serial_node') + self.subscription = self.create_subscription( + String, + 'your_topic_name', + self.listener_callback, + 10 + ) + self.subscription # prevent unused variable warning + + # Open serial port to Arduino + self.ser = serial.Serial('/dev/ttyUSB0', 115200, timeout=1) # Adjust USB port as needed + time.sleep(2) # Wait for Arduino to reset + + def listener_callback(self, msg): + # Serialize and send the message to Arduino + serialized_msg = msg.data + '\n' # Add a newline as a delimiter + self.ser.write(serialized_msg.encode()) + self.get_logger().info('Sent to Arduino: "%s"' % msg.data) + +def main(args=None): + rclpy.init(args=args) + arduino_serial_node = ArduinoSerialNode() + rclpy.spin(arduino_serial_node) + arduino_serial_node.destroy_node() + rclpy.shutdown() + +if __name__ == '__main__': + main() + +# pip3 install pyserial \ No newline at end of file diff --git a/python_wip/interop-cupy-cv.py b/python_wip/interop-cupy-cv.py new file mode 100644 index 0000000..1563aa1 --- /dev/null +++ b/python_wip/interop-cupy-cv.py @@ -0,0 +1,43 @@ +import cupy as cp +import cv2 +import numpy as np + +# Create a custom CUDA stream using CuPy +cuda_stream = cp.cuda.Stream() + +# Allocate a GPU array using CuPy +cupy_array = cp.random.random((224, 224, 3), dtype=cp.float32) + +# Perform some CuPy operations on the custom stream 
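# Note: kernels launched while `cuda_stream` is the current stream (i.e. inside the `with` block below)
# are enqueued on that stream rather than CuPy's default stream, so they can overlap with work on other streams.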
+with cuda_stream: + cupy_array = cp.sqrt(cupy_array) # Example CuPy operation + +# Sync the stream to ensure CuPy operations are done before OpenCV operation +cuda_stream.synchronize() + +# Convert CuPy array to a NumPy array (on the CPU) +# OpenCV doesn't natively support CuPy arrays, so transfer data back to host +numpy_array = cp.asnumpy(cupy_array) + +# Convert NumPy array to OpenCV's GPU Mat +gpu_mat = cv2.cuda_GpuMat() +gpu_mat.upload(numpy_array) + +# Perform an OpenCV CUDA operation +# OpenCV CUDA functions generally don't support custom streams directly +gpu_mat = cv2.cuda.resize(gpu_mat, (128, 128)) + +# Optionally, download the result back to the CPU +result = gpu_mat.download() + +# cropping in cupy: +import cupy as cp + +# Assume you have a CuPy array (image) of shape (height, width, channels) +image = cp.random.rand(224, 224, 3).astype(cp.float32) # Example image + +# Define the crop region (x, y, width, height) +x, y, w, h = 50, 50, 100, 100 + +# Crop the image (this works similarly to NumPy slicing) +cropped_image = image[y:y+h, x:x+w, :] \ No newline at end of file diff --git a/python_wip/multiple_cuda_stream_manager.py b/python_wip/multiple_cuda_stream_manager.py new file mode 100644 index 0000000..05de43e --- /dev/null +++ b/python_wip/multiple_cuda_stream_manager.py @@ -0,0 +1,28 @@ +# Modified cuda_manager.py +import pycuda.driver as cuda + +class CudaStreamManager: + def __init__(self): + # Create separate streams for different stages + self.preprocess_stream = cuda.Stream() + self.inference_stream = cuda.Stream() + self.postprocess_stream = cuda.Stream() + + # Create CUDA events for synchronization + self.preprocess_done = cuda.Event() + self.inference_done = cuda.Event() + + def get_preprocess_stream(self): + return self.preprocess_stream + + def get_inference_stream(self): + return self.inference_stream + + def get_postprocess_stream(self): + return self.postprocess_stream + + def get_preprocess_event(self): + return self.preprocess_done + + def get_inference_event(self): + return self.inference_done diff --git a/python_wip/preprocess_comparison.py b/python_wip/preprocess_comparison.py index eb2e0b6..7bfa7f9 100644 --- a/python_wip/preprocess_comparison.py +++ b/python_wip/preprocess_comparison.py @@ -95,3 +95,7 @@ def preprocess_with_opencv_cuda(): print(f"CuPy Preprocessing Time: {cupy_time:.4f} seconds") print(f"PyTorch Preprocessing Time: {pytorch_time:.4f} seconds") print(f"OpenCV with CUDA Preprocessing Time: {opencv_cuda_time:.4f} seconds") + +# CuPy Method: Uses CuPy for GPU-based operations. The image is uploaded to the GPU using CuPy, preprocessed (alpha channel removal, cropping, resizing, normalization), and the operation is timed. +# PyTorch Method: Uses PyTorch for GPU-based preprocessing, applying operations like cropping, resizing, and normalization using PyTorch’s transformation pipeline. +# OpenCV with CUDA Method: Uses OpenCV's CUDA functionality. The image is uploaded to the GPU with cv2.cuda_GpuMat and operations like cropping, resizing, and normalizing are performed. 
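For reference, the CuPy path summarized above could look roughly like the sketch below; the ROI coordinates, output size, and normalization are placeholders rather than the exact values used in the benchmark script:

import cupy as cp
from cupyx.scipy.ndimage import zoom

def preprocess_with_cupy_sketch(np_image, out_hw=(448, 1024)):
    # Upload the frame to the GPU and drop the alpha channel (BGRA -> BGR)
    gpu = cp.asarray(np_image)[:, :, :3].astype(cp.float32)
    # Crop a region of interest (placeholder coordinates)
    roi = gpu[50:530, 100:1124, :]
    # Resize to the model input size with bilinear interpolation, then scale to [0, 1]
    factors = (out_hw[0] / roi.shape[0], out_hw[1] / roi.shape[1], 1.0)
    resized = zoom(roi, factors, order=1)
    return resized / 255.0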
\ No newline at end of file diff --git a/python_wip/vectorized_example.py b/python_wip/vectorized_example.py new file mode 100644 index 0000000..5f4f70a --- /dev/null +++ b/python_wip/vectorized_example.py @@ -0,0 +1,70 @@ +import numpy as np +import cv2 + +def object_filter(self, image, bboxes): + detections = [] + h, w, _ = image.shape + + # Create a mask for all ROIs + for bbox in bboxes: + bx, by, bw, bh = bbox # don't reuse w/h, which hold the full-image dimensions + x1, y1, x2, y2 = bx, by, bx + bw, by + bh # Adjust to x2, y2 format + + # Ensure the ROI is within image bounds + roi = image[max(0, y1):min(h, y2), max(0, x1):min(w, x2)] + hsv_roi = cv2.cvtColor(roi, cv2.COLOR_RGB2HSV) + + # Apply color segmentation mask + mask = cv2.inRange(hsv_roi, tuple(self.lower_range), tuple(self.upper_range)) + _, mask = cv2.threshold(mask, 254, 255, cv2.THRESH_BINARY) + + contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + # Compute all contour areas at once, then keep only those above the area threshold + areas = np.array([cv2.contourArea(cnt) for cnt in contours]) + valid_contours = [cnt for cnt, area in zip(contours, areas) if area > self.min_area] + + for cnt in valid_contours: + cx, cy, cw, ch = cv2.boundingRect(cnt) + detections.append((cx, cy, cx + cw, cy + ch)) + + self.verify_object(image, detections) + +def verify_object(self, disp_image, bboxes): + roi_x1, roi_y1, roi_x2, roi_y2 = self.roi_list + original_height, original_width = original_image_shape + model_height, model_width = model_dimensions + + shifted_x = int(roi_x + abs(velocity[0]) * shift_constant) # cast so the in-place integer additions below don't fail + scale_x = roi_w / model_width + scale_y = roi_h / model_height + + adjusted_bboxes = [] + + # Convert bboxes to a NumPy array for vectorized operations + bboxes_array = np.array(bboxes) + + # Reverse the resize operation + x_min = (bboxes_array[:, 0] * scale_x).astype(int) + y_min = (bboxes_array[:, 1] * scale_y).astype(int) + x_max = (bboxes_array[:, 2] * scale_x).astype(int) + y_max = (bboxes_array[:, 3] * scale_y).astype(int) + + # Reverse the cropping operation + x_min += shifted_x + y_min += roi_y + x_max += shifted_x + y_max += roi_y + + # Ensure the bounding box doesn't exceed the original image dimensions + x_min = np.clip(x_min, 0, original_width) + y_min = np.clip(y_min, 0, original_height) + x_max = np.clip(x_max, 0, original_width) + y_max = np.clip(y_max, 0, original_height) + + adjusted_bboxes = np.vstack((x_min, y_min, x_max, y_max)).T.tolist() + + # Check if adjusted bounding boxes are within the ROI + for bbox in adjusted_bboxes: + if bbox[0] >= roi_x1 and bbox[2] <= roi_x2 and bbox[1] >= roi_y1 and bbox[3] <= roi_y2: + self.on = 1 diff --git a/python_wip/vectorized_pytorch_example.py b/python_wip/vectorized_pytorch_example.py new file mode 100644 index 0000000..f111928 --- /dev/null +++ b/python_wip/vectorized_pytorch_example.py @@ -0,0 +1,47 @@ +import torch + +def verify_object(self, disp_image, bboxes): + roi_x1, roi_y1, roi_x2, roi_y2 = self.roi_list + original_height, original_width = original_image_shape + model_height, model_width = model_dimensions + + shifted_x = int(roi_x + abs(velocity[0]) * shift_constant) # cast so the integer tensor additions below don't fail + scale_x = roi_w / model_width + scale_y = roi_h / model_height + + # Convert bounding boxes to a PyTorch tensor for GPU processing + bboxes_tensor = torch.tensor(bboxes, dtype=torch.float32, device="cuda") # place on the GPU so the operations below actually run there + + # Reverse the resize operation using vectorized operations in PyTorch + x_min = (bboxes_tensor[:, 0] * scale_x).to(torch.int32) + y_min = (bboxes_tensor[:, 1] * scale_y).to(torch.int32) + x_max = (bboxes_tensor[:, 2] * scale_x).to(torch.int32) + y_max = (bboxes_tensor[:, 3] * scale_y).to(torch.int32) + + # Reverse the 
cropping operation + x_min += shifted_x + y_min += roi_y + x_max += shifted_x + y_max += roi_y + + # Ensure the bounding boxes don't exceed the original image dimensions + x_min = torch.clamp(x_min, 0, original_width) + y_min = torch.clamp(y_min, 0, original_height) + x_max = torch.clamp(x_max, 0, original_width) + y_max = torch.clamp(y_max, 0, original_height) + + # Stack the adjusted bounding boxes + adjusted_bboxes = torch.stack((x_min, y_min, x_max, y_max), dim=1) + + # Check if adjusted bounding boxes are within the ROI + for bbox in adjusted_bboxes: + if bbox[0] >= roi_x1 and bbox[2] <= roi_x2 and bbox[1] >= roi_y1 and bbox[3] <= roi_y2: + self.on = 1 + +# Using PyTorch Tensors: The bounding boxes are converted to a PyTorch tensor, which allows for GPU acceleration and efficient batch processing. + +# Vectorized Operations: All calculations (scaling and clipping) are performed using PyTorch’s built-in functions, which are optimized for performance. + +# Clamping: The torch.clamp function is used to ensure bounding box coordinates are within valid ranges, similar to np.clip. + +# Condition Checks: The condition checks for bounding boxes being within the ROI are retained in the loop but could also be vectorized if needed. However, since it involves logic checks with conditions that might vary for each box, a loop is simpler here. \ No newline at end of file diff --git a/tools/calibration/colorspace_calibration.py b/tools/calibration/colorspace_calibration.py index f9d3ffe..b2d9189 100644 --- a/tools/calibration/colorspace_calibration.py +++ b/tools/calibration/colorspace_calibration.py @@ -13,7 +13,7 @@ val_dark = 255 default_area = 100 -def color_calibrator(image_path='C:/Users/ishaa/Coding Projects/Applied-AI/ROS/assets/maize'): +def color_calibrator(image_path='C:/Users/ishaa/Coding Projects/ROS/assets/maize'): if not os.path.exists(image_path): raise ValueError(f"Images folder not found at {image_path}") if len(os.listdir(image_path)) == 0: diff --git a/tools/conversion/engine_builder.py b/tools/conversion/engine_builder.py index 69d8349..27301ee 100644 --- a/tools/conversion/engine_builder.py +++ b/tools/conversion/engine_builder.py @@ -138,4 +138,22 @@ def convert_onnx_to_trt(model_path="/home/user/Downloads/model.onnx", output_pat parser.add_argument('--verify', type=bool, default=True, help="Verify the converted engine output") args = parser.parse_args() - convert_onnx_to_trt(args.model_path, args.output_path, args.FP16, args.strip_weights, args.gs_optimize, args.verbose) \ No newline at end of file + convert_onnx_to_trt(args.model_path, args.output_path, args.FP16, args.strip_weights, args.gs_optimize, args.verbose) + +# stripped: +import tensorrt as trt + +# Create a builder and an (explicit-batch) network +builder = trt.Builder(trt.Logger(trt.Logger.WARNING)) +network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) +# (the network still has to be populated, e.g. by parsing an ONNX model, before building) + +# Define the engine configuration and specify optimizations +config = builder.create_builder_config() +config.set_flag(trt.BuilderFlag.STRICT_TYPES) + +# Build the engine +engine = builder.build_engine(network, config) + +# Serialize and save the engine (note: a weight-stripped engine also requires the corresponding builder flag, e.g. BuilderFlag.STRIP_PLAN in recent TensorRT releases; it is not produced simply by omitting debug flags) +with open("model_stripped.engine", "wb") as f: + f.write(engine.serialize()) \ No newline at end of file diff --git a/workspace_python/ros2_ws/src/python_workspace/launch/old_composable_launch.py b/workspace_python/ros2_ws/src/python_workspace/launch/old_composable_launch.py new file mode 100644 index 0000000..e2135bf --- /dev/null +++ 
b/workspace_python/ros2_ws/src/python_workspace/launch/old_composable_launch.py @@ -0,0 +1,47 @@ +import os +from launch import LaunchDescription +from launch_ros.actions import PushRosNamespace +from launch_ros.actions import Node +from launch.substitutions import LaunchConfiguration + +def generate_launch_description(): + return LaunchDescription([ + PushRosNamespace('camera_jetson_namespace'), # Optional: Organize nodes under a namespace + + Node( + package='your_camera_package', # Replace with the actual package name for CameraNode + namespace='camera', + executable='camera_node', + name='camera_node', + output='screen', + parameters=[ + { + 'source_type': 'zed', # Example parameter, change as necessary + 'static_image_path': '/home/usr/Desktop/ROS/assets/IMG_1822_14.JPG', + 'video_path': '/home/usr/Desktop/ROS/assets/video.mp4', + 'loop': 0, # Example value, adjust accordingly + 'frame_rate': 30, + 'model_dimensions': (448, 1024), + 'camera_side': 'left', + 'shift_constant': 1, + 'roi_dimensions': [0, 0, 100, 100], + 'precision': 'fp32', # Example precision + } + ], + ), + + Node( + package='your_jetson_package', # Replace with the actual package name for JetsonNode + namespace='jetson', + executable='jetson_node', + name='jetson_node', + output='screen', + parameters=[ + { + 'engine_path': '/home/user/Downloads/model.engine', # Replace with the actual engine path + 'strip_weights': False, + 'precision': 'fp32', # Example precision + } + ], + ), + ]) diff --git a/workspace_python/ros2_ws/src/python_workspace/launch/old_launch.py b/workspace_python/ros2_ws/src/python_workspace/launch/old_launch.py new file mode 100644 index 0000000..513ff58 --- /dev/null +++ b/workspace_python/ros2_ws/src/python_workspace/launch/old_launch.py @@ -0,0 +1,124 @@ +from launch import LaunchDescription +from launch_ros.actions import Node +from launch.actions import DeclareLaunchArgument +from launch.substitutions import LaunchConfiguration + +def generate_launch_description(): + return LaunchDescription([ + + # Declare launch arguments + DeclareLaunchArgument( + 'engine_path', default_value='/home/user/Downloads/model.engine', + description='Path to the TensorRT engine file'), + + DeclareLaunchArgument( + 'strip_weights', default_value='False', + description='Whether to strip weights from the model'), + + DeclareLaunchArgument( + 'precision', default_value='fp32', + description='Inference precision (fp32, fp16)'), + + DeclareLaunchArgument( + 'use_display_node', default_value='True', + description='Whether to use the display node in ExterminationNode'), + + DeclareLaunchArgument( + 'lower_range', default_value='[78, 158, 124]', + description='Lower HSV range for color segmentation'), + + DeclareLaunchArgument( + 'upper_range', default_value='[60, 255, 255]', + description='Upper HSV range for color segmentation'), + + DeclareLaunchArgument( + 'min_area', default_value='100', + description='Minimum area for object detection'), + + DeclareLaunchArgument( + 'min_confidence', default_value='0.5', + description='Minimum confidence for object detection'), + + DeclareLaunchArgument( + 'roi_list', default_value='[0, 0, 100, 100]', + description='Region of interest for detection'), + + DeclareLaunchArgument( + 'publish_rate', default_value='10', + description='Publishing rate for the extermination node'), + + DeclareLaunchArgument( + 'side', default_value='left', + description='Side of the camera (left or right)'), + + DeclareLaunchArgument( + 'frame_rate', default_value='30', + description='Frame rate for 
CameraNode publishing'), + + DeclareLaunchArgument( + 'model_dimensions', default_value='[448, 1024]', + description='Model dimensions for CameraNode preprocessing'), + + DeclareLaunchArgument( + 'camera_side', default_value='left', + description='Camera side (left or right)'), + + DeclareLaunchArgument( + 'shift_constant', default_value='1', + description='Shift constant for camera adjustments'), + + DeclareLaunchArgument( + 'roi_dimensions', default_value='[0, 0, 100, 100]', + description='Region of interest dimensions for CameraNode'), + + DeclareLaunchArgument( + 'precision', default_value='fp32', + description='Precision for image processing (fp32 or fp16)'), + + # Launch JetsonNode + Node( + package='your_package_name', + executable='jetson_node', + name='jetson_node', + output='screen', + parameters=[ + {'engine_path': LaunchConfiguration('engine_path')}, + {'strip_weights': LaunchConfiguration('strip_weights')}, + {'precision': LaunchConfiguration('precision')} + ] + ), + + # Launch ExterminationNode + Node( + package='your_package_name', + executable='extermination_node', + name='extermination_node', + output='screen', + parameters=[ + {'use_display_node': LaunchConfiguration('use_display_node')}, + {'lower_range': LaunchConfiguration('lower_range')}, + {'upper_range': LaunchConfiguration('upper_range')}, + {'min_area': LaunchConfiguration('min_area')}, + {'min_confidence': LaunchConfiguration('min_confidence')}, + {'roi_list': LaunchConfiguration('roi_list')}, + {'publish_rate': LaunchConfiguration('publish_rate')}, + {'side': LaunchConfiguration('side')} + ] + ), + + # Launch CameraNode + Node( + package='your_package_name', + executable='camera_node', + name='camera_node', + output='screen', + parameters=[ + {'frame_rate': LaunchConfiguration('frame_rate')}, + {'model_dimensions': LaunchConfiguration('model_dimensions')}, + {'camera_side': LaunchConfiguration('camera_side')}, + {'shift_constant': LaunchConfiguration('shift_constant')}, + {'roi_dimensions': LaunchConfiguration('roi_dimensions')}, + {'precision': LaunchConfiguration('precision')} + ] + ), + ])
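If it helps as a usage note: a launch file like this is normally run with its arguments overridden on the command line. The package name below is the same `your_package_name` placeholder used in the Node definitions above, and the file name is assumed from this diff's path:

ros2 launch your_package_name old_launch.py precision:=fp16 use_display_node:=False frame_rate:=60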