ROS实验报告二

Topic 与 Service

关于ros中的服务通信机制,是一种同步通信,相较于话题通信,区别如下:

ROS学习笔记6~Topic(话题) 和Service(服务)的区别

话题通信适用于不断更新的数据传输相关的应用场景

对比项 发布/订阅 请求/响应
通信模式 发布者发布消息,订阅者接收消息,多对多关系 客户端向服务器发送请求,服务器返回响应,一对多关系(一个 Server,可对应多个 Client)
同步性 异步 同步
底层协议 ROSTCP/ROSUDP ROSTCP/ROSUDP
缓冲区
实时性
节点关系 多对多 一对多
通信数据 msg srv
使用场景 连续高频的数据发布与接收,如雷达、里程计 偶尔调用或执行某一项特定功能,如拍照、语音识别

Topic通信存在缺陷

(1)无法确认发布信息是否被收到

(2)有些类型的消息(例如相机节点发布的消息),按照一定频率发布,通常会占用比较大的带宽,造成资源浪费

ROS Service

Service通信的特点

(1)Service使用 请求-响应式 的通信模型,只有接收到请求才执行服务,简单而且高效

(2)Service通信实现的是 一对一 通信,信息流是双向的,包含两部分,Client与Server

(3)Service是 同步 通信方式

ROS Node Service通信的图形化表示

服务通信较之于话题通信更简单些,理论模型如下图所示,该模型中涉及到三个角色:

  • ROS master(管理者)
  • Server(服务端)
  • Client(客户端)

参考链接:http://www.autolabor.com.cn/book/ROSTutorials/di-2-zhang-ros-jia-gou-she-ji/23-fu-wu-tong-xin.html

实验二要求

实现一个复杂的服务(services)通信,机器视觉识别行人,并返回其坐标。实验报告需提交PDF,文件命名方式:学号+专业姓名+实验1。提交日期/方式:11.1号通过机房电脑系统提交。

要求一:自定义通信srv格式,分析services的通信机制。

要求二:需使用ament_python构建功能包,实现行人识别,并返回其中心点。

要求三:需使用ament_cmake构建功能包,实现行人识别,并返回其中心点。

要求四:PDF附上关键功能代码。

这里的行人检测可以借助于Opencv自带的函数功能,链接如下:https://blog.csdn.net/hasque2019/article/details/124427656 是一个运动物体检测的功能

使用python实现

创建功能包

在homework工作空间下创建功能包hw_2:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
jr_hu@hu1scp:~/homework/src$ ros2 pkg create hw_2 --build-type ament_python --dependencies rclpy
going to create a new package
package name: hw_2
destination directory: /home/jr_hu/homework/src
package format: 3
version: 0.0.0
description: TODO: Package description
maintainer: ['jr_hu <jr_hu@todo.todo>']
licenses: ['TODO: License declaration']
build type: ament_python
dependencies: ['rclpy']
creating folder ./hw_2
creating ./hw_2/package.xml
creating source folder
creating folder ./hw_2/hw_2
creating ./hw_2/setup.py
creating ./hw_2/setup.cfg
creating folder ./hw_2/resource
creating ./hw_2/resource/hw_2
creating ./hw_2/hw_2/__init__.py
creating folder ./hw_2/test
creating ./hw_2/test/test_copyright.py
creating ./hw_2/test/test_flake8.py
creating ./hw_2/test/test_pep257.py

[WARNING]: Unknown license 'TODO: License declaration'. This has been set in the package.xml, but no LICENSE file has been created.
It is recommended to use one of the ament license identitifers:
Apache-2.0
BSL-1.0
BSD-2.0
BSD-2-Clause
BSD-3-Clause
GPL-3.0-only
LGPL-3.0-only
MIT
MIT-0

进入hw_2文件夹下,使用code打开文件

1
2
3
jr_hu@hu1scp:~/homework/src$ cd hw_2/
jr_hu@hu1scp:~/homework/src/hw_2$ code ..
jr_hu@hu1scp:~/homework/src/hw_2$

编写客户端代码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Service client node: asks the detection server for the last detected
object's center point via the 'get_target_position' service."""

import rclpy                                          # ROS2 Python client library
from rclpy.node import Node                           # ROS2 node base class
from learning_interface.srv import GetObjectPosition  # custom service interface


class objectClient(Node):
    def __init__(self, name):
        """Create the service client and block until the server is up."""
        super().__init__(name)  # initialize the Node base class
        self.client = self.create_client(GetObjectPosition, 'get_target_position')
        # Poll once per second until the server advertises the service.
        while not self.client.wait_for_service(timeout_sec=1.0):
            self.get_logger().info('service not available, waiting again...')
        self.request = GetObjectPosition.Request()

    def send_request(self):
        """Send one asynchronous request; the reply arrives via self.future."""
        self.request.get = True
        self.future = self.client.call_async(self.request)


def main(args=None):
    rclpy.init(args=args)                  # initialize the rclpy interface
    node = objectClient("service_client")  # create and initialize the node
    node.send_request()

    # Spin until the async call completes, then report the result once.
    while rclpy.ok():
        rclpy.spin_once(node)

        if node.future.done():
            try:
                response = node.future.result()
            except Exception as e:
                node.get_logger().info(
                    'Service call failed %r' % (e,))
            else:
                node.get_logger().info(
                    'Result of object position:\n x: %d y: %d' %
                    (response.x, response.y))
            break

    node.destroy_node()  # destroy the node object
    rclpy.shutdown()     # shut down the rclpy interface

编写服务代码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Service server node (first attempt): subscribes to camera images, detects
moving objects with MOG2 background subtraction, and serves the last center
point over the 'get_target_position' service.

NOTE(review): this is the *pre-fix* listing kept for the report. It fails on
OpenCV 4.x (findContours returns 2 values, not 3 — see the corrected listing
below) and references an undefined name 'frame' (should be 'image').
"""
import rclpy                                          # ROS2 Python client library
from rclpy.node import Node                           # ROS2 node base class
from sensor_msgs.msg import Image                     # image message type
import numpy as np                                    # numeric library
from cv_bridge import CvBridge                        # ROS <-> OpenCV image conversion
import cv2                                            # OpenCV image processing
from learning_interface.srv import GetObjectPosition  # custom service interface

class ImageSubscriber(Node):
    def __init__(self, name):
        super().__init__(name)  # initialize the Node base class
        # Subscriber: (message type, topic name, callback, queue depth)
        self.sub = self.create_subscription(
            Image, 'image_raw', self.listener_callback, 10)
        # Converter between ROS Image messages and OpenCV images.
        self.cv_bridge = CvBridge()

        # Service server: (interface type, service name, callback)
        self.srv = self.create_service(GetObjectPosition,
                                       'get_target_position',
                                       self.object_position_callback)
        self.objectX = 0  # last detected center point, x
        self.objectY = 0  # last detected center point, y

    def graying(self, image):
        """Sobel edge detection demo: show a thresholded gradient image."""
        height,width = image.shape[:2]  # image height and width
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # BGR -> grayscale

        # Sobel operator: edge detection via image gradients.
        sobelx = cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=3)
        sobely = cv2.Sobel(gray,cv2.CV_64F,0,1,ksize=3)
        gradient_magnitude = cv2.magnitude(sobelx,sobely)
        _, thresh = cv2.threshold(gradient_magnitude,20,255,cv2.THRESH_BINARY)  # thresholding
        cv2.imshow('sobel edge detection',thresh)
        cv2.waitKey(5)

    def object_detect(self, image):
        """Detect moving objects and record the last bounding-box center."""
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
        # Gaussian-mixture model for background subtraction.
        # NOTE(review): recreated per call, so no background history accumulates.
        fgbg = cv2.createBackgroundSubtractorMOG2()

        while(True):
            fgmask = fgbg.apply(image)
            # Morphological opening to remove noise.
            fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
            # Find contours in the foreground mask.
            # NOTE(review): 3-value unpack is OpenCV 3.x only; raises on 4.x.
            im, contours, hierarchy = cv2.findContours(fgmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

            for c in contours:
                # Perimeter of each contour, used to filter small blobs.
                perimeter = cv2.arcLength(c,True)
                if perimeter > 188:
                    # Upright (non-rotated) bounding rectangle.
                    x,y,w,h = cv2.boundingRect(c)
                    # Draw the rectangle.
                    # NOTE(review): 'frame' is undefined here — should be 'image'.
                    cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)
                    self.objectX = int(x+w/2)
                    self.objectY = int(y+h/2)

            cv2.imshow('frame',frame)
            cv2.imshow('fgmask', fgmask)
            k = cv2.waitKey(150) & 0xff
            if k == 27:  # ESC exits the display loop
                break

    def listener_callback(self, data):
        """Image-topic callback: convert the message and run detection."""
        self.get_logger().info('Receiving video frame')  # log: callback entered
        image = self.cv_bridge.imgmsg_to_cv2(data, 'bgr8')  # ROS Image -> OpenCV
        self.object_detect(image)

    def object_position_callback(self, request, response):
        """Service callback: return the last detected center point."""
        if request.get == True:
            response.x = self.objectX  # target object's XY coordinates
            response.y = self.objectY
            self.get_logger().info('Object position\nx: %d y: %d' %
                                   (response.x, response.y))  # log: reply sent
        else:
            response.x = 0
            response.y = 0
            self.get_logger().info('Invalid command')  # log: invalid request
        return response

def main(args=None):  # node entry point
    rclpy.init(args=args)                     # initialize the rclpy interface
    node = ImageSubscriber("service_server")  # create and initialize the node
    rclpy.spin(node)                          # spin until shutdown
    node.destroy_node()                       # destroy the node object
    rclpy.shutdown()                          # shut down the rclpy interface

出现报错是因为cv版本与上述参考博客的版本不一

1
ValueError: not enough values to unpack (expected 3, got 2)

在OpenCV 4.x版本中,cv2.findContours 函数只返回两个值:contours 和 hierarchy。而在OpenCV 3.x版本中,它返回三个值:image, contours, 和 hierarchy

修正后的服务代码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Service server node (corrected for OpenCV 4.x): subscribes to camera
images, detects moving objects with MOG2 background subtraction, and serves
the last center point over the 'get_target_position' service."""

import rclpy                                          # ROS2 Python client library
from rclpy.node import Node                           # ROS2 node base class
from sensor_msgs.msg import Image                     # image message type
import numpy as np                                    # numeric library
from cv_bridge import CvBridge                        # ROS <-> OpenCV image conversion
import cv2                                            # OpenCV image processing
from learning_interface.srv import GetObjectPosition  # custom service interface

class ImageSubscriber(Node):
    def __init__(self, name):
        super().__init__(name)  # initialize the Node base class
        # Subscriber: (message type, topic name, callback, queue depth)
        self.sub = self.create_subscription(
            Image, 'image_raw', self.listener_callback, 10)
        # Converter between ROS Image messages and OpenCV images.
        self.cv_bridge = CvBridge()

        # Service server: (interface type, service name, callback)
        self.srv = self.create_service(GetObjectPosition,
                                       'get_target_position',
                                       self.object_position_callback)
        self.objectX = 0  # last detected center point, x
        self.objectY = 0  # last detected center point, y

        # Created once so the background model accumulates across frames
        # (recreating the subtractor per frame would give it no history).
        self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        self.fgbg = cv2.createBackgroundSubtractorMOG2()

    def graying(self, image):
        """Sobel edge detection demo: show a thresholded gradient image."""
        height, width = image.shape[:2]  # image height and width
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # BGR -> grayscale

        # Sobel operator: edge detection via image gradients.
        sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
        sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
        gradient_magnitude = cv2.magnitude(sobelx, sobely)
        _, thresh = cv2.threshold(gradient_magnitude, 20, 255, cv2.THRESH_BINARY)  # thresholding
        cv2.imshow('sobel edge detection', thresh)
        cv2.waitKey(1)

    def object_detect(self, image):
        """Detect moving objects in one frame and record the last center."""
        fgmask = self.fgbg.apply(image)
        # Morphological opening to remove noise.
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, self.kernel)
        # OpenCV 4.x: findContours returns (contours, hierarchy).
        contours, hierarchy = cv2.findContours(fgmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        for c in contours:
            # Perimeter of each contour, used to filter small blobs.
            perimeter = cv2.arcLength(c, True)
            if perimeter > 2236.5:  # tune this threshold per camera/scene
                # Upright (non-rotated) bounding rectangle.
                x, y, w, h = cv2.boundingRect(c)
                # Draw the rectangle.
                cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
                self.objectX = int(x + w / 2)
                self.objectY = int(y + h / 2)

        cv2.imshow('frame', image)
        cv2.imshow('fgmask', fgmask)
        k = cv2.waitKey(1) & 0xff
        if k == 27:  # ESC: close windows and stop rclpy
            cv2.destroyAllWindows()
            rclpy.shutdown()

    def listener_callback(self, data):
        """Image-topic callback: convert the message and run detection."""
        self.get_logger().info('Receiving video frame')  # log: callback entered
        image = self.cv_bridge.imgmsg_to_cv2(data, 'bgr8')  # ROS Image -> OpenCV
        self.object_detect(image)

    def object_position_callback(self, request, response):
        """Service callback: return the last detected center point."""
        if request.get == True:
            response.x = self.objectX  # target object's XY coordinates
            response.y = self.objectY
            self.get_logger().info('Object position\nx: %d y: %d' %
                                   (response.x, response.y))  # log: reply sent
        else:
            response.x = 0
            response.y = 0
            self.get_logger().info('Invalid command')  # log: invalid request
        return response

def main(args=None):  # node entry point
    rclpy.init(args=args)                     # initialize the rclpy interface
    node = ImageSubscriber("service_server")  # create and initialize the node
    rclpy.spin(node)                          # spin until shutdown
    node.destroy_node()                       # destroy the node object
    rclpy.shutdown()                          # shut down the rclpy interface

if __name__ == '__main__':
    main()

因为参数 perimeter 的阈值一直找不到合适的取值,多次尝试后效果并不好,下面换一种思路尝试

另一种思路的服务代码

参考链接:

https://blog.csdn.net/m0_57442556/article/details/141415970?fromshare=blogdetail&sharetype=blogdetail&sharerId=141415970&sharerefer=PC&sharesource=m0_74235619&sharefrom=from_link

这种行人检测效果略好一点,调相机或者本地视频都可以实现,需要注意视频的话最好用绝对地址

虽然没有达到理想的效果,但是也能凑合用,en...

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Service server node (HOG variant): reads frames from a local video file,
runs the OpenCV HOG+SVM pedestrian detector, and serves the last detected
center point over the 'get_target_position' service."""

import rclpy                                          # ROS2 Python client library
from rclpy.node import Node                           # ROS2 node base class
from cv_bridge import CvBridge                        # ROS <-> OpenCV image conversion
import cv2                                            # OpenCV image processing
from learning_interface.srv import GetObjectPosition  # custom service interface
import time                                           # for adding delays

class VideoProcessor(Node):
    def __init__(self, name):
        super().__init__(name)  # initialize the Node base class
        # Converter between ROS Image messages and OpenCV images.
        self.cv_bridge = CvBridge()

        # Service server: (interface type, service name, callback)
        self.srv = self.create_service(GetObjectPosition,
                                       'get_target_position',
                                       self.object_position_callback)
        self.objectX = 0  # last detected center point, x
        self.objectY = 0  # last detected center point, y
        # Video file path — an absolute path is most reliable here.
        self.video_path = '/home/jr_hu/homework_1/src/hw_2/hw_2/detect.mp4'
        self.cap = cv2.VideoCapture(self.video_path)  # open the local video file

        # HOG descriptor with the pre-trained pedestrian SVM, built once
        # here instead of per frame (detector setup is not cheap).
        self.hog = cv2.HOGDescriptor()
        self.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

        # Timer that drives frame processing.
        self.timer = self.create_timer(0.0002, self.process_frame)

    def detect(self, image, winStride, padding, scale, useMeanshiftGrouping):
        """Run pedestrian detection on one frame and record the last center."""
        (rects, weights) = self.hog.detectMultiScale(image,
                                                     winStride=winStride,
                                                     padding=padding,
                                                     scale=scale,
                                                     useMeanshiftGrouping=useMeanshiftGrouping)
        for (x, y, w, h) in rects:
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
            self.objectX = int(x + w / 2)
            self.objectY = int(y + h / 2)

        cv2.imshow("Detected People", image)  # show the detection result
        cv2.waitKey(1)

    def process_frame(self):
        """Timer callback: grab one frame, looping the video at its end."""
        ret, frame = self.cap.read()  # capture one frame from the video
        if not ret:
            self.get_logger().info('Video ended, restarting...')  # video over: reopen it
            self.cap.release()  # release the current capture
            self.cap = cv2.VideoCapture(self.video_path)  # reopen the video file
            if not self.cap.isOpened():
                self.get_logger().error(f'Failed to restart the video: {self.video_path}')
                return

            ret, frame = self.cap.read()  # re-read the first frame
            if not ret:
                self.get_logger().error('Failed to read the first frame after restarting.')
                return

        winStride = (8, 8)  # window step: 8 px per row/column move
        padding = (2, 2)    # border padding; common values: (8,8) (16,16) (24,24) (32,32)
        scale = 1.01        # image-pyramid scale factor
        useMeanshiftGrouping = False  # overlap suppression
        self.detect(frame, winStride, padding, scale, useMeanshiftGrouping)

    def object_position_callback(self, request, response):
        """Service callback: return the last detected center point."""
        if request.get == True:
            response.x = self.objectX  # target object's XY coordinates
            response.y = self.objectY
            self.get_logger().info('Object position\nx: %d y: %d' %
                                   (response.x, response.y))
        else:
            response.x = 0
            response.y = 0
            self.get_logger().info('Invalid command')
        return response

def main(args=None):  # node entry point
    rclpy.init(args=args)                      # initialize the rclpy interface
    node = VideoProcessor("video_processor")   # create and initialize the node
    rclpy.spin(node)                           # spin until shutdown
    node.cap.release()                         # release the video file
    node.destroy_node()                        # destroy the node object
    cv2.destroyAllWindows()                    # close all OpenCV windows
    rclpy.shutdown()                           # shut down the rclpy interface

if __name__ == '__main__':
    main()

使用cpp实现

安装必要的依赖

确保安装了ROS2和OpenCV,可以使用以下命令来安装OpenCV

1
2
3
sudo apt-get update
sudo apt-get install ros-humble-image-common
sudo apt-get install libopencv-dev

创建ROS2工作空间

同前面的实验

创建ROS2包

在ROS2工作空间中,创建一个名为 hw_2_cpp 的新包,并添加所需的依赖项。

1
ros2 pkg create --build-type ament_cmake hw_2_cpp --dependencies rclcpp cv_bridge sensor_msgs

编写C++代码

在 hw_2_cpp 包中创建一个源文件 src/hw_2_cpp.cpp,并添加代码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
#include <rclcpp/rclcpp.hpp>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/msg/image.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>

#include <chrono>  // std::chrono_literals (100ms, 1s)
#include <thread>  // std::this_thread::sleep_for — was missing in the original listing

using namespace std::chrono_literals;

// Reads frames from a local video file, runs the OpenCV HOG+SVM pedestrian
// detector on each frame, and logs the center point of every detection.
class VideoProcessorNode : public rclcpp::Node {
public:
    VideoProcessorNode() : Node("video_processor") {
        // Initialize the HOG descriptor with the pre-trained pedestrian SVM.
        hog = cv::HOGDescriptor();
        hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());

        // Open the video file (absolute path is most reliable).
        video_path = "/home/jr_hu/homework_1/src/hw_2/hw_2/detect.mp4";
        cap.open(video_path);
        if (!cap.isOpened()) {
            RCLCPP_ERROR(this->get_logger(), "Failed to open video file: %s", video_path.c_str());
            exit(1);
        }

        // Timer that drives per-frame processing (10 Hz).
        timer = this->create_wall_timer(100ms, std::bind(&VideoProcessorNode::process_frame, this));
    }

private:
    cv::HOGDescriptor hog;           // pedestrian detector
    cv::VideoCapture cap;            // video source
    std::string video_path;          // path to the video file
    rclcpp::TimerBase::SharedPtr timer;

    // Grab one frame, looping the video when it reaches the end.
    void process_frame() {
        cv::Mat frame;
        bool ret = cap.read(frame);
        if (!ret) {
            RCLCPP_INFO(this->get_logger(), "Video ended, restarting...");
            cap.release();
            std::this_thread::sleep_for(1s);  // brief pause before reopening
            cap.open(video_path);
            if (!cap.isOpened()) {
                RCLCPP_ERROR(this->get_logger(), "Failed to restart the video: %s", video_path.c_str());
                return;
            }
            ret = cap.read(frame);
            if (!ret) {
                RCLCPP_ERROR(this->get_logger(), "Failed to read the first frame after restarting.");
                return;
            }
        }

        detect(frame);
    }

    // Run pedestrian detection on one frame, draw boxes, and log centers.
    void detect(cv::Mat &image) {
        std::vector<cv::Rect> rects;
        std::vector<double> weights;
        hog.detectMultiScale(image, rects, weights);

        for (const auto &rect : rects) {
            cv::rectangle(image, rect, cv::Scalar(0, 0, 255), 2);
            int objectX = rect.x + rect.width / 2;
            int objectY = rect.y + rect.height / 2;
            RCLCPP_INFO(this->get_logger(), "Object position: x=%d, y=%d", objectX, objectY);
        }

        cv::imshow("Detected People", image);
        cv::waitKey(1);
    }
};

int main(int argc, char * argv[]) {
    rclcpp::init(argc, argv);
    rclcpp::spin(std::make_shared<VideoProcessorNode>());
    rclcpp::shutdown();
    return 0;
}

编译和运行

编辑 CMakeLists.txt 文件,确保包含必要的编译选项

发现出现了报错, CMake 无法找到 OpenCV4 的配置文件

直接全文件搜索:opencvconfig 得到路径 /usr/lib/x86_64-linux-gnu/cmake/opencv4

就可以在CMakeLists.txt文件里手动设置 CMAKE_PREFIX_PATH 环境变量

1
2
# 设置 CMAKE_PREFIX_PATH
set(CMAKE_PREFIX_PATH ${CMAKE_PREFIX_PATH} /usr/lib/x86_64-linux-gnu/cmake/opencv4)

同时在终端设置CMAKE_PREFIX_PATH 环境变量

1
export CMAKE_PREFIX_PATH=$CMAKE_PREFIX_PATH:/usr/lib/x86_64-linux-gnu/cmake/opencv4

CMakeLists.txt完整代码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
# Build configuration for the hw_2_cpp package (ament_cmake, ROS 2).
cmake_minimum_required(VERSION 3.8)
project(hw_2_cpp)

# Help CMake locate OpenCV 4's config files.
# NOTE(review): this hard-codes a machine-specific path; prefer exporting
# CMAKE_PREFIX_PATH in the shell so the package stays portable.
set(CMAKE_PREFIX_PATH ${CMAKE_PREFIX_PATH} /usr/lib/x86_64-linux-gnu/cmake/opencv4)

# Enable common warnings on GCC/Clang.
if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
add_compile_options(-Wall -Wextra -Wpedantic)
endif()

# find dependencies
find_package(ament_cmake REQUIRED)
find_package(rclcpp REQUIRED)
find_package(cv_bridge REQUIRED)
find_package(sensor_msgs REQUIRED)
find_package(OpenCV REQUIRED)

# Single node executable built from the package's one source file.
add_executable(hw_2_cpp src/hw_2_cpp.cpp)

# ROS-managed dependencies (include dirs + libs resolved by ament).
ament_target_dependencies(hw_2_cpp rclcpp cv_bridge sensor_msgs)

target_link_libraries(hw_2_cpp ${OpenCV_LIBS}) # link the OpenCV libraries

# Install the executable where `ros2 run` can find it.
install(TARGETS
hw_2_cpp
DESTINATION lib/${PROJECT_NAME})

if(BUILD_TESTING)
find_package(ament_lint_auto REQUIRED)
# the following line skips the linter which checks for copyrights
# comment the line when a copyright and license is added to all source files
set(ament_cmake_copyright_FOUND TRUE)
# the following line skips cpplint (only works in a git repo)
# comment the line when this package is in a git repo and when
# a copyright and license is added to all source files
set(ament_cmake_cpplint_FOUND TRUE)
ament_lint_auto_find_test_dependencies()
endif()

ament_package()

清理构建目录并重新构建

1
2
3
4
5
# 进入工作空间
rm -rf src/hw_2_cpp/build
colcon build --packages-select hw_2_cpp
source install/setup.bash
ros2 run hw_2_cpp hw_2_cpp

运行结果展示


ROS实验报告二
http://jrhu0048.github.io/2024/10/18/ros/ros-shi-yan-bao-gao-er/
作者
JR.HU
发布于
2024年10月18日
更新于
2024年11月9日
许可协议