Yahboom microROS Car - Native Ubuntu Support Series 27: Controlling the Car with Your Palm

Published: 2025-02-11

1. Background

This section is similar to the previous test: Yahboom microROS Car - Native Ubuntu Support Series 26: Gesture Control of Basic Car Motion (CSDN blog).

Both are based on MediaPipe Hands for palm and finger recognition.

For easy reference, here is the hand landmark layout again. The palm position is identified by landmark 9, the knuckle at the base of the middle finger.
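Outside of ROS, reading that landmark takes only a few lines. Below is a minimal standalone sketch, assuming a local webcam at index 0; the car code wraps the same idea in the HandDetector class from media_library.

#!/usr/bin/env python3
# Minimal sketch: track landmark 9 (base of the middle finger) with MediaPipe Hands.
import cv2 as cv
import mediapipe as mp

hands = mp.solutions.hands.Hands(max_num_hands=1, min_detection_confidence=0.5)
cap = cv.VideoCapture(0)          # assumption: local webcam at index 0
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    results = hands.process(cv.cvtColor(frame, cv.COLOR_BGR2RGB))
    if results.multi_hand_landmarks:
        lm = results.multi_hand_landmarks[0].landmark[9]
        h, w = frame.shape[:2]
        # landmarks come back normalized to [0, 1]; convert to pixel coordinates
        print("palm point:", int(lm.x * w), int(lm.y * h))
    cv.imshow('hands', frame)
    if cv.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv.destroyAllWindows()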

2. Program Description

The example in this section may run very sluggishly on the robot's own main controller. Once palm detection works, it helps to prop the car up with its wheels off the ground before testing; the results are noticeably better that way.

The car drives the chassis according to where the palm sits in the frame (a compact sketch of the mapping follows this list):

Palm in the upper part of the frame -> car moves forward

Palm in the lower part of the frame -> car moves backward

Palm in the left part of the frame -> car moves left

Palm in the right part of the frame -> car moves right
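The whole mapping fits in one small pure function. This sketch simply restates the thresholds used in the node code further below (the frame is resized to 640x480 first), which makes the bands easy to tune on a desktop:

# Sketch of the palm-position -> velocity mapping; thresholds match the node below.
def palm_to_velocity(point_x, point_y):
    # vertical bands pick linear x: top of the frame forward, bottom backward
    if point_y >= 270:
        x = -0.2    # palm low in the frame -> back up
    elif point_y <= 150:
        x = 0.2     # palm high in the frame -> drive forward
    else:
        x = 0.0     # dead zone: no forward/backward motion
    # horizontal bands pick lateral y, with a 300..350 px dead zone in the middle
    if point_x >= 350:
        y = -0.2
    elif point_x <= 300:
        y = 0.2
    else:
        y = 0.0
    return x, y     # the node passes these on as pub_vel(x, 0.0, y)

For example, palm_to_velocity(295, 120) returns (0.2, 0.2), which matches the first lines of the run log shown later.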

3. Launch Commands

Start the car agent and the image agent:

sudo docker run -it --rm -v /dev:/dev -v /dev/shm:/dev/shm --privileged --net=host microros/micro-ros-agent:humble udp4 --port 8090 -v4

sudo docker run -it --rm -v /dev:/dev -v /dev/shm:/dev/shm --privileged --net=host microros/micro-ros-agent:humble udp4 --port 9999 -v4

Then, in another terminal, run:

ros2 run yahboom_esp32ai_car RobotCtrl 
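If the video window never appears or the car does not react, it is worth checking that both agents actually bridged their topics, for example:

ros2 topic list                      # the ESP32 topics should show up once both agents connect
ros2 topic hz /espRos/esp32camera    # confirms the camera stream is arriving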

The regions are not easy to tell apart, so here is an annotated figure; I roughly marked the zones with a paint tool. You can tune the decision boundaries by adjusting the x and y comparison values yourself. Run log (each "x NNN" line is the palm's pixel x coordinate, and the line after it shows the velocities published to the chassis):

ohu@bohu-TM1701:~/yahboomcar/yahboomcar_ws$ ros2 run yahboom_esp32ai_car RobotCtrl 
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1739176022.123238  464141 gl_context_egl.cc:85] Successfully initialized EGL. Major : 1 Minor: 5
I0000 00:00:1739176022.187429  464194 gl_context.cc:369] GL version: 3.2 (OpenGL ES 3.2 Mesa 23.2.1-1ubuntu3.1~22.04.3), renderer: Mesa Intel(R) UHD Graphics 620 (KBL GT2)
start it
INFO: Created TensorFlow Lite XNNPACK delegate for CPU.
W0000 00:00:1739176026.985982  464181 inference_feedback_manager.cc:114] Feedback manager requires a model with a single signature inference. Disabling support for feedback tensors.
W0000 00:00:1739176027.850109  464183 inference_feedback_manager.cc:114] Feedback manager requires a model with a single signature inference. Disabling support for feedback tensors.
W0000 00:00:1739176027.890610  464180 landmark_projection_calculator.cc:186] Using NORM_RECT without IMAGE_DIMENSIONS is only supported for the square ROI. Provide IMAGE_DIMENSIONS or use PROJECTION_MATRIX.
x 295
value: x:0.2,y:0.2
x 277
value: x:0.2,y:0.2
x 280
value: x:0.2,y:0.2
x 612
value: x:0.0,y:-0.2
x 622
value: x:0.0,y:-0.2
x 622
value: x:0.0,y:-0.2
x 614
value: x:0.0,y:-0.2
x 620
value: x:0.0,y:-0.2
x 607
value: x:0.0,y:-0.2
x 588
value: x:0.0,y:-0.2
x 586
value: x:0.0,y:-0.2
x 569
value: x:0.0,y:-0.2
x 562
value: x:0.0,y:-0.2
x 564
value: x:0.0,y:-0.2
x 550
value: x:0.0,y:-0.2
x 537
value: x:0.0,y:-0.2
x 463
value: x:0.0,y:-0.2
x 384
value: x:0.0,y:-0.2
x 313
value: x:0.0,y:0.0
x 262
value: x:0.0,y:0.2
x 231
value: x:0.0,y:0.2

4. Code

#!/usr/bin/env python3
# encoding: utf-8
import threading
import cv2 as cv
import numpy as np
from yahboom_esp32ai_car.media_library import *
import time
import rclpy
from rclpy.node import Node
from std_msgs.msg import Int32, Bool, UInt16
from cv_bridge import CvBridge
from sensor_msgs.msg import Image, CompressedImage

from rclpy.time import Time
import datetime


class HandCtrlArm(Node):
    def __init__(self,name):
        super().__init__(name)
        self.pub_Servo1 = self.create_publisher(Int32,"servo_s1" , 10)
        self.pub_Servo2 = self.create_publisher(Int32,"servo_s2" , 10)

        self.PWMServo_X = 0
        self.PWMServo_Y = 45
        self.s1_init_angle = Int32()
        self.s1_init_angle.data = self.PWMServo_X
        self.s2_init_angle = Int32()
        self.s2_init_angle.data = self.PWMServo_Y
        self.media_ros = Media_ROS()

        # make sure the servo angles start out centered
        for i in range(10):
            self.pub_Servo1.publish(self.s1_init_angle)
            self.pub_Servo2.publish(self.s2_init_angle)
            time.sleep(0.1)

        self.hand_detector = HandDetector()
        self.arm_status = True
        self.locking = True
        self.init = True
        self.pTime = 0
        self.add_lock = self.remove_lock = 0
        self.event = threading.Event()
        self.event.set()

    def process(self, frame):
        frame, lmList, bbox = self.hand_detector.findHands(frame)
        if len(lmList) != 0:
            threading.Thread(target=self.arm_ctrl_threading, args=(lmList,bbox)).start()
        else:
            self.media_ros.pub_vel(0.0,0.0,0.0)
        self.media_ros.pub_imgMsg(frame)
        return frame

    def arm_ctrl_threading(self, lmList, bbox):
        if self.event.is_set():
            self.event.clear()
            fingers = self.hand_detector.fingersUp(lmList)
            self.hand_detector.draw = True
            #gesture = self.hand_detector.get_gesture(lmList)
            self.arm_status = False
            # landmark 9 (base of the middle finger) stands in for the palm position
            point_x = lmList[9][1]
            point_y = lmList[9][2]

            print("x", point_x)
            # vertical bands -> linear x: upper part of the frame drives forward,
            # lower part drives backward (the frame is 640x480 after the resize)
            if point_y >= 270: x = -0.2
            elif point_y <= 150: x = 0.2
            else: x = 0.0
            # horizontal bands -> lateral y: left/right parts of the frame move the car sideways
            if point_x >= 350: y = -0.2
            elif point_x <= 300: y = 0.2
            else: y = 0.0
            self.media_ros.pub_vel(x, 0.0, y)
            print(f'value: x:{x},y:{y}')
            self.arm_status = True
            self.event.set()


class MY_Picture(Node):
    def __init__(self, name):
        super().__init__(name)
        self.bridge = CvBridge()
        self.sub_img = self.create_subscription(
            CompressedImage, '/espRos/esp32camera', self.handleTopic, 1) # image stream published by the ESP32 camera
        
        self.handctrlarm = HandCtrlArm('handctrl')
        self.last_stamp = None
        self.new_seconds = 0
        self.fps_seconds = 1

    def handleTopic(self, msg):
        self.last_stamp = msg.header.stamp  
        if self.last_stamp:
            total_secs = Time(nanoseconds=self.last_stamp.nanosec, seconds=self.last_stamp.sec).nanoseconds
            delta = datetime.timedelta(seconds=total_secs * 1e-9)
            seconds = delta.total_seconds()*100

            if self.new_seconds != 0:
                self.fps_seconds = seconds - self.new_seconds

            self.new_seconds = seconds  # keep this frame's timestamp for the next delta

        start = time.time()
        frame = self.bridge.compressed_imgmsg_to_cv2(msg)
        frame = cv.resize(frame, (640, 480))

        action = cv.waitKey(1) & 0xFF
        frame = self.handctrlarm.process(frame)
        if action == ord('q'):
            self.handctrlarm.media_ros.cancel()

        
        end = time.time()
        fps = 1 / ((end - start)+self.fps_seconds)
        
        text = "FPS : " + str(int(fps))
        cv.putText(frame, text, (10,20), cv.FONT_HERSHEY_SIMPLEX, 0.8, (0,255,255), 2)
        cv.imshow('frame', frame)



def main():
    rclpy.init() 
    esp_img = MY_Picture("My_Picture")
    print("start it")
    try:
        rclpy.spin(esp_img)
    except KeyboardInterrupt:
        pass
    finally:
        esp_img.destroy_node()
        rclpy.shutdown()

if __name__ == '__main__':
    main()

Main logic: what we actually read is the coordinate of landmark 9, the knuckle at the base of the middle finger, standing in for the palm. By checking where that point sits in the frame and publishing velocities along the chassis's x and y axes accordingly, we get the control behavior described above.

To wrap up, here is the node communication graph:
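A graph like this can be regenerated at any time while the nodes are running, using the standard ROS 2 introspection tool:

rqt_graph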