代码分为物体识别、坐标转换和机械臂控制三部分:
import cv2
import numpy as np
from pymycobot import UltraArm
import time
# Initialize the camera (device 0) and force a 640x480 capture size.
cap = cv2.VideoCapture(0)
cap.set(3, 640) # property 3 = CAP_PROP_FRAME_WIDTH
cap.set(4, 480) # property 4 = CAP_PROP_FRAME_HEIGHT
# Load the object-detection model (MobileNet SSD, Caffe format).
prototxt = "MobileNetSSD_deploy.prototxt"
model = "MobileNetSSD_deploy.caffemodel"
net = cv2.dnn.readNetFromCaffe(prototxt, model)
# The 21 VOC class labels MobileNet SSD was trained on; index 0 is background.
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
# Initialize the robot arm over serial.
arm = UltraArm("/dev/ttyACM0") # adjust to the actual serial port
arm.set_speed(50) # motion speed
# Preset storage-box drop coordinates (example values — measure on site).
storage_boxes = {
"A": [100, 50, 150], # [x, y, z] in arm workspace units (mm)
"B": [-100, 50, 150]
}
# Pixel-to-arm coordinate mapping parameters (must be calibrated on site).
IMAGE_WIDTH = 640
IMAGE_HEIGHT = 480
ARM_X_RANGE = 250  # arm X-axis working range: +/-250 mm
ARM_Y_RANGE = 250  # arm Y-axis working range: +/-250 mm


def img_to_arm_coords(x_pixel, y_pixel,
                      image_width=IMAGE_WIDTH, image_height=IMAGE_HEIGHT,
                      x_range=ARM_X_RANGE, y_range=ARM_Y_RANGE):
    """Map an image pixel coordinate to an arm workspace coordinate (mm).

    The image center maps to (0, 0). X grows to the right in both frames;
    image Y grows downward, so it is flipped to give an upward-growing arm Y.

    Args:
        x_pixel, y_pixel: pixel coordinate in the camera image.
        image_width, image_height: image size in pixels (defaults: calibration
            constants above).
        x_range, y_range: half-extent of the arm workspace in mm, so outputs
            span [-x_range, x_range] and [-y_range, y_range].

    Returns:
        (x_arm, y_arm) rounded to 2 decimal places.
    """
    # Normalize to [-0.5, 0.5] around the image center, then scale to mm.
    x_arm = (x_pixel / image_width - 0.5) * 2 * x_range
    y_arm = (0.5 - y_pixel / image_height) * 2 * y_range  # flip Y axis
    return round(x_arm, 2), round(y_arm, 2)
def pick_and_place(obj_x, obj_y, target_box):
    """Pick up the object at (obj_x, obj_y) and drop it into a storage box.

    Sequence: hover above the object, descend, close the gripper, lift,
    travel to the box, open the gripper, return home. sleep() calls give
    the arm time to complete each motion before the next command.

    Args:
        obj_x, obj_y: object position in arm workspace coordinates (mm).
        target_box: key into the module-level ``storage_boxes`` dict.

    Raises:
        KeyError: if ``target_box`` is not a known storage box.
    """
    # Resolve the drop target first so a bad box name fails fast,
    # before the arm has moved or gripped anything.
    target = storage_boxes[target_box]
    # Move above the object at a safe height.
    arm.set_position(obj_x, obj_y, 150, speed=50)
    time.sleep(2)
    # Descend to grasp height.
    arm.set_position(obj_x, obj_y, 50, speed=30)
    time.sleep(1)
    # Close the gripper on the object.
    arm.set_gripper(1)
    time.sleep(1)
    # Lift back to travel height.
    arm.set_position(obj_x, obj_y, 150, speed=30)
    time.sleep(1)
    # Travel to the storage box.
    arm.set_position(target[0], target[1], target[2], speed=50)
    time.sleep(2)
    # Open the gripper to release the object.
    arm.set_gripper(0)
    time.sleep(1)
    # Return to the home position.
    arm.set_position(0, 150, 150, speed=50)
    time.sleep(1)
# Main loop: grab frames, run detection, convert coordinates, pick and place.
# try/finally guarantees the camera, windows and arm are released even if an
# unexpected exception escapes the loop.
try:
    while True:
        ret, frame = cap.read()
        if not ret:
            break  # camera disconnected or stream ended
        # Run MobileNet SSD on a 300x300 letterbox of the frame.
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                     0.007843, (300, 300), 127.5)
        net.setInput(blob)
        detections = net.forward()
        # detections shape: (1, 1, N, 7); per row: [_, class_id, conf, x1, y1, x2, y2]
        for i in range(detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            if confidence <= 0.7:  # confidence threshold
                continue
            idx = int(detections[0, 0, i, 1])
            label = CLASSES[idx]
            # Only act on target object types. NOTE(review): "cup" is not in
            # the MobileNet SSD CLASSES list, so only "bottle" can match.
            if label not in ["bottle", "cup"]:
                continue
            # Box coords are normalized [0,1]; scale back to frame pixels.
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            centerX = (startX + endX) // 2
            centerY = (startY + endY) // 2
            # Draw the detection for the operator.
            cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)
            cv2.circle(frame, (centerX, centerY), 5, (0, 0, 255), -1)
            # Convert the pixel center to arm workspace coordinates.
            target_x, target_y = img_to_arm_coords(centerX, centerY)
            print(f"检测到物体:{label} 机械臂坐标:({target_x}, {target_y})")
            # Execute the grab (example: always place into box "A").
            try:
                pick_and_place(target_x, target_y, "A")
            except Exception as e:
                print(f"操作失败:{str(e)}")
            break  # handle only one object per frame
        cv2.imshow("Detection", frame)
        if cv2.waitKey(1) == 27:  # ESC to quit
            break
finally:
    # Release hardware resources.
    cap.release()
    cv2.destroyAllWindows()
    arm.release()
代码说明:
1. 硬件准备:
- 确保机械臂已正确连接并通过 ls /dev/tty* 确认串口设备名
- 摄像头安装位置应正对工作区域
- 根据实际布局测量储物盒坐标
2. 依赖安装:
pip install opencv-python numpy pymycobot
3. 坐标校准:
- 调整img_to_arm_coords()中的转换参数
- 通过实际测试验证坐标准确性
4. 物体识别:
- 使用MobileNet SSD模型检测常见物体
- 可根据需求更换为YOLO等更精确的模型
- 通过CLASSES过滤控制目标物体类型
5. 运动控制:
- 采用分层运动策略(先水平移动再垂直升降)
- 通过set_speed()控制运动速度保证稳定性
- 夹爪操作需根据实际型号调整压力参数
6. 安全注意事项:
- 首次运行时建议低速测试(修改set_speed参数)
- 确保机械臂工作范围内无障碍物
- 建议在夹爪末端安装缓冲材料