PyTorch YOLOv11 OBB Rotated-Box Detection + Windows Deployment + Inference Wrapper: Working Notes

Published: 2025-09-16



The previous post, [PyTorch YOLOv11 Object Detection + Windows Deployment + Inference Wrapper: Working Notes], covered the plain detector. This one walks through the full pipeline for YOLOv11 OBB rotated-box detection. Anything that overlaps with the previous post is abbreviated here, so read the two together. OK, let's begin.

1. Data Annotation and Format Conversion

1.1 As before, download the pretrained model and the source code first, or let the program download them at runtime
1.2 The main topic here is annotation and converting the data into the format YOLO OBB needs
1.2.1 Annotation tool: roLabelImg
Download and set it up yourself in a conda virtual environment.
Annotation works just like regular labelImg, with extra z/x/c/v keyboard shortcuts for adjusting a rotated box's angle.
Folder layout:
images
labels_all
1.2.2 First, what the annotated format looks like
The annotated format is: name cx cy w h angle
(screenshot)
The format actually needed is: name x1 y1 x2 y2 x3 y3 x4 y4, normalized, e.g.:

0 0.289583 0.151667 0.471354 0.311667 0.436979 0.708333 0.255208 0.546667
2 0.426042 0.458333 0.457292 0.48 0.450521 0.576667 0.419271 0.555
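For intuition, here is a minimal sketch of the geometry behind that conversion: rotate the four corners of a (cx, cy, w, h, angle) box around its center, then divide by the image size. The function name and rotation convention are my own for illustration; annotation tools differ in angle convention, which is why the script below passes a negated angle into its rotatePoint.

import math

def obb_to_normalized_corners(cx, cy, w, h, angle, img_w, img_h):
    # rotate the four axis-aligned corner offsets by `angle` (radians)
    # around the box center, then normalize by the image size
    cos_a, sin_a = math.cos(angle), math.sin(angle)
    corners = []
    for dx, dy in [(-w/2, -h/2), (w/2, -h/2), (w/2, h/2), (-w/2, h/2)]:
        corners.append((cx + dx * cos_a - dy * sin_a) / img_w)
        corners.append((cy + dx * sin_a + dy * cos_a) / img_h)
    return corners  # [x1, y1, x2, y2, x3, y3, x4, y4] in [0, 1]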

1.2.3 Format conversion: XML to TXT
This is split into several steps. Why not one script? Because I adapted it from someone else's code and have next to no Python background; all I care about is that C++ can eventually call the result.
1. First, convert the annotation tool's xml into four-corner-point xml
(screenshot)
2. Then extract the four corner points and the label from the xml and write them to txt
(screenshot)
3. Not done yet: the training/validation split has to happen first, and then the final conversion.

# File        : roxml_to_dota.py
# Description : convert xml files annotated with roLabelImg into DOTA-style xml,
#               then into DOTA-format txt files;
#               turns a rotated box cx,cy,w,h,angle (or an axis-aligned box
#               cx,cy,w,h) into four corner points x1,y1,x2,y2,x3,y3,x4,y4
import os
import xml.etree.ElementTree as ET
import math

cls_list = ['Target', 'label', 'Top']  # change to your own labels

def edit_xml(xml_file, dotaxml_file):
    """
    Rewrite one xml file as DOTA-style four-corner xml.
    :param xml_file: path of the input (roLabelImg) xml file
    :param dotaxml_file: path of the output xml file
    """

    # dxml_file = open(xml_file,encoding='gbk')
    # tree = ET.parse(dxml_file).getroot()

    tree = ET.parse(xml_file)
    objs = tree.findall('object')
    for ix, obj in enumerate(objs):
        x0 = ET.Element("x0")  # 创建节点
        y0 = ET.Element("y0")
        x1 = ET.Element("x1")
        y1 = ET.Element("y1")
        x2 = ET.Element("x2")
        y2 = ET.Element("y2")
        x3 = ET.Element("x3")
        y3 = ET.Element("y3")
        # obj_type = obj.find('bndbox')
        # type = obj_type.text
        # print(xml_file)

        if obj.find('robndbox') is None:
            obj_bnd = obj.find('bndbox')
            obj_xmin = obj_bnd.find('xmin')
            obj_ymin = obj_bnd.find('ymin')
            obj_xmax = obj_bnd.find('xmax')
            obj_ymax = obj_bnd.find('ymax')
            # guard against negative coordinates
            xmin = max(float(obj_xmin.text), 0)
            ymin = max(float(obj_ymin.text), 0)
            xmax = max(float(obj_xmax.text), 0)
            ymax = max(float(obj_ymax.text), 0)
            obj_bnd.remove(obj_xmin)  # remove the old nodes
            obj_bnd.remove(obj_ymin)
            obj_bnd.remove(obj_xmax)
            obj_bnd.remove(obj_ymax)
            x0.text = str(xmin)
            y0.text = str(ymax)
            x1.text = str(xmax)
            y1.text = str(ymax)
            x2.text = str(xmax)
            y2.text = str(ymin)
            x3.text = str(xmin)
            y3.text = str(ymin)
        else:
            obj_bnd = obj.find('robndbox')
            obj_bnd.tag = 'bndbox'  # rename the node
            obj_cx = obj_bnd.find('cx')
            obj_cy = obj_bnd.find('cy')
            obj_w = obj_bnd.find('w')
            obj_h = obj_bnd.find('h')
            obj_angle = obj_bnd.find('angle')
            cx = float(obj_cx.text)
            cy = float(obj_cy.text)
            w = float(obj_w.text)
            h = float(obj_h.text)
            angle = float(obj_angle.text)
            obj_bnd.remove(obj_cx)  # remove the old nodes
            obj_bnd.remove(obj_cy)
            obj_bnd.remove(obj_w)
            obj_bnd.remove(obj_h)
            obj_bnd.remove(obj_angle)

            # the angle is negated to match the annotation tool's rotation direction
            x0.text, y0.text = rotatePoint(cx, cy, cx - w / 2, cy - h / 2, -angle)
            x1.text, y1.text = rotatePoint(cx, cy, cx + w / 2, cy - h / 2, -angle)
            x2.text, y2.text = rotatePoint(cx, cy, cx + w / 2, cy + h / 2, -angle)
            x3.text, y3.text = rotatePoint(cx, cy, cx - w / 2, cy + h / 2, -angle)

        # obj.remove(obj_type)  # remove node
        obj_bnd.append(x0)  # append the new corner-point nodes
        obj_bnd.append(y0)
        obj_bnd.append(x1)
        obj_bnd.append(y1)
        obj_bnd.append(x2)
        obj_bnd.append(y2)
        obj_bnd.append(x3)
        obj_bnd.append(y3)

    # write the converted tree once, after all objects have been processed
    tree.write(dotaxml_file, method='xml', encoding='utf-8')


# rotate point (xp, yp) around center (xc, yc) by theta; used to produce the four corner points
def rotatePoint(xc, yc, xp, yp, theta):
    xoff = xp - xc
    yoff = yp - yc
    cosTheta = math.cos(theta)
    sinTheta = math.sin(theta)
    pResx = cosTheta * xoff + sinTheta * yoff
    pResy = - sinTheta * xoff + cosTheta * yoff
    return str(int(xc + pResx)), str(int(yc + pResy))


def totxt(xml_path, out_path):
    # out_path: where the generated txt files are saved; change as needed

    files = os.listdir(xml_path)
    i = 0
    for file in files:

        tree = ET.parse(xml_path + os.sep + file)
        root = tree.getroot()

        name = file.split('.')[0]

        output = out_path + '/' + name + '.txt'
        out_file = open(output, 'w')  # avoid shadowing the loop variable `file`
        i = i + 1
        objs = tree.findall('object')
        for obj in objs:
            cls = obj.find('name').text
            box = obj.find('bndbox')
            x0 = int(float(box.find('x0').text))
            y0 = int(float(box.find('y0').text))
            x1 = int(float(box.find('x1').text))
            y1 = int(float(box.find('y1').text))
            x2 = int(float(box.find('x2').text))
            y2 = int(float(box.find('y2').text))
            x3 = int(float(box.find('x3').text))
            y3 = int(float(box.find('y3').text))
            # clamp negative coordinates to 0
            x0, y0, x1, y1 = max(x0, 0), max(y0, 0), max(x1, 0), max(y1, 0)
            x2, y2, x3, y3 = max(x2, 0), max(y2, 0), max(x3, 0), max(y3, 0)
            for cls_index, cls_name in enumerate(cls_list):
                if cls == cls_name:
                    # DOTA line: x1 y1 x2 y2 x3 y3 x4 y4 class_name difficulty
                    # (the class index stands in for the "difficulty" field here)
                    out_file.write("{} {} {} {} {} {} {} {} {} {}\n".format(x0, y0, x1, y1, x2, y2, x3, y3, cls, cls_index))
        out_file.close()
        # print(output)
        print(i)


if __name__ == '__main__':
    # -----**** Step 1: convert the xml files into four-corner (DOTA-style) xml ****-----
    roxml_path = '.../ultralytics-main/datasets/data_zsyobb/labels_all'
    dotaxml_path = '.../ultralytics-main/datasets/data_zsyobb/dotaxml'
    out_path = '.../ultralytics-main/datasets/data_zsyobb/dotatxt'
    os.makedirs(dotaxml_path, exist_ok=True)  # make sure the output folders exist
    os.makedirs(out_path, exist_ok=True)
    filelist = os.listdir(roxml_path)
    for file in filelist:
        edit_xml(os.path.join(roxml_path, file), os.path.join(dotaxml_path, file))

    # -----**** Step 2: convert the four-corner xml into DOTA txt ****-----
    totxt(dotaxml_path, out_path)


1.2.4 Splitting into training and validation sets
Running this script splits the files under the images and dotatxt folders 7:3 (train:val) and moves them into subfolders.

import os
import shutil

from tqdm import tqdm
import random

Dataset_folder = r'...\ultralytics-main\datasets\data_zsyobb'
# change the current working directory to the given path
os.chdir(os.path.join(Dataset_folder, 'images'))  # images: name of the image folder
folder = '.'  # i.e. the path set by os.chdir(...) above
imgs_list = os.listdir(folder)

random.seed(123)  # fix the seed so a re-run (e.g. after a crash) shuffles imgs_list in the same order

random.shuffle(imgs_list)  # shuffle

val_scal = 0.3  # validation-set ratio
val_number = int(len(imgs_list) * val_scal)
val_files = imgs_list[:val_number]
train_files = imgs_list[val_number:]

print('all_files:', len(imgs_list))
print('train_files:', len(train_files))
print('val_files:', len(val_files))

os.mkdir('train')
for each in tqdm(train_files):
    shutil.move(each, 'train')

os.mkdir('val')
for each in tqdm(val_files):
    shutil.move(each, 'val')

os.chdir('../dotatxt')

os.mkdir('train_original')
for each in tqdm(train_files):
    txt_file = os.path.splitext(each)[0] + '.txt'
    shutil.move(txt_file, 'train_original')  # must match the folder created above

os.mkdir('val_original')
for each in tqdm(val_files):
    txt_file = os.path.splitext(each)[0] + '.txt'
    shutil.move(txt_file, 'val_original')  # must match the folder created above

print('split finished')

1.2.5 Last step, hang in there
Once I'm more at home in Python, all of this should go into one script; doing it piecemeal is a pain.
1. Create a labels folder and copy the two folders train_original and val_original from the dotatxt folder into it (or simply rename the dotatxt folder to labels)
2. Run this code, which uses the data converter that ships with ultralytics

from ultralytics.data.converter import convert_dota_to_yolo_obb
convert_dota_to_yolo_obb("...\\ultralytics-main\\datasets\\data_zsyobb")
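As I understand the converter, it expects the DOTA-style layout below (which is exactly what the steps above produce) and writes the final YOLO-format labels into labels/train and labels/val:

data_zsyobb/
    images/
        train/
        val/
    labels/
        train_original/    (DOTA txt files moved here in step 1)
        val_original/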

Prerequisite: open the convert_dota_to_yolo_obb function and replace its class mapping with your own labels
(screenshot)
and change the hardcoded image extension to match your own files
(screenshot)
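In the ultralytics source I have seen, those two edits land inside ultralytics/data/converter.py roughly as follows. This is a sketch of in-place edits, not a drop-in patch, so verify it against your installed version:

# (a) inside convert_dota_to_yolo_obb, replace DOTA's default class_mapping
#     with this post's labels:
class_mapping = {"Target": 0, "label": 1, "Top": 2}

# (b) the function skips any image whose suffix is not ".png"; change that
#     check to your own extension, e.g.:
#         if image_path.suffix != ".jpg":
#             continue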
1.2.6 Format conversion done; training can start

2. Training the Model

2.1 Modify the yaml files: copy ultralytics-main\ultralytics\cfg\models\11\yolo11-obb.yaml
and tweak it; adding a few lines is enough
(screenshot)
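The screenshot is not reproduced here, so as a hedged sketch: the copied model yaml usually only needs its class count (nc) adjusted, and the dataset yaml that the training script's --data flag points to (the yolo11-zsyobb.yaml referenced below) would look roughly like this for this post's folder layout. The exact contents are my assumption; adjust paths and names to your setup.

# yolo11-zsyobb.yaml -- dataset yaml (sketch)
path: datasets/data_zsyobb   # dataset root
train: images/train          # relative to path
val: images/val
names:
  0: Target
  1: label
  2: Top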
2.2 That's it; just paste in the training code below
Tuning can wait; get the pipeline running end to end first

import argparse
from ultralytics import YOLO
def parse_opt(known=False):
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='yolo11n-obb.pt', help='initial weights path')
    parser.add_argument('--epochs', type=int, default=640, help='total training epochs')
    parser.add_argument('--imgsz', type=int, default=1024, help='train, val image size (pixels)')
    parser.add_argument('--batch', type=int, default=2, help='total batch size for all GPUs, -1 for autobatch')
    parser.add_argument('--lr0', type=float, default=0.001, help='(float) initial learning rate (i.e. SGD=1E-2, Adam=1E-3)')
    parser.add_argument('--cls', type=float, default=1.5, help='(float) cls loss gain (scale with pixels)')
    parser.add_argument('--data', type=str, default='..../ultralytics-main/yolo11-zsyobb.yaml', help='dataset.yaml path')
    parser.add_argument('--workers', type=int, default=0)
    parser.add_argument('--device', type=str, default='0', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    return parser.parse_known_args()[0] if known else parser.parse_args()

def main(opt):
    model = YOLO(opt.model)
    model.train(data=opt.data, epochs=opt.epochs, imgsz=opt.imgsz, batch=opt.batch,
                device=opt.device, lr0=opt.lr0, cls=opt.cls, workers=opt.workers)
    model.val(imgsz=opt.imgsz)


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)

2.3 Run a quick prediction

#------------------------------- Prediction --------------------------
from ultralytics import YOLO
import cv2
yolo = YOLO("best_sypobb.pt", task="obb")  # use the obb task for an OBB model
result = yolo(source="..../ultralytics-main/datasets/data_zsyobb/images_all", conf=0.6, vid_stride=1, iou=0.3, save=True)
print(result[0].obb)
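If you want structured data rather than the raw printout, the OBB result object exposes per-box tensors. Attribute names below are from the ultralytics Results API as I know it; verify against your installed version. Continuing from the snippet above:

obb = result[0].obb
print(obb.xywhr)     # N x 5 tensor: cx, cy, w, h, angle (radians)
print(obb.xyxyxyxy)  # N x 4 x 2 tensor: the four corner points
print(obb.conf)      # N confidences
print(obb.cls)       # N class indices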

2.4 Model export: TorchScript

#--------------------- Model export ------------------------------
from ultralytics import YOLO
# Load a model
model = YOLO("best_sypobb.pt")
# Export the model
model.export(format="torchscript", imgsz=1024, device=0, batch=1)

3. Windows LibTorch Deployment

Honestly, this part drove me mad. I searched the web for ages and never found source code you could just paste in; everything out there deploys via ONNX. I had only just gotten LibTorch YOLOv11 detection working and had no idea how to adapt it to OBB, so this deployment guide is written in detail; it should be the only directly pasteable C++ source for OBB LibTorch deployment on CSDN.
3.1 First, environment setup: see my previous post (linked at the top).
3.2 Straight to the source code:

#define _USE_MATH_DEFINES  // for M_PI with MSVC
#include <cmath>
#include <iostream>
#include <vector>
#include <string>
#include <algorithm>
#include <chrono>

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <torch/torch.h>
#include <torch/script.h>

using torch::indexing::Slice;
using torch::indexing::None;
struct ResizeParams
{
	// offset of the resized image's top-left corner from (0, 0) of the padded input
	int top;
	int left;
	// width/height of the image with the gray letterbox borders removed
	int imgw;
	int imgh;
	// scale ratio between the resized image and the original image
	float ratiow;
	float ratioh;
};
struct Object
{
	cv::RotatedRect rrect;
	int label;
	float prob;
};
float intersection_area(const Object& a, const Object& b)
{
	std::vector<cv::Point2f> intersection;
	cv::rotatedRectangleIntersection(a.rrect, b.rrect, intersection);
	if (intersection.empty())
		return 0.f;
	return cv::contourArea(intersection);
}
void nms_sorted_bboxes(const std::vector<Object>& objects, std::vector<int>& picked, float nms_threshold, bool agnostic = false)
{
	picked.clear();

	const int n = objects.size();

	std::vector<float> areas(n);
	for (int i = 0; i < n; i++)
	{
		areas[i] = objects[i].rrect.size.area();
	}

	for (int i = 0; i < n; i++)
	{
		const Object& a = objects[i];

		int keep = 1;
		for (int j = 0; j < (int)picked.size(); j++)
		{
			const Object& b = objects[picked[j]];

			if (!agnostic && a.label != b.label)
				continue;

			// intersection over union
			float inter_area = intersection_area(a, b);
			float union_area = areas[i] + areas[picked[j]] - inter_area;
			// float IoU = inter_area / union_area;
			if (inter_area / union_area > nms_threshold)
				keep = 0;
		}

		if (keep)
			picked.push_back(i);
	}
}
// flg true/false selects descending/ascending order
template<typename T> void vector_sort(std::vector<T> vector_input, std::vector<size_t>& idx, bool flg) {
    int num = vector_input.size();
    idx.resize(num);
    for (int i = 0; i < num; i++)
    {
        idx[i] = i;
    }
    if (flg) {
        std::sort(idx.begin(), idx.end(), [&vector_input](size_t i1, size_t i2) {return vector_input[i1] > vector_input[i2]; });
    } else {
        std::sort(idx.begin(), idx.end(), [&vector_input](size_t i1, size_t i2) {return vector_input[i1] < vector_input[i2]; });
    }
}
// data preparation before NMS: decode boxes, filter by confidence, sort by score
void PrepareData(torch::Tensor output, std::vector<Object>& objects, float conf) {
	objects.clear();
	auto output_data = output.squeeze();
	int num_params = output_data.size(0);  // 8 = 4 box params + 3 class scores + 1 angle
	int num_boxes = output_data.size(1);   // 21504 candidate boxes at 1024x1024 input
	int classnum = num_params - 5;         // number of classes
	std::vector<Object> Tobjvec;
	std::vector<float> confvec;
	for (int i = 0; i < num_boxes; i++) {
		float x_center = output_data[0][i].item<float>();  // center x
		float y_center = output_data[1][i].item<float>();  // center y
		float width = output_data[2][i].item<float>();     // width
		float height = output_data[3][i].item<float>();    // height
		float angle = output_data[num_params - 1][i].item<float>();  // angle in radians (last row of the output)
		int label = 0;
		float maxconf = 0;
		for (int j = 4; j < num_params-1; j++) {
			float conf_ = output_data[j][i].item<float>();
			if (conf_ > maxconf) {
				maxconf = conf_;
				label = j - 4;
			}
		}
		// skip boxes whose best class score is below the given threshold
		if (maxconf < conf) {
			continue;
		}
		// fill the result struct
		Object obj1;
		obj1.label = label;
		obj1.prob = maxconf;
		// the model outputs radians; cv::RotatedRect expects degrees
		obj1.rrect = cv::RotatedRect(cv::Point2f{ x_center,y_center }, cv::Size(width, height), angle * 180.0 / M_PI);
		Tobjvec.push_back(obj1);
		confvec.push_back(maxconf);
	}
	// sort by confidence, descending; NMS must start from the highest-confidence box
	std::vector<size_t> sortid;
	vector_sort(confvec, sortid, true);
	for (int i = 0; i < sortid.size(); i++) {
		objects.push_back(Tobjvec[sortid[i]]);
	}
}
float generate_scale(cv::Mat& image, const std::vector<int>& target_size) {
	int origin_w = image.cols;
	int origin_h = image.rows;

	int target_h = target_size[0];
	int target_w = target_size[1];

	float ratio_h = static_cast<float>(target_h) / static_cast<float>(origin_h);
	float ratio_w = static_cast<float>(target_w) / static_cast<float>(origin_w);
	float resize_scale = std::min(ratio_h, ratio_w);
	return resize_scale;
}


float letterbox(cv::Mat &input_image, cv::Mat &output_image, const std::vector<int> &target_size, ResizeParams &params) {
	if (input_image.cols == target_size[1] && input_image.rows == target_size[0]) {
		if (input_image.data == output_image.data) 
		{
			return 1.;
		}
		else 
		{
			output_image = input_image.clone();
			return 1.;
		}
	}

	float resize_scale = generate_scale(input_image, target_size);
	int new_shape_w = std::round(input_image.cols * resize_scale);
	int new_shape_h = std::round(input_image.rows * resize_scale);
	float padw = (target_size[1] - new_shape_w) / 2.;
	float padh = (target_size[0] - new_shape_h) / 2.;

	int top = std::round(padh - 0.1);
	int bottom = std::round(padh + 0.1);
	int left = std::round(padw - 0.1);
	int right = std::round(padw + 0.1);

	params.top = top;
	params.left = left;
	params.imgw = new_shape_w;
	params.imgh = new_shape_h;
	params.ratiow = resize_scale;
	params.ratioh = resize_scale;


	cv::resize(input_image, output_image,
		cv::Size(new_shape_w, new_shape_h),
		0, 0, cv::INTER_AREA);

	cv::copyMakeBorder(output_image, output_image, top, bottom, left, right,
		cv::BORDER_CONSTANT, cv::Scalar(114., 114., 114));
	return resize_scale;
}
void drawImage(cv::Mat &image, Object obj1, ResizeParams params)
{
	std::vector<cv::Scalar> colorlabel;  // one color per class label; extend if you have more classes
	colorlabel.push_back(cv::Scalar(255, 0, 0));
	colorlabel.push_back(cv::Scalar(0, 255, 0));
	colorlabel.push_back(cv::Scalar(0, 0, 255));

	cv::Point2f corners[4];
	obj1.rrect.points(corners);
	cv::Point2f ori_corners[4];
	for (size_t i = 0; i < 4; i++) {
		ori_corners[i].x = (corners[i].x - params.left) / params.ratiow;
		ori_corners[i].y = (corners[i].y - params.top) / params.ratioh;
	}
	for (int i = 0; i < 4; i++) {
		cv::line(image, ori_corners[i], ori_corners[(i + 1) % 4], colorlabel[obj1.label], 1);
	}
}
std::vector<float>RotatedRectToXY4(Object obj1, ResizeParams params) {
	cv::Point2f corners[4];
	obj1.rrect.points(corners);
	cv::Point2f ori_corners[4];
	for (size_t i = 0; i < 4; i++) {
		ori_corners[i].x = (corners[i].x - params.left) / params.ratiow;
		ori_corners[i].y = (corners[i].y - params.top) / params.ratioh;
	}
	std::vector<float>corner;
	for (size_t i = 0; i < 4; i++) {
		corner.push_back(ori_corners[i].x);
		corner.push_back(ori_corners[i].y);
	}
	corner.push_back(obj1.label);
	corner.push_back(obj1.prob);
	return corner;
}
#include <windows.h>  // included after the torch/OpenCV headers to avoid min/max macro clashes
int main() {
	printf("torch::cuda::is_available:%d\n", torch::cuda::is_available());
	// Device
	//torch::Device device(torch::cuda::is_available() ? torch::kCUDA : torch::kCPU);
	torch::Device device(torch::kCUDA);
	std::vector<std::string> classes{ "pepper" };

	try {
		std::string model_path = ".../ultralytics-main/best_sypobb.torchscript";
		torch::jit::script::Module yolo_model;
		LoadLibraryA("ATen_cuda.dll");
		LoadLibraryA("c10_cuda.dll");
		LoadLibraryA("torch_cuda.dll");
		LoadLibraryA("torchvision.dll");
		yolo_model = torch::jit::load(model_path, device);
		yolo_model.eval();
		yolo_model.to(device, torch::kFloat32);

		ResizeParams params;
		// Load image and preprocess
		cv::Mat image = cv::imread(".../ultralytics-main/20250829-102331640.jpg");
		cv::Mat input_image;
		letterbox(image, input_image, { 1024, 1024}, params);
		cv::cvtColor(input_image, input_image, cv::COLOR_BGR2RGB);

		torch::Tensor image_tensor = torch::from_blob(input_image.data, { input_image.rows, input_image.cols, 3 }, torch::kByte).to(device);
		image_tensor = image_tensor.toType(torch::kFloat32).div(255);
		image_tensor = image_tensor.permute({ 2, 0, 1 });
		image_tensor = image_tensor.unsqueeze(0);
		std::vector<torch::jit::IValue> inputs{ image_tensor };


		auto start = std::chrono::high_resolution_clock::now();
		// Inference
		torch::Tensor output = yolo_model.forward(inputs).toTensor().cpu();
		auto end = std::chrono::high_resolution_clock::now();
		std::cout << "inference ms: "
			<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << std::endl;
		std::vector<Object> objects;
		PrepareData(output, objects, 0.6);
		for (int i = 0; i < objects.size(); i++) {
			printf("%d %d %.3f\n",i, objects[i].label, objects[i].prob);
		}
		// run non-maximum suppression
		std::vector<int> picked;
		nms_sorted_bboxes(objects, picked, 0.3);
		for (int i = 0; i < picked.size(); i++) {
			printf("%d %d %d %.3f\n", i, picked[i], objects[picked[i]].label, objects[picked[i]].prob);
		}
		for (int i = 0; i < picked.size(); i++) {
			int id = picked[i];
			// draw the box, mapped back to original-image coordinates
			drawImage(image, objects[id], params);
			// print the four corner points (plus label and confidence)
			std::vector<float> corner = RotatedRectToXY4(objects[id], params);
			for (int j = 0; j < corner.size(); j++) {
				printf("%.3f ", corner[j]);
			}
			printf("\n");
		}
		cv::imwrite("..../20250829-102331640_out.jpg", image);
	}
	catch (const c10::Error& e) {
		std::cout << e.msg() << std::endl;
	}

	return 0;
}

3.3 A quick walkthrough
3.3.1 Load the model and run the forward pass; the output tensor is 1 * 8 * 21504 (YOLOv8 detection at 640 input gives 1 * 8 * 8400; here, at 1024 input, the candidate count is 128x128 + 64x64 + 32x32 = 21504, one per anchor point across the three strides)
1 means one image
8 is the number of parameters per box, namely cenx ceny w h conf1 conf2 conf3 angle
21504 is the number of candidate boxes
Note: with more classes it becomes 1 * (x, y, w, h, nc1, nc2, …, ncn, angle) * 21504; adjust the code accordingly. A quick way to verify the layout from Python is sketched below.
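A minimal sketch for sanity-checking that layout from Python before touching any C++; it assumes the TorchScript file exported in 2.4, and map_location keeps everything on the CPU in case the model was exported for GPU:

import torch

model = torch.jit.load("best_sypobb.torchscript", map_location="cpu")
model.eval()
dummy = torch.zeros(1, 3, 1024, 1024)  # batch, channels, height, width
with torch.no_grad():
    out = model(dummy)
print(out.shape)  # expect torch.Size([1, 8, 21504]) for 3 classes at 1024 input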
3.3.2 Then comes non-maximum suppression
1. First, settle each box's label: whichever of conf1 conf2 conf3 is largest decides the class
2. Pull out the boxes that pass the confidence threshold
3. Sort them by confidence, descending; NMS has to start from the highest-confidence box
4. NMS itself comes down to IoU. Here the rotated-box intersection over union is computed with OpenCV's built-in function; boxes whose IoU with every already-kept box stays below the NMS threshold are kept, and those survivors are the final predictions (a Python mirror of this IoU is sketched below)
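For reference, the same rotated IoU takes only a few lines of Python. This is a sketch mirroring the C++ intersection_area above; cv2.rotatedRectangleIntersection takes ((cx, cy), (w, h), angle_in_degrees) tuples, the same layout as cv::RotatedRect:

import cv2

def rotated_iou(rect_a, rect_b):
    # rect: ((cx, cy), (w, h), angle_deg)
    ret, region = cv2.rotatedRectangleIntersection(rect_a, rect_b)
    if region is None or len(region) == 0:
        return 0.0
    inter = cv2.contourArea(region)
    area_a = rect_a[1][0] * rect_a[1][1]
    area_b = rect_b[1][0] * rect_b[1][1]
    return inter / (area_a + area_b - inter)

print(rotated_iou(((50, 50), (40, 20), 0.0), ((55, 50), (40, 20), 30.0)))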
3.3.3 With the predicted boxes in hand, extract the four corner points and map them back to original-image coordinates
1. Get the corner points with OpenCV's built-in RotatedRect::points, which saves writing it by hand
2. The mapping back can't be a plain width/height rescale, because the initial resize added gray letterbox borders; subtract the top/left border offsets first, then divide by the resize ratio
3. Dig the code for all of this out of the listing above
Before NMS, the boxes that pass the confidence threshold look something like this:
gray borders plus a pile of overlapping boxes
(screenshot)

After NMS and mapping back to the original image, the prediction looks like this.
Code output: four corner points + three classes (0, 1, 2) + confidences (0.961, 0.914, 0.892)
(screenshot)
The mapped-back image itself isn't posted since it's from a client project; draw the boxes from the printed coordinates yourself.
Here is a screenshot from a different project instead
(screenshot)

4. Windows LibTorch DLL Wrapping + Qt MinGW Invocation

See the previous post; the process is identical, just reorganize this code accordingly.

5. Segmentation (segment) is left for later; this application doesn't need it yet, so I'll pick it up whenever I have time.

If this write-up helped you, leave a like and a bookmark…