Vs+Qt环境下: C++调用CUDA的三种使用方式运行效果
进入正文前请保证你的计算机已完成"cuda基础环境配置"
正文
1、创建Qt工程,环境搭建参考上一篇文章中正文的第2、3节。然后设置debug模式为显示控制台。
2、创建好下图中三个cuda文件,并写入代码
CudaTest_C.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
/*
同一个线程块中的线程可以相互合作,不同线程块中的线程不能协作,根据此规则可以更好的配置线程的分布以适应要处理的数据。
主机调用核函数后,控制权立即返回(相当于函数立即返回),主机和设备异步执行。所有的核函数调用都是异步的;__global__核函数的返回值必须是void类型。
*/
// Adds the two scalars *a and *b into *c, and prints the launch geometry
// (grid size, block size, block index, thread index) from every thread.
// NOTE(review): every launched thread stores to the same *c location — a
// data race, but benign here because all threads store the same value.
__global__ void _AddKernel_(int *c, const int *a, const int *b)
{
*c = *a + *b;
// gridDim/blockDim are uniform for the whole launch; blockIdx/threadIdx
// identify this thread, so these four lines print once per launched thread.
printf("网格维度: \t\t\t(%d ,%d ,%d)\n", gridDim.x, gridDim.y, gridDim.z);
printf("线程块维度: \t\t\t(%d ,%d ,%d)\n", blockDim.x, blockDim.y, blockDim.z);
printf("线程块在网格中的索引: \t\t(%d ,%d ,%d)\n", blockIdx.x, blockIdx.y, blockIdx.z);
printf("线程在其线程块中的索引: \t(%d ,%d ,%d)\n", threadIdx.x, threadIdx.y, threadIdx.z);
}
/*
 * SampleCalc: computes a + b on the GPU via _AddKernel_ and returns the sum.
 * Returns -1 if any CUDA call fails (the error is logged to stderr).
 *
 * Fixes vs. the original:
 *  - every CUDA API call is error-checked;
 *  - kernel-launch errors are surfaced via cudaGetLastError();
 *  - cudaDeviceSynchronize() now runs before returning (the original called
 *    cudaDeviceReset() first and then synchronized the destroyed context);
 *  - cudaDeviceReset() removed: resetting here would tear down the CUDA
 *    context for the rest of the process, which still uses the device
 *    (CudaTest::RunCalc and the device-property queries run afterwards).
 */
extern "C" int SampleCalc(const int a, const int b)
{
    int c = -1;
    int *dev_a = nullptr;
    int *dev_b = nullptr;
    int *dev_c = nullptr;
    cudaError_t err = cudaSuccess;

    // Allocate device memory for the two inputs and the output, and copy
    // the host inputs into GPU memory.
    err = cudaMalloc((void **)&dev_a, sizeof(int));
    if (err != cudaSuccess) { fprintf(stderr, "cudaMalloc(dev_a): %s\n", cudaGetErrorString(err)); goto Cleanup; }
    err = cudaMemcpy(dev_a, &a, sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) { fprintf(stderr, "cudaMemcpy(dev_a): %s\n", cudaGetErrorString(err)); goto Cleanup; }
    err = cudaMalloc((void **)&dev_b, sizeof(int));
    if (err != cudaSuccess) { fprintf(stderr, "cudaMalloc(dev_b): %s\n", cudaGetErrorString(err)); goto Cleanup; }
    err = cudaMemcpy(dev_b, &b, sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) { fprintf(stderr, "cudaMemcpy(dev_b): %s\n", cudaGetErrorString(err)); goto Cleanup; }
    err = cudaMalloc((void **)&dev_c, sizeof(int));
    if (err != cudaSuccess) { fprintf(stderr, "cudaMalloc(dev_c): %s\n", cudaGetErrorString(err)); goto Cleanup; }

    /*
     * Launch configuration <<<1, 3>>>: a grid of 1 thread block, each block
     * containing 3 threads, so the kernel body executes 3 times in total.
     * (The first launch parameter is the number of blocks in the grid; the
     * second is the number of threads per block — the original comment had
     * these two roles swapped.)
     */
#if 1
    _AddKernel_<<<1, 3>>>(dev_c, dev_a, dev_b);
#else
    // Alternative: 2x2x1 = 4 blocks of 4x4x1 = 16 threads -> 64 threads total.
    {
        dim3 blocksPerGrid(2, 2, 1);
        dim3 threadsPerBlock(4, 4, 1);
        _AddKernel_<<<blocksPerGrid, threadsPerBlock>>>(dev_c, dev_a, dev_b);
    }
#endif
    // Launch-configuration errors are reported here, not by the launch itself.
    err = cudaGetLastError();
    if (err != cudaSuccess) { fprintf(stderr, "_AddKernel_ launch: %s\n", cudaGetErrorString(err)); goto Cleanup; }
    // Wait for the kernel (and its device-side printf output) to complete.
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize: %s\n", cudaGetErrorString(err)); goto Cleanup; }

    // Copy the GPU result back to the host.
    err = cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) { fprintf(stderr, "cudaMemcpy(c): %s\n", cudaGetErrorString(err)); c = -1; }

Cleanup:
    // cudaFree(nullptr) is a safe no-op, so unconditional frees are fine.
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return c;
}
CudaTest.cuh
#ifndef MAIN_CUH
#define MAIN_CUH
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Demo class showing how a C++ class implemented in a .cu file can be
// called from ordinary .cpp code (the declaration compiles under both
// nvcc and the host compiler).
class CudaTest
{
public:
CudaTest();
// Runs the 5-element vector-add demo three times, printing each result;
// returns 0 on success, 1 if any CUDA step fails.
int RunCalc();
// Adds `size` ints from a and b into c on the GPU; returns the first
// CUDA error encountered (cudaSuccess on a clean run).
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
};
#endif
CudaTest.cu
#include "CudaTest.cuh"
#include "stdio.h"
#include <Windows.h>
CudaTest::CudaTest() {}
// Element-wise vector add: each thread of a 1-D launch handles one index.
// NOTE(review): no bounds check — the caller must launch exactly one thread
// per element (here: one block of `size` threads).
__global__ void addKernel(int *c, const int *a, const int *b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// Runs the 5-element vector addition on the GPU three times and prints
// each result. Returns 0 on success, 1 if any CUDA step fails.
int CudaTest::RunCalc()
{
    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4, 5 };
    const int b[arraySize] = { 10, 20, 30, 40, 50 };
    int c[arraySize] = { 0 };

    for (unsigned short run = 0; run < 3; ++run)
    {
        // Add vectors in parallel on the device.
        const cudaError_t status = addWithCuda(c, a, b, arraySize);
        if (status != cudaSuccess)
        {
            fprintf(stderr, "addWithCuda failed!");
            return 1;
        }
        printf("%d --- {1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", run, c[0], c[1], c[2], c[3], c[4]);
        Sleep(1);
    }

    // cudaDeviceReset must be called before exiting in order for profiling
    // and tracing tools such as Nsight and Visual Profiler to show complete
    // traces.
    if (cudaDeviceReset() != cudaSuccess)
    {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Helper: adds `size` ints from a and b into c using the GPU.
// Allocates three device buffers, copies the inputs up, launches one thread
// per element on a single block, synchronizes, and copies the sum back.
// Returns the first CUDA error encountered (cudaSuccess on a clean run);
// the device buffers are always released before returning.
cudaError_t CudaTest::addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;

    // Single-pass body; any failure breaks out to the common cleanup below
    // (same flow as a goto-based cleanup, expressed as do/break).
    do {
        // Choose which GPU to run on, change this on a multi-GPU system.
        cudaStatus = cudaSetDevice(0);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
            break;
        }

        // Allocate GPU buffers for three vectors (two input, one output).
        cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
        if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); break; }
        cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
        if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); break; }
        cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
        if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); break; }

        // Copy input vectors from host memory to the GPU buffers.
        cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
        if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); break; }
        cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
        if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); break; }

        // One block of `size` threads: one thread per element.
        addKernel << <1, size >> > (dev_c, dev_a, dev_b);

        // A bad launch configuration is reported here, not by the launch.
        cudaStatus = cudaGetLastError();
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
            break;
        }

        // Wait for the kernel to finish and surface any execution errors.
        cudaStatus = cudaDeviceSynchronize();
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
            break;
        }

        // Copy the output vector from the GPU buffer to host memory.
        cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
        if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); }
    } while (0);

    // Common cleanup: cudaFree(0) is a harmless no-op on the never-allocated
    // pointers of an early-exit path.
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}
3、写入QtWidgetsApplication1.cpp代码
#include "QtWidgetsApplication1.h"
#include <QDebug>
#include <Windows.h>
#include "CudaTest.cuh"
extern "C"
{
int SampleCalc(const int a, const int b);
}
// Demonstrates three ways of driving CUDA from Qt/C++ code.
// Fixes vs. the original: cudaGetDeviceProperties is error-checked, and the
// size_t device properties (totalGlobalMem, totalConstMem, sharedMemPerBlock,
// memPitch, texturePitchAlignment) are printed with %zu — the original used
// %u, which truncates/garbles those values on 64-bit builds.
QtWidgetsApplication1::QtWidgetsApplication1(QWidget *parent) : QMainWindow(parent)
{
    ui.setupUi(this);

    /* Way 1 (recommended): call the extern "C" function implemented in the .cu file. */
    qDebug() << "SampleCalc: " << SampleCalc(10, 20) << "\n";

    /* Way 2 (recommended): call the class implemented in the .cu file. */
    CudaTest _cuda_;
    qDebug() << "_cuda_.RunCalc(); -> " << _cuda_.RunCalc() << "\n";

    /* Way 3 (not recommended): call CUDA runtime APIs directly from the .cpp;
       some CUDA symbols may fail to compile outside nvcc. */
    int driver_version(0), runtime_version(0);
    cudaDeviceProp deviceProp;
    if (cudaGetDeviceProperties(&deviceProp, 0) != cudaSuccess) {
        // No usable device 0: skip the property dump instead of printing garbage.
        fprintf(stderr, "cudaGetDeviceProperties failed!\n");
        return;
    }
    printf("\nDevice%d:\"%s\"\n", 0, deviceProp.name);
    // CUDA versions are encoded as 1000*major + 10*minor.
    cudaDriverGetVersion(&driver_version);
    printf("CUDA驱动版本: %d.%d\n", driver_version / 1000, (driver_version % 1000) / 10);
    cudaRuntimeGetVersion(&runtime_version);
    printf("CUDA运行时版本: %d.%d\n", runtime_version / 1000, (runtime_version % 1000) / 10);
    printf("设备计算能力: %d.%d\n", deviceProp.major, deviceProp.minor);
    printf("Total amount of Global Memory: %zu bytes\n", deviceProp.totalGlobalMem);
    printf("Number of SMs: %d\n", deviceProp.multiProcessorCount);
    printf("Total amount of Constant Memory: %zu bytes\n", deviceProp.totalConstMem);
    printf("Total amount of Shared Memory per block: %zu bytes\n", deviceProp.sharedMemPerBlock);
    printf("Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
    printf("Warp size: %d\n", deviceProp.warpSize);
    printf("Maximum number of threads per SM: %d\n", deviceProp.maxThreadsPerMultiProcessor);
    printf("Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
    printf("Maximum size of each dimension of a block: %d x %d x %d\n", deviceProp.maxThreadsDim[0],
           deviceProp.maxThreadsDim[1],
           deviceProp.maxThreadsDim[2]);
    printf("Maximum size of each dimension of a grid: %d x %d x %d\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
    printf("Maximum memory pitch: %zu bytes\n", deviceProp.memPitch);
    printf("Texture alignmemt: %zu bytes\n", deviceProp.texturePitchAlignment);
    // clockRate/memoryClockRate are reported in kHz.
    printf("Clock rate: %.2f GHz\n", deviceProp.clockRate * 1e-6f);
    printf("Memory Clock rate: %.0f MHz\n", deviceProp.memoryClockRate * 1e-3f);
    printf("Memory Bus Width: %d-bit\n", deviceProp.memoryBusWidth);
}
// Destructor: no resources owned beyond what Qt/ui manage automatically.
QtWidgetsApplication1::~QtWidgetsApplication1()
{}
Vs+Qt环境下: C++调用CUDA的三种使用方式运行效果
关注
笔者 - 东旭