CUDA Programming Practice (1)

#include "cuda_runtime.h"#include "device_launch_parameters.h"#include <stdio.h>cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);__global__ void addKernel(int *c, const int *a, const int *b){    int i = threadIdx.x;    c[i] = a[i] + b[i];}int main(){    const int arraySize = 5;    const int a[arraySize] = { 1, 2, 3, 4, 5 };    const int b[arraySize] = { 10, 20, 30, 40, 50 };    int c[arraySize] = { 0 };    // Add vectors in parallel.    cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "addWithCuda failed!");        return 1;    }    printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",        c[0], c[1], c[2], c[3], c[4]);    // cudaDeviceReset must be called before exiting in order for profiling and    // tracing tools such as Nsight and Visual Profiler to show complete traces.    cudaStatus = cudaDeviceReset();    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "cudaDeviceReset failed!");        return 1;    }getchar();    return 0;}// Helper function for using CUDA to add vectors in parallel.cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size){    int *dev_a = 0;    int *dev_b = 0;    int *dev_c = 0;    cudaError_t cudaStatus;    // Choose which GPU to run on, change this on a multi-GPU system.    cudaStatus = cudaSetDevice(0);    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");        goto Error;    }    // Allocate GPU buffers for three vectors (two input, one output)    .    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "cudaMalloc failed!");        goto Error;    }    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "cudaMalloc failed!");        goto Error;    }    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "cudaMalloc failed!");        goto Error;    }    // Copy input vectors from host memory to GPU buffers.    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "cudaMemcpy failed!");        goto Error;    }    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "cudaMemcpy failed!");        goto Error;    }    // Launch a kernel on the GPU with one thread for each element.    addKernel<<<1, size>>>(dev_c, dev_a, dev_b);    // Check for any errors launching the kernel    cudaStatus = cudaGetLastError();    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));        goto Error;    }        // cudaDeviceSynchronize waits for the kernel to finish, and returns    // any errors encountered during the launch.    cudaStatus = cudaDeviceSynchronize();    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);        goto Error;    }    // Copy output vector from GPU buffer to host memory.    
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "cudaMemcpy failed!");        goto Error;    }Error:    cudaFree(dev_c);    cudaFree(dev_a);    cudaFree(dev_b);       return cudaStatus;}
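Note that this template launches a single block (addKernel<<<1, size>>>), so it can only handle as many elements as one block allows (at most 1024 threads per block on current GPUs). Below is a minimal sketch, not part of the original template, of how the launch generalizes to larger arrays; addKernelLarge, n, blockSize and numBlocks are illustrative names:

// Hypothetical variant of addKernel for arrays larger than one block:
// each thread computes one element, guarded against running past the end.
__global__ void addKernelLarge(int *c, const int *a, const int *b, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // global thread index
    if (i < n)          // guard: the last block may be only partially used
        c[i] = a[i] + b[i];
}

// Launch with enough blocks to cover all n elements:
//   const int blockSize = 256;                        // threads per block
//   int numBlocks = (n + blockSize - 1) / blockSize;  // round up
//   addKernelLarge<<<numBlocks, blockSize>>>(dev_c, dev_a, dev_b, n);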

The second example multiplies two matrices with the cuBLAS library:

// CUDA runtime library + cuBLAS library
#include "cuda_runtime.h"
#include "cublas_v2.h"
#include <cstdlib>
#include <time.h>
#include <iostream>
using namespace std;

// Dimensions of the test matrices
int const M = 50;
int const N = 100;

int main()
{
    // Status variable for cuBLAS calls
    cublasStatus_t status;

    // Allocate host memory for the input matrices
    float *h_A = (float*)malloc(N*M*sizeof(float));
    float *h_B = (float*)malloc(N*M*sizeof(float));
    // Allocate host memory for the result matrix
    float *h_C = (float*)malloc(M*M*sizeof(float));

    // Fill the input matrices with random values in the range 1-10
    for (int i = 0; i < N*M; i++) {
        h_A[i] = (float)(rand() % 10 + 1);
        h_B[i] = (float)(rand() % 10 + 1);
    }

    // Print the input matrices
    cout << "Matrix A:" << endl;
    for (int i = 0; i < N*M; i++) {
        cout << h_A[i] << " ";
        if ((i + 1) % N == 0) cout << endl;
    }
    cout << endl;
    cout << "Matrix B:" << endl;
    for (int i = 0; i < N*M; i++) {
        cout << h_B[i] << " ";
        if ((i + 1) % M == 0) cout << endl;
    }
    cout << endl;

    /* Matrix multiplication on the GPU */

    // Create and initialize the cuBLAS handle
    cublasHandle_t handle;
    status = cublasCreate(&handle);
    if (status != CUBLAS_STATUS_SUCCESS) {
        if (status == CUBLAS_STATUS_NOT_INITIALIZED) {
            cout << "Failed to initialize the cuBLAS object" << endl;
        }
        getchar();
        return EXIT_FAILURE;
    }

    float *d_A, *d_B, *d_C;
    // Allocate device memory for the input matrices
    cudaMalloc((void**)&d_A,         // pointer to the allocated space
               N*M * sizeof(float)   // number of bytes to allocate
    );
    cudaMalloc((void**)&d_B, N*M * sizeof(float));
    // Allocate device memory for the result matrix
    cudaMalloc((void**)&d_C, M*M * sizeof(float));

    // Copy the input matrices into the device buffers
    cublasSetVector(N*M,            // number of elements to copy
                    sizeof(float),  // size of each element
                    h_A,            // source address on the host
                    1,              // stride between consecutive elements
                    d_A,            // destination address on the device
                    1               // stride between consecutive elements
    );
    cublasSetVector(N*M, sizeof(float), h_B, 1, d_B, 1);

    // Wait for the copies to finish
    cudaDeviceSynchronize();

    // Scalars passed to the multiplication; see the cuBLAS manual for details
    float a = 1;
    float b = 0;

    // Matrix multiplication. cuBLAS always interprets arrays as column-major,
    // so the row-major host data is seen as transposed; CUBLAS_OP_T undoes that.
    cublasSgemm(handle,       // cuBLAS handle
                CUBLAS_OP_T,  // op applied to A
                CUBLAS_OP_T,  // op applied to B
                M,            // rows of op(A) and C
                M,            // columns of op(B) and C
                N,            // columns of op(A) and rows of op(B)
                &a,           // alpha
                d_A,          // A in device memory
                N,            // lda
                d_B,          // B in device memory
                M,            // ldb
                &b,           // beta
                d_C,          // C in device memory (result matrix)
                M             // ldc
    );

    // Wait for the multiplication to finish
    cudaDeviceSynchronize();

    // Copy the result from device memory back to the host
    cublasGetVector(M*M,            // number of elements to copy
                    sizeof(float),  // size of each element
                    d_C,            // source address on the device
                    1,              // stride between consecutive elements
                    h_C,            // destination address on the host
                    1               // stride between consecutive elements
    );

    // Print the result; read row-major, it is the transpose of A*B
    cout << "Transpose of the result ((A*B)^T):" << endl;
    for (int i = 0; i < M*M; i++) {
        cout << h_C[i] << " ";
        if ((i + 1) % M == 0) cout << endl;
    }

    // Free host and device memory
    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    // Release the cuBLAS handle
    cublasDestroy(handle);

    getchar();
    return 0;
}
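Because cuBLAS interprets every array as column-major while the host buffers here are filled row-major, the result comes back as (A*B)^T. A well-known alternative is to swap the operand order: in column-major terms this computes B^T * A^T = (A*B)^T, which is exactly A*B when the buffer is read row-major. The call below is a sketch under the same handle, buffers and scalars as above, not part of the original post:

// Sketch: swap the operands so the row-major result is A*B directly.
// Row-major A (M x N) is column-major A^T; row-major B (N x M) is column-major B^T.
cublasSgemm(handle,
            CUBLAS_OP_N,  // first operand used as stored (column-major B^T)
            CUBLAS_OP_N,  // second operand used as stored (column-major A^T)
            M,            // rows of the first operand and of C
            M,            // columns of the second operand and of C
            N,            // shared inner dimension
            &a,           // alpha
            d_B, M,       // first operand B^T, leading dimension M
            d_A, N,       // second operand A^T, leading dimension N
            &b,           // beta
            d_C, M);      // C, leading dimension M; row-major view is A*B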


To summarize, a typical CUDA program follows seven steps (an error-checking wrapper for these calls is sketched after the list):

1. cudaSetDevice(0): select the GPU to run on.

2. cudaMalloc: allocate buffers in device memory.

3. cudaMemcpy: copy the inputs from host to device (cudaMemcpyHostToDevice).

4. gpu_kernel<<<blocks, threads>>>: launch the kernel.

5. cudaMemcpy: copy the results from device back to host (cudaMemcpyDeviceToHost).

6. cudaFree(d_a): release the device buffers.

7. cudaDeviceReset(): reset the device before exiting so that profiling tools such as Nsight see complete traces.
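Each of these runtime calls returns a cudaError_t. The first example checks every return value by hand; a common idiom, sketched here as an assumption rather than anything from this post, folds the check into a macro:

#include <cstdio>
#include <cstdlib>
#include "cuda_runtime.h"

// Hypothetical helper: print the failing call's location and abort on error.
#define CUDA_CHECK(call)                                              \
    do {                                                              \
        cudaError_t err_ = (call);                                    \
        if (err_ != cudaSuccess) {                                    \
            fprintf(stderr, "CUDA error %s at %s:%d\n",               \
                    cudaGetErrorString(err_), __FILE__, __LINE__);    \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

// Usage along the seven steps above, e.g.:
//   CUDA_CHECK(cudaSetDevice(0));
//   CUDA_CHECK(cudaMalloc((void**)&dev_a, size * sizeof(int)));
//   CUDA_CHECK(cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice));
//   CUDA_CHECK(cudaDeviceSynchronize());
//   CUDA_CHECK(cudaFree(dev_a));
//   CUDA_CHECK(cudaDeviceReset());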


For CUDA installation instructions, see http://blog.csdn.net/u011314529/article/details/51505029
