CUDA: adding two arrays, with the array length passed to the kernel

#include "cuda_runtime.h"#include "device_launch_parameters.h"#include <stdio.h>#include<iostream> float A[1000]; float B[1000];float C[1000];cudaError_t addWithCuda(float *c, float *a, float *b, int size);__global__ void addKernel(float *C,float *A,float *B,int n){int i = threadIdx.x+blockDim.x*blockIdx.x;if(i<n) C[i]=A[i]+B[i];}int main(){int arraySize = 1000;    //int c[arraySize] = { 0 };for(int i=0;i<arraySize;i++){A[i]=i+1;B[i]=i+1;}    // Add vectors in parallel.    cudaError_t cudaStatus = addWithCuda(C, A, B, arraySize);    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "addWithCuda failed!");int d;std::cin>>d;        return 1;    }   /* printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",        c[0], c[1], c[2], c[3], c[4]);*/for(int i=0;i<arraySize;i++){printf("%f ",C[i]);if((i+1)%20==0) printf("\n");}    // cudaDeviceReset must be called before exiting in order for profiling and    // tracing tools such as Nsight and Visual Profiler to show complete traces.    cudaStatus = cudaDeviceReset();    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "cudaDeviceReset failed!");int d;std::cin>>d;        return 1;    }int d;std::cin>>d;    return 0;}// Helper function for using CUDA to add vectors in parallel.cudaError_t addWithCuda(float *c, float *a, float *b, int size){    float *dev_a = 0;    float *dev_b = 0;    float *dev_c = 0;int *dev_size=0;    cudaError_t cudaStatus;    // Choose which GPU to run on, change this on a multi-GPU system.    cudaStatus = cudaSetDevice(0);    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");        goto Error;    }    // Allocate GPU buffers for three vectors (two input, one output)    .    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(float));    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "cudaMalloc failed!");        goto Error;    }    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(float));    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "cudaMalloc failed!");        goto Error;    }    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(float));    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "cudaMalloc failed!");        goto Error;    }    // Copy input vectors from host memory to GPU buffers.    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(float), cudaMemcpyHostToDevice);    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "cudaMemcpy failed!");        goto Error;    }    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(float), cudaMemcpyHostToDevice);    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "cudaMemcpy failed!");        goto Error;    }/*  cudaStatus = cudaMemcpy(dev_size, &size, sizeof(int), cudaMemcpyHostToDevice);    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "cudaMemcpy failed!");        goto Error;}*/    // Launch a kernel on the GPU with one thread for each element.    addKernel<<<ceil(size/256.0),256 >>>(dev_c, dev_a, dev_b,size);    // Check for any errors launching the kernel    cudaStatus = cudaGetLastError();    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));int d;std::cin>>d;        goto Error;    }        // cudaDeviceSynchronize waits for the kernel to finish, and returns    // any errors encountered during the launch.    
cudaStatus = cudaDeviceSynchronize();    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);        goto Error;    }    // Copy output vector from GPU buffer to host memory.    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);    if (cudaStatus != cudaSuccess) {        fprintf(stderr, "cudaMemcpy failed!");        goto Error;    }Error:    cudaFree(dev_c);    cudaFree(dev_a);    cudaFree(dev_b);cudaFree(&dev_size);    return cudaStatus;}
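The launch configuration ceil(size / 256.0) rounds the grid size up so that every element gets a thread; the surplus threads in the last block are discarded by the if (i < n) guard inside addKernel. Below is a minimal sketch of the same arithmetic written with integer ceiling division, assuming a block size of 256; the names threadsPerBlock and blocksPerGrid are illustrative and not part of the original code.

    // Launch-geometry computation in pure integer arithmetic (a sketch).
    int threadsPerBlock = 256;
    int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock;  // ceil(size / 256)
    // For size = 1000: blocksPerGrid = 4, i.e. 1024 threads are launched;
    // the last 24 fail the `if (i < n)` test inside addKernel and do nothing.
    addKernel<<<blocksPerGrid, threadsPerBlock>>>(dev_c, dev_a, dev_b, size);

Either form launches the same grid; the integer version simply avoids the double-to-int conversion of ceil. The whole file can be built with nvcc in the usual way (for example, nvcc vector_add.cu -o vector_add, where the file name is just an example).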
