CUDA Reduction: The Inner Product of Vectors


       While working on a program today, one step required accumulating images. The CPU implementation was far too slow, so I decided to do it on the GPU. After consulting the relevant material, I put together a vector inner (dot) product that uses the idea of reduction. I am posting it here in the hope that it helps others learning CUDA; the code is easy to follow.
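
The heart of the kernel is the tree-style reduction at the end: every thread first accumulates a private partial sum, writes it into shared memory, and the block then repeatedly folds its upper half onto its lower half. As a small worked trace, with blockDim.x = 8 the stride i takes the values 4, 2, 1: after the first pass cache[0..3] each hold the sum of two entries, after the second pass cache[0..1] each hold the sum of four, and after the last pass cache[0] holds the whole block's sum. This is why threadsPerBlock must be a power of 2, and why the loop needs only log2(threadsPerBlock) iterations instead of a linear scan.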

// vectorReduce.cu : entry point of the console application.

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define imin(a,b) ((a) < (b) ? (a) : (b))

const int N = 1024 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin( 512, (N + threadsPerBlock - 1) / threadsPerBlock );
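
// The grid is capped at 512 blocks: since the kernel uses a grid-stride loop,
// each thread can process several elements, and the per-block partial-result
// array stays small (one float per block).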
__global__ void dot( float *a, float *b, float *c )
{
    __shared__ float cache[threadsPerBlock]; // per-block shared memory; this step is essential
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIndex = threadIdx.x;
    float temp = 0;
    // each thread accumulates its own partial sum, striding by the grid size
    while (tid < N)
    {
        temp += a[tid] * b[tid];
        tid += blockDim.x * gridDim.x;
    }
    // store this thread's partial sum in cache[cacheIndex]
    cache[cacheIndex] = temp;

    // synchronize all threads in the block before reducing
    __syncthreads();
    // for reductions, threadsPerBlock must be a power of 2
    // because of the following code
    int i = blockDim.x / 2;
    while (i != 0)
    {
        if (cacheIndex < i)
            cache[cacheIndex] += cache[cacheIndex + i];
        __syncthreads();
        i /= 2;
    }
    // thread 0 writes this block's result to global memory
    if (cacheIndex == 0)
        c[blockIdx.x] = cache[0];
}
int main( void )
{
    float   *a, *b, c, *partial_c;
    float   *dev_a, *dev_b, *dev_partial_c;

    // allocate memory on the CPU side
    a = (float*)malloc( N * sizeof(float) );
    b = (float*)malloc( N * sizeof(float) );
    partial_c = (float*)malloc( blocksPerGrid * sizeof(float) );
    // allocate the memory on the GPU
    cudaMalloc( (void**)&dev_a, N * sizeof(float) );
    cudaMalloc( (void**)&dev_b, N * sizeof(float) );
    cudaMalloc( (void**)&dev_partial_c, blocksPerGrid * sizeof(float) );

    // fill in the host memory with data
    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = i * 2;
    }
    // copy the arrays 'a' and 'b' to the GPU
    cudaMemcpy( dev_a, a, N * sizeof(float), cudaMemcpyHostToDevice );
    cudaMemcpy( dev_b, b, N * sizeof(float), cudaMemcpyHostToDevice );

    dot<<<blocksPerGrid,threadsPerBlock>>>( dev_a, dev_b, dev_partial_c );
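    // (In production code one would check the launch with cudaGetLastError()
    //  and/or cudaDeviceSynchronize(); omitted here for brevity.)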

    // copy the partial results back from the GPU to the CPU
    cudaMemcpy( partial_c, dev_partial_c, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost );

    // finish up the sum on the CPU side
    c = 0;
    for (int i = 0; i < blocksPerGrid; i++) {
        c += partial_c[i];
    }
    #define sum_squares(x)  ((x) * ((x) + 1) * (2 * (x) + 1) / 6)
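    // Verification: since a[i] = i and b[i] = 2*i, the exact dot product is
    // 2 * sum(i^2) for i = 0..N-1, i.e. 2 * (N-1)*N*(2N-1)/6, which is what
    // 2 * sum_squares(N-1) evaluates in closed form. Note that with N this
    // large, single-precision rounding means the two printed values may
    // differ slightly in the last digits.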
    printf("Does GPU value %.6g = %.6g?\n", c, 2 * sum_squares( (float)(N - 1) ) );
    // free memory on the GPU side
    cudaFree( dev_a );
    cudaFree( dev_b );
    cudaFree( dev_partial_c );

    // free memory on the CPU side
    free( a );
    free( b );
    free( partial_c );

    getchar();
    return 0;
}
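
To build and run this (a minimal invocation, assuming the CUDA toolkit is installed; flags such as the target architecture depend on your GPU), save the file as vectorReduce.cu and compile with nvcc:

nvcc vectorReduce.cu -o vectorReduce
./vectorReduce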

 
