Optimizing Matrix Multiplication


Initial version


#include <iostream>
#include <cstdlib>        // malloc, free, rand, srand
#include <cmath>          // fabs
#include <ctime>          // clock
#include <cuda.h>
#include <cuda_runtime.h>
using namespace std;

const int num_threads = 256;


// Naive kernel: one thread computes one element of c = a * b.
// lda/ldb/ldc are the leading dimensions (row strides) of the matrices.
__global__ static void matMultCUDA(const float* a, size_t lda,
                                   const float* b, size_t ldb,
                                   float* c, size_t ldc, int n)
{
    const int tid = threadIdx.x;
    const int bid = blockIdx.x;
    const int idx = bid * blockDim.x + tid;  // global thread index

    const int row = idx / n;
    const int column = idx % n;

    if (row < n && column < n)
    {
        float t = 0;
        for (int i = 0; i < n; i++)
        {
            t += a[row * lda + i] * b[i * ldb + column];
        }
        c[row * ldc + column] = t;
    }
}


// Allocates device memory, copies the inputs over, launches the kernel,
// and copies the result back. Returns the elapsed host time
// (allocation and transfers included).
clock_t matmultCUDA(const float* a, int lda, const float* b,
                    int ldb, float* c, int ldc, int n)
{
    float *ac, *bc, *cc;
    clock_t start = clock();

    cudaMalloc((void**)&ac, sizeof(float) * n * n);
    cudaMalloc((void**)&bc, sizeof(float) * n * n);
    cudaMalloc((void**)&cc, sizeof(float) * n * n);
    cudaMemcpy2D(ac, sizeof(float) * n, a, sizeof(float) * lda,
                 sizeof(float) * n, n, cudaMemcpyHostToDevice);
    cudaMemcpy2D(bc, sizeof(float) * n, b, sizeof(float) * ldb,
                 sizeof(float) * n, n, cudaMemcpyHostToDevice);

    // blocks*n blocks of num_threads threads gives at least n*n threads
    // in total, one per output element (slightly over-provisioned).
    int blocks = (n + num_threads - 1) / num_threads;
    matMultCUDA<<<blocks * n, num_threads>>>(ac, n, bc, n, cc, n, n);

    cudaMemcpy2D(c, sizeof(float) * ldc, cc, sizeof(float) * n,
                 sizeof(float) * n, n, cudaMemcpyDeviceToHost);
    cudaFree(ac);
    cudaFree(bc);
    cudaFree(cc);

    clock_t end = clock();
    return end - start;
}


// Fills an n x n matrix (leading dimension lda) with random values.
// Note the float casts in the second divisor: RAND_MAX * RAND_MAX
// would overflow as an int multiplication.
void matgen(float* a, int lda, int n)
{
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            a[i * lda + j] = (float)rand() / RAND_MAX +
                             (float)rand() / ((float)RAND_MAX * (float)RAND_MAX);
        }
    }
}


// CPU reference implementation: plain triple loop, accumulating
// in double for accuracy.
void matmult(const float* a, int lda, const float* b,
             int ldb, float* c, int ldc, int n)
{
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            double t = 0;
            for (int k = 0; k < n; k++)
            {
                t += a[i * lda + k] * b[k * ldb + j];
            }
            c[i * ldc + j] = (float)t;
        }
    }
}


// Reports the maximum and average relative error between two matrices.
void compare_mat(const float* a, int lda, const float* b,
                 int ldb, int n)
{
    float max_err = 0;
    float average_err = 0;
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            if (b[i * ldb + j] != 0)
            {
                float err = fabs((a[i * lda + j] - b[i * ldb + j]) / b[i * ldb + j]);
                if (max_err < err) max_err = err;
                average_err += err;
            }
        }
    }
    cout << "Max error: " << max_err << "    Average error: "
         << average_err / (n * n) << endl;
}




int main(void)
{
    int n = 1000;
    float* a = (float*)malloc(sizeof(float) * n * n);
    float* b = (float*)malloc(sizeof(float) * n * n);
    float* c = (float*)malloc(sizeof(float) * n * n);  // GPU result
    float* d = (float*)malloc(sizeof(float) * n * n);  // CPU reference

    srand(0);
    matgen(a, n, n);
    matgen(b, n, n);

    clock_t time = matmultCUDA(a, n, b, n, c, n, n);
    matmult(a, n, b, n, d, n, n);
    compare_mat(c, n, d, n, n);

    double sec = (double)time / CLOCKS_PER_SEC;
    cout << "Time used: " << sec << "    "
         << 2.0 * n * n * n / (sec * 1E9) << " GFLOPS" << endl;

    free(a); free(b); free(c); free(d);
    return 0;
}
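
To try it, a typical build and run looks like this, assuming the source is saved as matmul.cu (the file name is arbitrary; the exact flags depend on your toolkit and GPU):

nvcc -O2 -o matmul matmul.cu
./matmul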



The output shows a fairly large error: accumulating in single precision loses accuracy as n grows. The precision can be improved with Kahan's Summation Formula, which carries the rounding error of each addition forward and folds it into the next one.
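
To see the formula in isolation, here is a minimal host-side sketch of a compensated dot product (the variables t, y, r play the same roles as in the kernel below):

// Kahan (compensated) summation of a dot product.
// t is the running sum; y accumulates the (negated) rounding error
// of each addition and corrects the next addend.
float kahan_dot(const float* a, const float* b, int n)
{
    float t = 0;
    float y = 0;
    for (int i = 0; i < n; i++)
    {
        float r;
        y -= a[i] * b[i];  // fold the new product into the error term
        r = t - y;         // corrected sum
        y = (r - t) + y;   // rounding error of the subtraction above
        t = r;
    }
    return t;
}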

+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++



// Same one-thread-per-element kernel, now accumulating with
// Kahan's Summation Formula to reduce rounding error.
__global__ static void matMultCUDA(const float* a, size_t lda,
                                   const float* b, size_t ldb,
                                   float* c, size_t ldc, int n)
{
    const int tid = threadIdx.x;
    const int bid = blockIdx.x;
    const int idx = bid * blockDim.x + tid;

    const int row = idx / n;
    const int column = idx % n;

    if (row < n && column < n)
    {
        float t = 0;  // running sum
        float y = 0;  // compensation term
        for (int i = 0; i < n; i++)
        {
            float r;
            y -= a[row * lda + i] * b[i * ldb + column];
            r = t - y;
            y = (r - t) + y;
            t = r;
        }
        c[row * ldc + column] = t;
    }
}


Next, shared memory is used to increase speed: each block handles one row of A, loading it into shared memory once so that every thread in the block can reuse it.

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++


// One block per row of c. The block first loads its row of a into
// shared memory, then each thread computes several elements of that
// row, reusing the cached data (Kahan summation as before).
__global__ static void matMultCUDA(const float* a, size_t lda,
                                   const float* b, size_t ldb,
                                   float* c, size_t ldc, int n)
{
    extern __shared__ float data[];  // one row of a
    const int tid = threadIdx.x;
    const int row = blockIdx.x;

    // Cooperatively load row `row` of a into shared memory.
    for (int i = tid; i < n; i += blockDim.x)
    {
        data[i] = a[row * lda + i];
    }
    __syncthreads();

    for (int j = tid; j < n; j += blockDim.x)
    {
        float t = 0;
        float y = 0;
        for (int i = 0; i < n; i++)
        {
            float r;
            y -= data[i] * b[i * ldb + j];
            r = t - y;
            y = (r - t) + y;
            t = r;
        }
        c[row * ldc + j] = t;
    }
}
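
Because the kernel declares the shared array as extern __shared__, its size must be supplied as the third launch-configuration parameter. A sketch of the matching launch, with one block per row (the launch configuration matches the full listing further below; the kernel arguments here assume the simpler cudaMalloc allocation from the first version):

// n blocks, one per row; sizeof(float)*n bytes of dynamic shared
// memory hold that row of a.
matMultCUDA<<<n, num_threads, sizeof(float) * n>>>(ac, n, bc, n, cc, n, n);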



The result is still not what we would hope for, and the underlying reason is the same as before: too many memory accesses. Each row of A no longer has to be read repeatedly, but the columns of B are still read over and over again.

A second problem is less obvious. The reads of B look non-contiguous, but they actually are contiguous: different threads read different columns, so at any given moment the columns read by all the threads together form one contiguous block of memory. Why, then, is efficiency still poor? Because the GPU's memory controller reaches its best efficiency only when a read starts at an address that is a multiple of some fixed size (for example, a multiple of 16 bytes). Since the matrix width is not a multiple of 16 (we are using a 1000x1000 matrix), the accesses are misaligned and efficiency suffers.

To solve this, we can tweak the allocation so that each row is padded out to a suitable multiple of the alignment size. But what is the right multiple? Fortunately, we do not need to know: CUDA provides a function, cudaMallocPitch, that automatically allocates memory with an optimal row pitch.
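
The call pattern, as used in the listing below: cudaMallocPitch returns the padded row width in bytes through its pitch argument, and dividing by sizeof(float) gives the leading dimension to pass to the kernel:

// Rows are padded to a device-preferred alignment; the actual row
// width in bytes comes back in pitch_a.
size_t pitch_a;
float* ac;
cudaMallocPitch((void**)&ac, &pitch_a, sizeof(float) * n, n);
// Element (row, col) is at ac[row * (pitch_a / sizeof(float)) + col].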


++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++


#include <iostream>
#include <cstdlib>        // malloc, free, rand, srand
#include <cmath>          // fabs
#include <ctime>          // clock
#include <cuda.h>
#include <cuda_runtime.h>
using namespace std;

const int num_threads = 256;


// One block per row of c, as in the previous section: the row of a
// is staged in shared memory and reused by every thread in the block.
__global__ static void matMultCUDA(const float* a, size_t lda,
                                   const float* b, size_t ldb,
                                   float* c, size_t ldc, int n)
{
    extern __shared__ float data[];  // one row of a
    const int tid = threadIdx.x;
    const int row = blockIdx.x;

    // Cooperatively load row `row` of a into shared memory.
    for (int i = tid; i < n; i += blockDim.x)
    {
        data[i] = a[row * lda + i];
    }
    __syncthreads();

    for (int j = tid; j < n; j += blockDim.x)
    {
        float t = 0;
        float y = 0;
        for (int i = 0; i < n; i++)
        {
            float r;
            y -= data[i] * b[i * ldb + j];
            r = t - y;
            y = (r - t) + y;
            t = r;
        }
        c[row * ldc + j] = t;
    }
}


// Same as before, but device memory is allocated with cudaMallocPitch
// so that every row starts at a well-aligned address. The pitch (row
// width in bytes) is converted to a leading dimension in floats
// before being passed to the kernel.
clock_t matmultCUDA(const float* a, int lda, const float* b,
                    int ldb, float* c, int ldc, int n)
{
    float *ac, *bc, *cc;
    clock_t start = clock();

    size_t pitch_a, pitch_b, pitch_c;
    cudaMallocPitch((void**)&ac, &pitch_a, sizeof(float) * n, n);
    cudaMallocPitch((void**)&bc, &pitch_b, sizeof(float) * n, n);
    cudaMallocPitch((void**)&cc, &pitch_c, sizeof(float) * n, n);
    cudaMemcpy2D(ac, pitch_a, a, sizeof(float) * lda,
                 sizeof(float) * n, n, cudaMemcpyHostToDevice);
    cudaMemcpy2D(bc, pitch_b, b, sizeof(float) * ldb,
                 sizeof(float) * n, n, cudaMemcpyHostToDevice);

    // One block per row; sizeof(float)*n bytes of dynamic shared
    // memory hold that row.
    matMultCUDA<<<n, num_threads, sizeof(float) * n>>>
        (ac, pitch_a / sizeof(float), bc, pitch_b / sizeof(float),
         cc, pitch_c / sizeof(float), n);

    cudaMemcpy2D(c, sizeof(float) * ldc, cc, pitch_c,
                 sizeof(float) * n, n, cudaMemcpyDeviceToHost);
    cudaFree(ac);
    cudaFree(bc);
    cudaFree(cc);

    clock_t end = clock();
    return end - start;
}


// Fills an n x n matrix (leading dimension lda) with random values.
// The float casts avoid int overflow in RAND_MAX * RAND_MAX.
void matgen(float* a, int lda, int n)
{
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            a[i * lda + j] = (float)rand() / RAND_MAX +
                             (float)rand() / ((float)RAND_MAX * (float)RAND_MAX);
        }
    }
}


// CPU reference implementation, accumulating in double.
void matmult(const float* a, int lda, const float* b,
             int ldb, float* c, int ldc, int n)
{
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            double t = 0;
            for (int k = 0; k < n; k++)
            {
                t += a[i * lda + k] * b[k * ldb + j];
            }
            c[i * ldc + j] = (float)t;
        }
    }
}


// Reports the maximum and average relative error between two matrices.
void compare_mat(const float* a, int lda, const float* b,
                 int ldb, int n)
{
    float max_err = 0;
    float average_err = 0;
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            if (b[i * ldb + j] != 0)
            {
                float err = fabs((a[i * lda + j] - b[i * ldb + j]) / b[i * ldb + j]);
                if (max_err < err) max_err = err;
                average_err += err;
            }
        }
    }
    cout << "Max error: " << max_err << "    Average error: "
         << average_err / (n * n) << endl;
}




int main(void)
{
    int n = 1000;
    float* a = (float*)malloc(sizeof(float) * n * n);
    float* b = (float*)malloc(sizeof(float) * n * n);
    float* c = (float*)malloc(sizeof(float) * n * n);  // GPU result
    float* d = (float*)malloc(sizeof(float) * n * n);  // CPU reference

    srand(0);
    matgen(a, n, n);
    matgen(b, n, n);

    clock_t time = matmultCUDA(a, n, b, n, c, n, n);
    matmult(a, n, b, n, d, n, n);
    compare_mat(c, n, d, n, n);

    double sec = (double)time / CLOCKS_PER_SEC;
    cout << "Time used: " << sec << "    "
         << 2.0 * n * n * n / (sec * 1E9) << " GFLOPS" << endl;

    free(a); free(b); free(c); free(d);
    return 0;
}


Finally, we divide the matrices into small tiles. If we load one tile of A and one tile of B into shared memory, the multiplication of the two tiles no longer needs to touch external memory at all: each element of A and B is then read from global memory only n / block_size times instead of n times.
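
With block_size = 16, each block of 16x16 threads computes one 16x16 tile of C, and the grid is a 2-D array of tiles covering the whole matrix. This is the launch geometry the listing below uses:

// ceil(n / block_size) tiles in each dimension.
int bx = (n + block_size - 1) / block_size;
dim3 blocks(bx, bx);                   // 2-D grid: one block per tile of C
dim3 threads(block_size, block_size);  // one thread per element of the tile
matMultCUDA<<<blocks, threads>>>(ac, pitch_a / sizeof(float),
                                 bc, pitch_b / sizeof(float),
                                 cc, pitch_c / sizeof(float), n);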



++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++



#include <iostream>
#include <cstdlib>        // malloc, free, rand, srand
#include <cmath>          // fabs
#include <ctime>          // clock
#include <cuda.h>
#include <cuda_runtime.h>
using namespace std;

const int block_size = 16;  // tile width; each block is 16x16 threads


// Tiled kernel: each 16x16 block of threads computes one 16x16 tile
// of c. At every step j, one tile of a and one tile of b are staged
// in shared memory, so the inner product over the tile reads no
// global memory at all. Kahan summation is kept for accuracy.
__global__ static void matMultCUDA(const float* a, size_t lda,
                                   const float* b, size_t ldb,
                                   float* c, size_t ldc, int n)
{
    __shared__ float matA[block_size][block_size];
    __shared__ float matB[block_size][block_size];
    const int tidc = threadIdx.x;              // column within the tile
    const int tidr = threadIdx.y;              // row within the tile
    const int bidc = blockIdx.x * block_size;  // tile's first column in c
    const int bidr = blockIdx.y * block_size;  // tile's first row in c

    float results = 0;
    float comp = 0;  // Kahan compensation term
    for (int j = 0; j < n; j += block_size)
    {
        // Stage one tile of a and one tile of b, zero-padding
        // anything that falls outside the matrix.
        if (tidr + bidr < n && tidc + j < n)
        {
            matA[tidr][tidc] = a[(tidr + bidr) * lda + tidc + j];
        }
        else
        {
            matA[tidr][tidc] = 0;
        }
        if (tidr + j < n && tidc + bidc < n)
        {
            matB[tidr][tidc] = b[(tidr + j) * ldb + tidc + bidc];
        }
        else
        {
            matB[tidr][tidc] = 0;
        }
        __syncthreads();

        // Multiply the two staged tiles entirely out of shared memory.
        for (int i = 0; i < block_size; i++)
        {
            float t;
            comp -= matA[tidr][i] * matB[i][tidc];
            t = results - comp;
            comp = (t - results) + comp;
            results = t;
        }
        __syncthreads();
    }  // after the loop, this thread's element of its 16x16 tile is done

    if (tidr + bidr < n && tidc + bidc < n)
    {
        c[(tidr + bidr) * ldc + tidc + bidc] = results;
    }
}


// Pitched allocation as before, but now a 2-D grid of 16x16 blocks.
clock_t matmultCUDA(const float* a, int lda, const float* b,
                    int ldb, float* c, int ldc, int n)
{
    float *ac, *bc, *cc;
    clock_t start = clock();

    size_t pitch_a, pitch_b, pitch_c;
    cudaMallocPitch((void**)&ac, &pitch_a, sizeof(float) * n, n);
    cudaMallocPitch((void**)&bc, &pitch_b, sizeof(float) * n, n);
    cudaMallocPitch((void**)&cc, &pitch_c, sizeof(float) * n, n);
    cudaMemcpy2D(ac, pitch_a, a, sizeof(float) * lda,
                 sizeof(float) * n, n, cudaMemcpyHostToDevice);
    cudaMemcpy2D(bc, pitch_b, b, sizeof(float) * ldb,
                 sizeof(float) * n, n, cudaMemcpyHostToDevice);

    int bx = (n + block_size - 1) / block_size;  // tiles per dimension
    dim3 blocks(bx, bx);
    dim3 threads(block_size, block_size);
    matMultCUDA<<<blocks, threads>>>
        (ac, pitch_a / sizeof(float), bc, pitch_b / sizeof(float),
         cc, pitch_c / sizeof(float), n);

    cudaMemcpy2D(c, sizeof(float) * ldc, cc, pitch_c,
                 sizeof(float) * n, n, cudaMemcpyDeviceToHost);
    cudaFree(ac);
    cudaFree(bc);
    cudaFree(cc);

    clock_t end = clock();
    return end - start;
}


// Fills an n x n matrix (leading dimension lda) with random values.
// The float casts avoid int overflow in RAND_MAX * RAND_MAX.
void matgen(float* a, int lda, int n)
{
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            a[i * lda + j] = (float)rand() / RAND_MAX +
                             (float)rand() / ((float)RAND_MAX * (float)RAND_MAX);
        }
    }
}


// CPU reference implementation, accumulating in double.
void matmult(const float* a, int lda, const float* b,
             int ldb, float* c, int ldc, int n)
{
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            double t = 0;
            for (int k = 0; k < n; k++)
            {
                t += a[i * lda + k] * b[k * ldb + j];
            }
            c[i * ldc + j] = (float)t;
        }
    }
}


// Reports the maximum and average relative error between two matrices.
void compare_mat(const float* a, int lda, const float* b,
                 int ldb, int n)
{
    float max_err = 0;
    float average_err = 0;
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            if (b[i * ldb + j] != 0)
            {
                float err = fabs((a[i * lda + j] - b[i * ldb + j]) / b[i * ldb + j]);
                if (max_err < err) max_err = err;
                average_err += err;
            }
        }
    }
    cout << "Max error: " << max_err << "    Average error: "
         << average_err / (n * n) << endl;
}




int main(void)
{
    int n = 1000;
    float* a = (float*)malloc(sizeof(float) * n * n);
    float* b = (float*)malloc(sizeof(float) * n * n);
    float* c = (float*)malloc(sizeof(float) * n * n);  // GPU result
    float* d = (float*)malloc(sizeof(float) * n * n);  // CPU reference

    srand(0);
    matgen(a, n, n);
    matgen(b, n, n);

    clock_t time = matmultCUDA(a, n, b, n, c, n, n);
    matmult(a, n, b, n, d, n, n);
    compare_mat(c, n, d, n, n);

    double sec = (double)time / CLOCKS_PER_SEC;
    cout << "Time used: " << sec << "    "
         << 2.0 * n * n * n / (sec * 1E9) << " GFLOPS" << endl;

    free(a); free(b); free(c); free(d);
    return 0;
}














