CUDA Matrix Multiplication: Non-Square Matrices


Notes

Matrix A is M * N and matrix B is N * M, so the product C = A * B is M * M. In the code below M = 8 and N = 4, and both are multiples of BLOCK_SIZE = 2, so every thread block works on a full tile and the kernel needs no boundary checks.
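A direct CPU computation of the same product is a handy reference for checking the GPU result. The sketch below is not part of the original post; it assumes plain row-major arrays (A is rows x inner, B is inner x cols), and MatMulRef is a hypothetical helper name:

// Hypothetical CPU reference (not in the original code):
// C[i][j] = sum over k of A[i][k] * B[k][j], all arrays row-major.
void MatMulRef(const float* A, const float* B, float* C,
               int rows, int inner, int cols) {
    for (int i = 0; i < rows; ++i) {
        for (int j = 0; j < cols; ++j) {
            float sum = 0.0f;
            for (int k = 0; k < inner; ++k)
                sum += A[i * inner + k] * B[k * cols + j];
            C[i * cols + j] = sum;  // C is rows x cols
        }
    }
}

With M = 8 and N = 4 as below, a call such as MatMulRef(A.elements, B.elements, ref, M, N, M), where ref is a separately allocated M * M float buffer, should reproduce what the kernel writes into C.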

Code

#include "device_functions.h"#include "cuda_runtime.h"#include "device_launch_parameters.h"#include <stdio.h>#include <iostream>typedef struct {    int width;    int height;    int stride;    float* elements;} Matrix;#define BLOCK_SIZE  2#define N           4#define M           8__device__ float GetElement(const Matrix A, int row, int col) {    return A.elements[row * A.stride + col];}__device__ void SetElement(Matrix A, int row, int col, float value) {    A.elements[row * A.stride + col] = value;}__device__ Matrix GetSubMatrix(Matrix A, int row, int col) {    Matrix Asub;    Asub.width = BLOCK_SIZE;    Asub.height = BLOCK_SIZE;    Asub.stride = A.stride;    Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row+ BLOCK_SIZE * col];    return Asub;}__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) {    int blockRow = blockIdx.y;    int blockCol = blockIdx.x;    Matrix Csub = GetSubMatrix(C, blockRow, blockCol);    float Cvalue = 0;    int row = threadIdx.y;    int col = threadIdx.x;    for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {        Matrix Asub = GetSubMatrix(A, blockRow, m);        Matrix Bsub = GetSubMatrix(B, m, blockCol);        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];        As[row][col] = GetElement(Asub, row, col);        Bs[row][col] = GetElement(Bsub, row, col);        __syncthreads();        for (int e = 0; e < BLOCK_SIZE; ++e)            Cvalue += As[row][e] * Bs[e][col];        __syncthreads();    }    SetElement(Csub, row, col, Cvalue);}void MatMul(const Matrix A, const Matrix B, Matrix C) {    Matrix d_A;    d_A.width = d_A.stride = A.width;     d_A.height = A.height;    size_t size = A.width * A.height * sizeof(float);    cudaMalloc((void**)&d_A.elements, size);    cudaMemcpy(d_A.elements, A.elements, size,    cudaMemcpyHostToDevice);    Matrix d_B;    d_B.width = d_B.stride = B.width;     d_B.height = B.height;    size = B.width * B.height * sizeof(float);    cudaMalloc((void**)&d_B.elements, size);    cudaMemcpy(d_B.elements, B.elements, size,    cudaMemcpyHostToDevice);    Matrix d_C;    d_C.width = d_C.stride = C.width;     d_C.height = C.height;    size = C.width * C.height * sizeof(float);    cudaMalloc((void**)&d_C.elements, size);    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);    dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);    cudaMemcpy(C.elements, d_C.elements, size,    cudaMemcpyDeviceToHost);    cudaFree(d_A.elements);    cudaFree(d_B.elements);    cudaFree(d_C.elements);}void init(Matrix *A,int row, int col) {    A->width=col;    A->height=row;    A->stride=col;    A->elements=(float*) calloc(row*col, sizeof(float));    for(int i=0; i<row*col; i++) A->elements[i]=i;}int main() {    //定义四个矩阵,C=A*B为分割成块使用sharedmemory;C_ref=A*B直接计算;    Matrix A, B, C;    //初始化四个矩阵,赋初值。    init(&A,M,N);    init(&B,N,M);    init(&C,M,M);    int i, j;    printf("A\n");    for(i=0; i<M*N; i++){        printf("%f \t",A.elements[i]);    }    printf("\n");    printf("A\n");    for(i=0; i<M; i++){        for(j=0; j<N; j++) {            printf("%f \t",A.elements[i*N+j]);        }        printf("\n");    }    printf("B\n");    for(i=0; i<N; i++){        for(j=0; j<M; j++) {            printf("%f \t",A.elements[i*M+j]);        }        printf("\n");    }    MatMul(A, B, C);    printf("C\n");    for(i=0; i<M; i++){        for(j=0; j<M; j++) {            printf("%f \t",C.elements[j*M+i]);        }        printf("\n");    }}