C++ in Practice: Setting Up an OpenCL Environment (Part 1)


Preface:

Before I came across parallel computing with OpenCL, my idea of "parallel" was multithreading: technologies such as OpenMP (and, for multi-process parallelism, MPI). Those all run on the CPU, and they work by exploiting the hardware characteristics of multi-core processors so that a program makes the most of the available cores.

After getting into OpenCL, I learned that it can move computation-heavy code onto the GPU or other accelerators, and a GPU is better suited than a CPU to bulk arithmetic such as additions and multiplications. My first association was with shaders in OpenGL: isn't writing a shader just putting rendering code on the GPU? It turns out that ordinary, general-purpose algorithm code can run on the GPU as well, and OpenCL is the standard that makes this possible.

Learning path

I spent a week or two getting a rough, broad overview of OpenCL. Because OpenCL sits close to the hardware, it comes with many hardware-related concepts, such as the platform model, the memory model, and various instruction-level operations, which are fairly hard going for a software developer just starting out. So for this early stage I will skip a deep dive into those hardware concepts and focus on programming against the standard OpenCL API, the OpenCL programming model, and the software architecture.

Use cases:

One of OpenCL's biggest strengths is portability. It is available on different operating systems (macOS, Windows, Android), and different CPUs and GPUs support it. I am currently learning OpenCL development on a Mac, where the supported version is OpenCL 1.2, and I will also test on phones. Each hardware vendor ships its own implementation of the OpenCL library, while the header files follow the standard.
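Because the headers are standard and only the library comes from the vendor, the same source can target every platform with a small include guard. This is a minimal sketch of my own, not from the original post: on Apple systems the header lives inside the OpenCL framework, elsewhere it usually sits under a CL/ prefix.

#ifdef __APPLE__
#include <OpenCL/opencl.h>   // macOS ships the header inside the OpenCL framework
#else
#include <CL/cl.h>           // Linux, Windows and Android SDKs use the CL/ prefix
#endif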

Using it on the Mac:

The IDE is Xcode, which comes with the Mac, and the OpenCL SDK is already part of macOS, so there is nothing extra to download. Just add the framework to the project and you can start developing with OpenCL, which is very convenient.
The Mac setup process is shown below and is quite simple. Note that at the moment macOS supports OpenCL up to version 1.2.
(screenshot)

Click the + button to add the OpenCL library; searching for "opencl" brings it straight up, as shown below:
(screenshot)

Once it is added, the project gains an OpenCL framework entry:

(screenshot)
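To check that the framework is hooked up, a small sanity-check program can list the platforms and devices the runtime reports. This is my own minimal sketch, not part of the original post; on macOS it can be built with clang++ -std=c++11 main.cpp -framework OpenCL.

#ifdef __APPLE__
#include <OpenCL/opencl.h>
#else
#include <CL/cl.h>
#endif
#include <cstdio>
#include <vector>

int main()
{
    // First call asks how many platforms exist, second call fetches their IDs.
    cl_uint numPlatforms = 0;
    clGetPlatformIDs(0, NULL, &numPlatforms);
    std::vector<cl_platform_id> platforms(numPlatforms);
    clGetPlatformIDs(numPlatforms, platforms.data(), NULL);

    for (cl_platform_id p : platforms)
    {
        char version[256] = {0};
        clGetPlatformInfo(p, CL_PLATFORM_VERSION, sizeof(version), version, NULL);
        printf("Platform: %s\n", version);

        cl_uint numDevices = 0;
        clGetDeviceIDs(p, CL_DEVICE_TYPE_ALL, 0, NULL, &numDevices);
        std::vector<cl_device_id> devices(numDevices);
        clGetDeviceIDs(p, CL_DEVICE_TYPE_ALL, numDevices, devices.data(), NULL);

        for (cl_device_id d : devices)
        {
            char name[256] = {0};
            clGetDeviceInfo(d, CL_DEVICE_NAME, sizeof(name), name, NULL);
            printf("  Device: %s\n", name);
        }
    }
    return 0;
}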
OpenCL development follows this standard sequence of API calls:

1. Get the platform information:
clGetPlatformIDs(1, &firstPlatformId, &numPlatforms);

2. Create a context:
clCreateContextFromType(contextProperties, CL_DEVICE_TYPE_GPU,
                        NULL, NULL, &errNum);

3. Query the size of the context's device list:
clGetContextInfo(context, CL_CONTEXT_DEVICES, 0, NULL, &deviceBufferSize);

4. Allocate space for the device list and retrieve it:
clGetContextInfo(context, CL_CONTEXT_DEVICES, deviceBufferSize, devices, NULL);

5. Pick one of the available devices and create a command queue on it:
clCreateCommandQueue(context, devices[0], 0, NULL);

6. Create a program object from the kernel source and build it (see the build-log sketch right after this list):
program = clCreateProgramWithSource(context, 1,
                                    (const char**)&srcStr,
                                    NULL, NULL);

errNum = clBuildProgram(program, 0, NULL, NULL, NULL, NULL);

7. Finally, release the OpenCL resources.
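Both here and in the full example below the return value of clBuildProgram is ignored, so a syntax error in the kernel source only shows up later as a failed clCreateKernel with no diagnostics. As a sketch of my own (assuming <vector> and <iostream> are included and that device is the cl_device_id used for the context), the build log can be printed when the build does not return CL_SUCCESS:

cl_int buildErr = clBuildProgram(program, 0, NULL, NULL, NULL, NULL);
if (buildErr != CL_SUCCESS)
{
    // Ask for the size of the build log, then fetch and print it.
    size_t logSize = 0;
    clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, 0, NULL, &logSize);
    std::vector<char> log(logSize + 1, '\0');
    clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, logSize, log.data(), NULL);
    std::cerr << "Kernel build failed:" << std::endl << log.data() << std::endl;
    clReleaseProgram(program);
}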

Here is the example I ran on the Mac:

//
//  main.cpp
//  OpenCL
//
//  Created by xxx on 2017/9/19.
//  Copyright © 2017 xxx. All rights reserved.
//
#include <OpenCL/OpenCL.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <unistd.h>
#include <sys/time.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <mach/mach_time.h>

const int ARRAY_SIZE = 100000;

// 1. Pick an OpenCL platform and create a context
cl_context CreateContext()
{
    cl_int errNum;
    cl_uint numPlatforms;
    cl_platform_id firstPlatformId;
    cl_context context = NULL;

    // Use the first available platform
    errNum = clGetPlatformIDs(1, &firstPlatformId, &numPlatforms);
    if (errNum != CL_SUCCESS || numPlatforms <= 0)
    {
        std::cerr << "Failed to find any OpenCL platforms." << std::endl;
        return NULL;
    }

    // Create an OpenCL context on that platform
    cl_context_properties contextProperties[] =
    {
        CL_CONTEXT_PLATFORM,
        (cl_context_properties)firstPlatformId,
        0
    };
    context = clCreateContextFromType(contextProperties, CL_DEVICE_TYPE_GPU,
                                      NULL, NULL, &errNum);
    return context;
}

// 2. Pick a device and create a command queue
cl_command_queue CreateCommandQueue(cl_context context, cl_device_id *device)
{
    cl_int errNum;
    cl_device_id *devices;
    cl_command_queue commandQueue = NULL;
    size_t deviceBufferSize = -1;

    // Query the size of the context's device list
    errNum = clGetContextInfo(context, CL_CONTEXT_DEVICES, 0, NULL, &deviceBufferSize);
    if (deviceBufferSize <= 0)
    {
        std::cerr << "No devices available.";
        return NULL;
    }

    // Allocate space for the device list and fetch it
    devices = new cl_device_id[deviceBufferSize / sizeof(cl_device_id)];
    errNum = clGetContextInfo(context, CL_CONTEXT_DEVICES, deviceBufferSize, devices, NULL);

    // Use the first available device
    commandQueue = clCreateCommandQueue(context, devices[0], 0, NULL);
    *device = devices[0];
    delete[] devices;
    return commandQueue;
}

// 3. Create and build the program object
cl_program CreateProgram(cl_context context, cl_device_id device, const char* fileName)
{
    cl_int errNum;
    cl_program program;

    std::ifstream kernelFile(fileName, std::ios::in);
    if (!kernelFile.is_open())
    {
        std::cerr << "Failed to open file for reading: " << fileName << std::endl;
        return NULL;
    }
    std::ostringstream oss;
    oss << kernelFile.rdbuf();
    std::string srcStdStr = oss.str();
    const char *srcStr = srcStdStr.c_str();

    program = clCreateProgramWithSource(context, 1,
                                        (const char**)&srcStr,
                                        NULL, NULL);
    errNum = clBuildProgram(program, 0, NULL, NULL, NULL, NULL);
    return program;
}

// Create the memory objects (two read-only input buffers, one read-write output buffer)
bool CreateMemObjects(cl_context context, cl_mem memObjects[3],
                      float *a, float *b)
{
    memObjects[0] = clCreateBuffer(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                                   sizeof(float) * ARRAY_SIZE, a, NULL);
    memObjects[1] = clCreateBuffer(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                                   sizeof(float) * ARRAY_SIZE, b, NULL);
    memObjects[2] = clCreateBuffer(context, CL_MEM_READ_WRITE,
                                   sizeof(float) * ARRAY_SIZE, NULL, NULL);
    return true;
}

// Release OpenCL resources
void Cleanup(cl_context context, cl_command_queue commandQueue,
             cl_program program, cl_kernel kernel, cl_mem memObjects[3])
{
    for (int i = 0; i < 3; i++)
    {
        if (memObjects[i] != 0)
            clReleaseMemObject(memObjects[i]);
    }
    if (commandQueue != 0)
        clReleaseCommandQueue(commandQueue);
    if (kernel != 0)
        clReleaseKernel(kernel);
    if (program != 0)
        clReleaseProgram(program);
    if (context != 0)
        clReleaseContext(context);
}

int main(int argc, char** argv)
{
    cl_context context = 0;
    cl_command_queue commandQueue = 0;
    cl_program program = 0;
    cl_device_id device = 0;
    cl_kernel kernel = 0;
    cl_mem memObjects[3] = { 0, 0, 0 };
    cl_int errNum;
    // uint64_t t1,t2,t3;
    clock_t t1, t2, t3;
    const char* filename = "/Users/xxxxx/Projects/OpenCL/OpenCL/HelloWorld.cl";

    // 1. Pick an OpenCL platform and create a context
    context = CreateContext();
    // 2. Pick a device and create a command queue
    commandQueue = CreateCommandQueue(context, &device);
    // 3. Create and build the program object
    program = CreateProgram(context, device, filename); // "HelloWorld.cl"
    // 4. Create the OpenCL kernel
    kernel = clCreateKernel(program, "hello_kernel", NULL);

    // Prepare the data to process
    float result[ARRAY_SIZE];
    float a[ARRAY_SIZE];
    float b[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++)
    {
        a[i] = (float)i;
        b[i] = (float)(ARRAY_SIZE - i);
    }

    t1 = clock();  // mach_absolute_time();
    printf("t1 = %.8f\n", (double)t1);
    for (int j = 0; j < ARRAY_SIZE; j++) {
        result[j] = a[j] + b[j];
    }
    t2 = clock();  // mach_absolute_time();
    printf("t2 = %.8f\n", (double)t2);

    // Create the memory objects
    if (!CreateMemObjects(context, memObjects, a, b))
    {
        Cleanup(context, commandQueue, program, kernel, memObjects);
        return 1;
    }

    // 5. Set the kernel arguments and enqueue the kernel
    errNum = clSetKernelArg(kernel, 0, sizeof(cl_mem), &memObjects[0]);
    errNum |= clSetKernelArg(kernel, 1, sizeof(cl_mem), &memObjects[1]);
    errNum |= clSetKernelArg(kernel, 2, sizeof(cl_mem), &memObjects[2]);

    size_t globalWorkSize[1] = { ARRAY_SIZE };
    size_t localWorkSize[1] = { 1 };
    errNum = clEnqueueNDRangeKernel(commandQueue, kernel, 1, NULL,
                                    globalWorkSize, localWorkSize,
                                    0, NULL, NULL);

    // 6. Read back the result, then release the OpenCL resources
    errNum = clEnqueueReadBuffer(commandQueue, memObjects[2], CL_TRUE,
                                 0, ARRAY_SIZE * sizeof(float), result,
                                 0, NULL, NULL);
    t3 = clock();  // mach_absolute_time();
    printf("cpu t = %.8f\n", (float)(t2 - t1) / CLOCKS_PER_SEC);
    printf("gpu t = %.8f \n", (double)(t3 - t2) / CLOCKS_PER_SEC);
//    for (int i = 0; i < ARRAY_SIZE; i++)
//    {
//        std::cout << result[i] << " ";
//    }
    std::cout << std::endl;
    std::cout << "Executed program succesfully." << std::endl;
    getchar();
    Cleanup(context, commandQueue, program, kernel, memObjects);
    return 0;
}
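One thing the example glosses over is error handling: most of the errNum values are assigned but never inspected. A tiny helper along these lines (purely my own illustration, the name is arbitrary) keeps the checks from cluttering the main flow:

#include <cstdio>
#include <cstdlib>

// Abort with a readable message when an OpenCL call does not return CL_SUCCESS.
static void checkCl(cl_int err, const char *what)
{
    if (err != CL_SUCCESS)
    {
        fprintf(stderr, "%s failed with OpenCL error %d\n", what, err);
        exit(EXIT_FAILURE);
    }
}

// Usage, for example:
// checkCl(clSetKernelArg(kernel, 0, sizeof(cl_mem), &memObjects[0]), "clSetKernelArg(0)");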

Here is the kernel source file:

__kernel void hello_kernel(__global const float *a,
                           __global const float *b,
                           __global float *result)
{
    int gid = get_global_id(0);
    result[gid] = a[gid] + b[gid];
}

Here is the output on a MacBook Pro:

t1 = 143206.00000000
t2 = 143562.00000000
cpu t = 0.00035600
gpu t = 0.00155900
Executed program succesfully.

Looking at this result, the CPU is actually faster, which is not surprising: the "gpu t" interval also includes creating the buffers and copying the data to and from the device, and the workload itself is tiny. The GPU's advantage only shows up as the dimensionality and the amount of computation grow, which I will demonstrate step by step in later posts.
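To time only the kernel itself, the device-side timestamps provided by OpenCL events are more telling than wall-clock time around the whole setup. This is a minimal sketch of my own, reusing context, device, kernel and globalWorkSize from the example above and assuming the queue is created with profiling enabled; it is not part of the original code:

// Create the queue with profiling enabled (this replaces the plain clCreateCommandQueue call).
cl_command_queue queue = clCreateCommandQueue(context, device, CL_QUEUE_PROFILING_ENABLE, NULL);

cl_event evt;
// Passing NULL as the local work size lets the runtime choose a suitable work-group size.
clEnqueueNDRangeKernel(queue, kernel, 1, NULL, globalWorkSize, NULL, 0, NULL, &evt);
clWaitForEvents(1, &evt);

cl_ulong start = 0, end = 0;
clGetEventProfilingInfo(evt, CL_PROFILING_COMMAND_START, sizeof(start), &start, NULL);
clGetEventProfilingInfo(evt, CL_PROFILING_COMMAND_END, sizeof(end), &end, NULL);
printf("kernel time = %.6f ms\n", (end - start) * 1e-6);  // the timestamps are in nanoseconds
clReleaseEvent(evt);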

GitHub repository: https://github.com/myhouselove/mac-opencl

Using it on an Android phone

I use an NDK & CMake native project built with Android Studio 2.2 or later to learn OpenCL development on Android.
Here is the setup process:
1. Create a CMake-based Android project; there are plenty of guides online, and I describe it in another blog post: http://blog.csdn.net/w401229755/article/details/75810028

The most important step is then to find the OpenCL library for the specific phone, plus the standard header files. I use a Xiaomi Mi 4, and its library is at /system/vendor/lib/libOpenCL.so. I pulled it off the device and put it into the Android project.
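For reference, the library can be copied off the device with adb; the destination below simply mirrors the jniLibs layout used in the CMake file later, and the ABI folder is an assumption that depends on your device:

adb pull /system/vendor/lib/libOpenCL.so app/src/main/jniLibs/armeabi-v7a/libOpenCL.so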

Configure CMakeLists.txt:

# For more information about using CMake with Android Studio, read the
# documentation: https://d.android.com/studio/projects/add-native-code.html

# Sets the minimum version of CMake required to build the native library.
cmake_minimum_required(VERSION 3.4.1)

# Creates and names a library, sets it as either STATIC
# or SHARED, and provides the relative paths to its source code.
# You can define multiple libraries, and CMake builds them for you.
# Gradle automatically packages shared libraries with your APK.

set(CMAKE_VERBOSE_MAKEFILE on)
set(libs "${CMAKE_SOURCE_DIR}/src/main/jniLibs")

include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp/include)

#--------------------------------------------------- import ---------------------------------------------------#
add_library(libOpenCL SHARED IMPORTED )
set_target_properties(libOpenCL PROPERTIES
    IMPORTED_LOCATION "${libs}/${ANDROID_ABI}/libOpenCL.so")

add_library( # Sets the name of the library.
             native-lib
             # Sets the library as a shared library.
             SHARED
             # Provides a relative path to your source file(s).
             src/main/cpp/native-lib.cpp )

# Searches for a specified prebuilt library and stores the path as a
# variable. Because CMake includes system libraries in the search path by
# default, you only need to specify the name of the public NDK library
# you want to add. CMake verifies that the library exists before
# completing its build.
find_library( # Sets the name of the path variable.
              log-lib
              # Specifies the name of the NDK library that
              # you want CMake to locate.
              log )

# Specifies libraries CMake should link to your target library. You
# can link multiple libraries, such as libraries you define in this
# build script, prebuilt third-party libraries, or system libraries.
target_link_libraries( # Specifies the target library.
                       native-lib libOpenCL
                       # Links the target library to the log library
                       # included in the NDK.
                       ${log-lib} )

Here is the C++ source code:

#include <jni.h>
#include <stdlib.h>
#include <string>
#include <opencl.h>
#include <android/log.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <unistd.h>
#include <sys/time.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>

#define TAG OpenCL
#define LOGD(...)  __android_log_print(ANDROID_LOG_DEBUG,"OPENCL",__VA_ARGS__)

const int ARRAY_SIZE = 100000;

// 1. Pick an OpenCL platform and create a context
cl_context CreateContext()
{
    cl_int errNum;
    cl_uint numPlatforms;
    cl_platform_id firstPlatformId;
    cl_context context = NULL;

    // Use the first available platform
    errNum = clGetPlatformIDs(1, &firstPlatformId, &numPlatforms);
    if (errNum != CL_SUCCESS || numPlatforms <= 0)
    {
        std::cerr << "Failed to find any OpenCL platforms." << std::endl;
        return NULL;
    }

    // Create an OpenCL context on that platform
    cl_context_properties contextProperties[] =
    {
        CL_CONTEXT_PLATFORM,
        (cl_context_properties)firstPlatformId,
        0
    };
    context = clCreateContextFromType(contextProperties, CL_DEVICE_TYPE_GPU,
                                      NULL, NULL, &errNum);
    return context;
}

// 2. Pick a device and create a command queue
cl_command_queue CreateCommandQueue(cl_context context, cl_device_id *device)
{
    cl_int errNum;
    cl_device_id *devices;
    cl_command_queue commandQueue = NULL;
    size_t deviceBufferSize = -1;

    // Query the size of the context's device list
    errNum = clGetContextInfo(context, CL_CONTEXT_DEVICES, 0, NULL, &deviceBufferSize);
    if (deviceBufferSize <= 0)
    {
        std::cerr << "No devices available.";
        return NULL;
    }

    // Allocate space for the device list and fetch it
    devices = new cl_device_id[deviceBufferSize / sizeof(cl_device_id)];
    errNum = clGetContextInfo(context, CL_CONTEXT_DEVICES, deviceBufferSize, devices, NULL);

    // Use the first available device
    commandQueue = clCreateCommandQueue(context, devices[0], 0, NULL);
    *device = devices[0];
    delete[] devices;
    return commandQueue;
}

// 3. Create and build the program object
cl_program CreateProgram(cl_context context, cl_device_id device, const char* fileName)
{
    cl_int errNum;
    cl_program program;

    std::ifstream kernelFile(fileName, std::ios::in);
    if (!kernelFile.is_open())
    {
        LOGD("Failed to open file for reading: %s\n", fileName);
        return NULL;
    }
    std::ostringstream oss;
    oss << kernelFile.rdbuf();
    std::string srcStdStr = oss.str();
    const char *srcStr = srcStdStr.c_str();

    program = clCreateProgramWithSource(context, 1,
                                        (const char**)&srcStr,
                                        NULL, NULL);
    errNum = clBuildProgram(program, 0, NULL, NULL, NULL, NULL);
    return program;
}

// Create the memory objects (two read-only input buffers, one read-write output buffer)
bool CreateMemObjects(cl_context context, cl_mem memObjects[3],
                      float *a, float *b)
{
    memObjects[0] = clCreateBuffer(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                                   sizeof(float) * ARRAY_SIZE, a, NULL);
    memObjects[1] = clCreateBuffer(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                                   sizeof(float) * ARRAY_SIZE, b, NULL);
    memObjects[2] = clCreateBuffer(context, CL_MEM_READ_WRITE,
                                   sizeof(float) * ARRAY_SIZE, NULL, NULL);
    return true;
}

// Release OpenCL resources
void Cleanup(cl_context context, cl_command_queue commandQueue,
             cl_program program, cl_kernel kernel, cl_mem memObjects[3])
{
    for (int i = 0; i < 3; i++)
    {
        if (memObjects[i] != 0)
            clReleaseMemObject(memObjects[i]);
    }
    if (commandQueue != 0)
        clReleaseCommandQueue(commandQueue);
    if (kernel != 0)
        clReleaseKernel(kernel);
    if (program != 0)
        clReleaseProgram(program);
    if (context != 0)
        clReleaseContext(context);
}

int test()
{
    cl_context context = 0;
    cl_command_queue commandQueue = 0;
    cl_program program = 0;
    cl_device_id device = 0;
    cl_kernel kernel = 0;
    cl_mem memObjects[3] = { 0, 0, 0 };
    cl_int errNum;
    // uint64_t t1,t2,t3;
    clock_t t1, t2, t3;
    const char* filename = "HelloWorld.cl";

    // 1. Pick an OpenCL platform and create a context
    context = CreateContext();
    // 2. Pick a device and create a command queue
    commandQueue = CreateCommandQueue(context, &device);
    // 3. Create and build the program object
    program = CreateProgram(context, device, filename); // "HelloWorld.cl"
    // 4. Create the OpenCL kernel
    kernel = clCreateKernel(program, "hello_kernel", NULL);

    // Prepare the data to process
    float result[ARRAY_SIZE];
    float a[ARRAY_SIZE];
    float b[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++)
    {
        a[i] = (float)i;
        b[i] = (float)(ARRAY_SIZE - i);
    }

    t1 = clock();  // mach_absolute_time();
    LOGD("t1 = %.8f\n", (double)t1);
    for (int j = 0; j < ARRAY_SIZE; j++) {
        result[j] = a[j] + b[j];
    }
    t2 = clock();  // mach_absolute_time();
    LOGD("t2 = %.8f\n", (double)t2);

    // Create the memory objects
    if (!CreateMemObjects(context, memObjects, a, b))
    {
        Cleanup(context, commandQueue, program, kernel, memObjects);
        return 1;
    }

    // 5. Set the kernel arguments and enqueue the kernel
    errNum = clSetKernelArg(kernel, 0, sizeof(cl_mem), &memObjects[0]);
    errNum |= clSetKernelArg(kernel, 1, sizeof(cl_mem), &memObjects[1]);
    errNum |= clSetKernelArg(kernel, 2, sizeof(cl_mem), &memObjects[2]);

    size_t globalWorkSize[1] = { ARRAY_SIZE };
    size_t localWorkSize[1] = { 1 };
    errNum = clEnqueueNDRangeKernel(commandQueue, kernel, 1, NULL,
                                    globalWorkSize, localWorkSize,
                                    0, NULL, NULL);

    // 6. Read back the result, then release the OpenCL resources
    errNum = clEnqueueReadBuffer(commandQueue, memObjects[2], CL_TRUE,
                                 0, ARRAY_SIZE * sizeof(float), result,
                                 0, NULL, NULL);
    t3 = clock();  // mach_absolute_time();
    LOGD("cpu t = %.8f\n", (float)(t2 - t1) / CLOCKS_PER_SEC);
    LOGD("gpu t = %.8f \n", (double)(t3 - t2) / CLOCKS_PER_SEC);
    LOGD("Executed program succesfully.");
    getchar();
    Cleanup(context, commandQueue, program, kernel, memObjects);
    return 0;
}

extern "C"
JNIEXPORT jstring JNICALL
Java_com_example_wangmingyong_opencl_MainActivity_stringFromJNI(
        JNIEnv *env,
        jobject /* this */) {
    test();
    std::string hello = "Hello from C++";
    return env->NewStringUTF(hello.c_str());
}

Remember to add the .cl file; it is the same kernel source as in the Mac example above.
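On Android the relative path "HelloWorld.cl" only resolves if the file really exists in the process's working directory. One common alternative (my own sketch, not necessarily how the original project is set up) is to ship the kernel under app/src/main/assets/ and read it through the NDK asset manager; this also requires linking the NDK android library in CMakeLists.txt and passing getAssets() down from Java:

#include <android/asset_manager.h>
#include <android/asset_manager_jni.h>
#include <string>

// Read a kernel source file that was packaged under app/src/main/assets/.
// 'assetManager' is the jobject obtained from getAssets() on the Java side.
std::string LoadKernelFromAssets(JNIEnv *env, jobject assetManager, const char *name)
{
    AAssetManager *mgr = AAssetManager_fromJava(env, assetManager);
    AAsset *asset = AAssetManager_open(mgr, name, AASSET_MODE_BUFFER);
    if (asset == NULL)
        return "";
    off_t length = AAsset_getLength(asset);
    std::string src(static_cast<const char *>(AAsset_getBuffer(asset)), (size_t)length);
    AAsset_close(asset);
    return src;
}

// The returned string can then be handed to clCreateProgramWithSource instead of
// reading the file with std::ifstream.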

The full code is on GitHub: https://github.com/myhouselove/OpenCL-android

Summary

The above are first, simple uses of OpenCL on the Mac and on Android. They are very much entry-level environment setup, but they do show how portable OpenCL is: essentially the same code runs on both platforms with almost no changes. In later posts I will combine it with some OpenCV matrix algorithms and look more concretely at OpenCL programming and performance.
