How do I change the CMake build of a CUDA static library from debug to release?

I am new to CMake. I want to use my CUDA kernels as a static library, called through an extern "C" void function(); declaration, and compile the whole project with cmake in the end. But its running speed on the GPU did not satisfy me, so I ran it from Nsight Eclipse in debug and release separately. After profiling both in NVVP, I found that the cmake build of the static library defaults to debug mode.

So how do I change the static library build from debug to release?

First, I created a project in Nsight Eclipse.

Here is the file structure:

Test_in_stack  
-release
-debug
-src  
--GPU.cu
--simpleCUFFT.cu
-lib
--GPU.cuh
--Kernels.h

The content of src/simpleCUFFT.cu is:

// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

// includes, project
// #include <Kernels.h>
#include <GPU.cuh>
#include <cuda_runtime.h>
#include <cufft.h>
#include <cufftXt.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <device_functions.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <thrust/generate.h>
#include <thrust/sequence.h>
#include <thrust/device_ptr.h>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#include <thrust/equal.h>
#include <thrust/for_each.h>


// Complex data type
typedef float2 Complex;

#define FFT_NUM 1024
#define RANGE_NUM 1024
#define SIGNAL_SIZE RANGE_NUM*FFT_NUM

extern "C" void GPU_Pro(Complex *h_signal,int *h_count);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main()
{
    Complex *h_signal = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE);
    int *h_count = (int *)malloc(sizeof(int) * SIGNAL_SIZE);
    // Initialize the memory for the signal
    for (unsigned int i = 0; i < SIGNAL_SIZE; ++i)
    {
        h_signal[i].x = rand() / (float)RAND_MAX;
        h_signal[i].y = rand() / (float)RAND_MAX;
        h_count[i]=i/FFT_NUM;
    }
    GPU_Pro(h_signal,h_count);

    // Release the host buffers allocated with malloc above
    free(h_signal);
    free(h_count);
    cudaDeviceReset();
}

The content of src/GPU.cu is:

#include <Kernels.h>
#include <GPU.cuh>
#include <cuda_runtime.h>
#include <cufft.h>
#include <cufftXt.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <device_functions.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <thrust/generate.h>
#include <thrust/sequence.h>
#include <thrust/device_ptr.h>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#include <thrust/equal.h>
#include <thrust/for_each.h>

typedef float2 Complex;
#define FFT_NUM 1024
#define RANGE_NUM 1024
#define SIGNAL_SIZE RANGE_NUM*FFT_NUM

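// Note: GPU_Pro below picks up C linkage from the extern "C" declaration
// in GPU.cuh, so it matches the extern "C" prototype used in simpleCUFFT.cu.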
void GPU_Pro(Complex *h_signal,int *h_count)
{
    Complex *d_signal;
    float *d_signal_float;
    int *d_count;
    checkCudaErrors(cudaMalloc((void **)&d_signal, SIGNAL_SIZE*sizeof(Complex)));
    checkCudaErrors(cudaMalloc((void **)&d_count, SIGNAL_SIZE*sizeof(int)));
    checkCudaErrors(cudaMalloc((void **)&d_signal_float, SIGNAL_SIZE*sizeof(float)));
    cufftHandle plan;
    checkCudaErrors(cufftPlan1d(&plan, FFT_NUM, CUFFT_C2C, 1));
    dim3 dimblock(32, 32);
    dim3 dimgrid(FFT_NUM / 32, RANGE_NUM / 32);

    // Copy host memory to device
    checkCudaErrors(cudaMemcpy(d_signal, h_signal, SIGNAL_SIZE*sizeof(Complex),
                                   cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_count, h_count, SIGNAL_SIZE*sizeof(int),
                                       cudaMemcpyHostToDevice));
    for(int i=0;i<RANGE_NUM;i++)
    {
        // Each transform starts FFT_NUM elements after the previous one
        // (FFT_NUM == RANGE_NUM here, but FFT_NUM is the correct stride).
        checkCudaErrors(cufftExecC2C(plan, d_signal+i*FFT_NUM, d_signal+i*FFT_NUM, CUFFT_FORWARD));
    }
    MatAbsNaive_float<<<dimgrid,dimblock>>>(d_signal,d_signal_float,FFT_NUM,RANGE_NUM);
    thrust::stable_sort_by_key(thrust::device_pointer_cast(d_signal_float),thrust::device_pointer_cast(d_signal_float)+SIGNAL_SIZE,thrust::device_pointer_cast(d_count));
    thrust::stable_sort_by_key(thrust::device_pointer_cast(d_count),thrust::device_pointer_cast(d_count)+SIGNAL_SIZE,thrust::device_pointer_cast(d_signal_float));

    checkCudaErrors(cufftDestroy(plan));
    // cudaDeviceReset() releases the remaining device allocations
    cudaDeviceReset();
}

The content of lib/Kernels.h is:

/*
 * Kernels.h
 *
 *  Created on: Jan 10, 2019
 *      Author: root
 */
#include "iostream"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "stdio.h"
#include <stdlib.h>
#include <string.h>
#include "math.h"
#include <mat.h>
#include "cuComplex.h"
#include "cublas.h"
#include <cufft.h>
#include <cufftXt.h>
#include <time.h>
#include <cublas_v2.h>


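// Note: defining a __global__ function in a header is only safe while a
// single .cu file includes it; a second inclusion would duplicate the symbol.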
__global__ void MatAbsNaive_float(cuComplex *idata, float *odata, int M, int N)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if ((x < M) && (y < N))
    {
        odata[x + M*y] = sqrt(idata[x + M*y].x * idata[x + M*y].x + idata[x + M*y].y * idata[x + M*y].y);
    }
}

The content of lib/GPU.cuh is:

#ifndef GPU_CUH
#define GPU_CUH

#include <stdio.h>
#include "cuComplex.h"
typedef float2 Complex;
extern "C"
void GPU_Pro(Complex *h_signal,int *h_count);
#endif   

The NVVP results in debug and release are as follows:

[NVVP screenshots: debug vs. release]

Then I put the same files into a cmake project.

Here is the file structure:

Test_in_stack
-CMakeLists(1).txt
-build
-src  
--CMakeLists(2).txt
--simpleCUFFT.cpp
-lib
--CMakeLists(3).txt
--GPU.cu
--GPU.cuh
--Kernels.h

(1), (2) and (3) are just labels; the real name of each file is CMakeLists.txt. The contents of simpleCUFFT.cu and simpleCUFFT.cpp are identical.

The content of CMakeLists(1).txt is:

cmake_minimum_required (VERSION 2.6)

PROJECT(GPU_MODE C CXX)
#PROJECT(GPU_MODE)
ADD_SUBDIRECTORY(src bin)
ADD_SUBDIRECTORY(lib)
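
A side note: nothing in this top-level file selects a build type, so configuring without -DCMAKE_BUILD_TYPE leaves it empty. A common pattern (a sketch, not something this project used) is to default it to Release at the top level:

IF(NOT CMAKE_BUILD_TYPE)
  # Default to Release when the user does not pass -DCMAKE_BUILD_TYPE=...
  SET(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE)
ENDIF()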

The content of CMakeLists(2).txt is:

INCLUDE_DIRECTORIES(
${eclipse_home}VSPS/include 
/usr/include 
${eclipse_home}PetDCPS/include 
/user/include/c++ 
/usr/local/cuda-8.0/include 
                   )

INCLUDE_DIRECTORIES(/root/Chenjie/cuda-workspace/Test_in_stack/lib 
/usr/local/cuda-8.0/samples/common/inc  
/usr/local/cuda-8.0/include)
LINK_DIRECTORIES(/usr/local/cuda-8.0/lib64/)

SET(CPU_LIST simpleCUFFT.cpp)
FIND_PACKAGE(CUDA REQUIRED)
SET(EXTRA_LIBS ${EXTRA_LIBS} gpu ${CUDA_LIBRARIES})
ADD_EXECUTABLE(CPUProcessTest ${CPU_LIST})
SET(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/src)
TARGET_LINK_LIBRARIES(CPUProcessTest optimized ${EXTRA_LIBS} vsip_c)
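
Note that in the TARGET_LINK_LIBRARIES line above, "optimized" is a linkage keyword, not an optimization switch: it applies only to the single library that follows it and merely means "link this one in non-debug configurations"; it does not change any compiler flags. A sketch of the intended pairing (gpu_d is a hypothetical debug build of the library):

TARGET_LINK_LIBRARIES(CPUProcessTest
    optimized gpu      # linked in Release and other non-debug configurations
    debug     gpu_d    # linked in Debug only
    vsip_c)            # linked in every configuration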

The content of CMakeLists(3).txt is:

#for cuda

PROJECT(gpu)
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
INCLUDE_DIRECTORIES(/root/Chenjie/cuda-workspace/Test_in_stack/lib 
/usr/local/cuda-8.0/samples/common/inc  
/usr/local/cuda-8.0/include)
FIND_PACKAGE(CUDA QUIET REQUIRED)
#SET(CUDA_NVCC_FLAGS -03;-G;-g)
SET(CUDA_NVCC_FLAGS -gencode arch=compute_52,code=sm_52;-G;-g;-lcufft;-lcudart;-lcublas)
SET(CMAKE_CUDA_FLAGS ${CUDA_NVCC_FLAGS_RELEASE})
FILE(GLOB_RECURSE CURRENT_HEADERS *.h *.hpp *.cuh)
FILE(GLOB CURRENT_SOURCES *.cpp *.cu)

SOURCE_GROUP("Include" FILES ${CURRENT_HEADERS})
SOURCE_GROUP("Source" FILES ${CURRENT_SOURCES})

INCLUDE_DIRECTORIES(/usr/local/cuda-8.0/include)
LINK_DIRECTORIES(/usr/local/cuda-8.0/lib64/)
LINK_LIBRARIES(cufft cublas)

#TARGET_LINK_LIBRARIES(gpu ${CUDA_LIBRARIES})
#CUDA_ADD_LIBRARY(gpu SHARED ${CURRENT_HEADERS} ${CURRENT_SOURCES})
CUDA_ADD_LIBRARY(gpu STATIC ${CURRENT_HEADERS} ${CURRENT_SOURCES} ${CUDA_LIBRARIES} ${CUDA_CUFFT_LIBRARIES})
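
Two things in this file that I only understood later: flags placed in CUDA_NVCC_FLAGS are passed to nvcc in every configuration, so the -G;-g pair above produces unoptimized device debug code regardless of CMAKE_BUILD_TYPE; and the SET(CMAKE_CUDA_FLAGS ...) line has no effect here, because CMAKE_CUDA_FLAGS belongs to CMake's native CUDA language support (CMake 3.8+), not to FindCUDA. Also, -lcufft, -lcudart and -lcublas are linker inputs rather than compile flags; with FindCUDA the usual way to link the CUDA libraries is through its helper macros, roughly as in this sketch:

FIND_PACKAGE(CUDA QUIET REQUIRED)
# Architecture flags apply to every configuration; debug/release flags
# belong in CUDA_NVCC_FLAGS_DEBUG / CUDA_NVCC_FLAGS_RELEASE instead.
SET(CUDA_NVCC_FLAGS -gencode arch=compute_52,code=sm_52)
CUDA_ADD_LIBRARY(gpu STATIC ${CURRENT_HEADERS} ${CURRENT_SOURCES})
CUDA_ADD_CUFFT_TO_TARGET(gpu)    # links CUFFT into the target
CUDA_ADD_CUBLAS_TO_TARGET(gpu)   # links CUBLAS into the target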

I used the following commands in the build directory:

cmake -DCMAKE_BUILD_TYPE=Release ..
make
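
In hindsight, a useful diagnostic: -DCMAKE_BUILD_TYPE=Release only adds the per-configuration flags such as CUDA_NVCC_FLAGS_RELEASE on top of CUDA_NVCC_FLAGS; anything hard-coded in CUDA_NVCC_FLAGS itself, here -G;-g, is kept in every build type. Printing both variables from CMakeLists(3).txt makes this visible (a sketch):

MESSAGE(STATUS "nvcc flags, all configs: ${CUDA_NVCC_FLAGS}")
MESSAGE(STATUS "nvcc flags, Release:     ${CUDA_NVCC_FLAGS_RELEASE}")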

But it didn't work. As the NVVP result shows, it still seems to be running a debug build:

[NVVP screenshot: cmake result]

So how do I change the compile flags so that the CUDA static library is built in release mode?
I am using Red Hat Enterprise Linux Server 7.1 (Maipo), CUDA 8.0, cmake version 2.8.12.2 and GNU Make 3.82.

Update 2019-01-12

I added MESSAGE(STATUS "Build type: ${CMAKE_BUILD_TYPE}") to CMakeLists(2).txt. The result is:

[root@node2 build]# cmake -DCMAKE_BUILD_TYPE=Release ..
-- Build type: Release
-- Configuring done
-- Generating done

But the result in NVVP did not change.

Well, I found a way to solve it.
I changed lines 10 and 11 of CMakeLists(3).txt (the two SET(...FLAGS...) lines) to:

SET(CUDA_NVCC_FLAGS -gencode arch=compute_52,code=sm_52;-lcufft;-lcudart;-lcublas)
SET(CMAKE_CUDA_FLAGS ${CUDA_NVCC_FLAGS} -O3 -DNDEBUG)
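
As far as I can tell, the operative change is removing -G;-g from CUDA_NVCC_FLAGS: -G disables device code optimization and -g adds host debug information, and both were previously applied in every configuration. A cleaner equivalent, as a sketch, keeps the debug flags in the per-configuration variables so the build type selects them:

SET(CUDA_NVCC_FLAGS -gencode arch=compute_52,code=sm_52)   # every configuration
SET(CUDA_NVCC_FLAGS_DEBUG -G;-g)                           # Debug builds only
SET(CUDA_NVCC_FLAGS_RELEASE -O3;-DNDEBUG)                  # Release builds only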

After running:

cmake -DCMAKE_BUILD_TYPE=Release ..
make clean
make

The result in NVVP shows that it was compiled in Release mode.