在 GPU 中共享许多 Gauss-Legendre 正交的根和权重

Sharing roots and weights for many Gauss-Legendre Quadrature in GPUs

我打算以并行方式计算大量数值求积,所有计算最终都使用同一组公共数据(相当大的根和权重数组,占用大约 25 KB 的内存)。Gauss-Legendre 求积法非常简单,可以作为入门。我想通过声明 device double *d_droot, *d_dweight 使根和权重对设备中的所有线程可用。但是我遗漏了一些东西,因为我必须显式地将数组指针传入内核才能使其正常工作。我怎样才能正确地做到这一点?更进一步,为了在设备上留出更多可用内存,是否可以把根和权重放进设备内存的某个常量区域?

代码已附上

#include <math.h>
#include <stdlib.h>
#include <stdio.h>


/* File-scope device-side pointers meant to hold the quadrature roots and
   weights.  NOTE(review): these are statically allocated __device__ symbols;
   taking their address with cudaMalloc(&d_droot, ...) from host code (as
   main() below does) is invalid — the answer below sets them with
   cudaMemcpyToSymbol instead. */
__device__  double *d_droot, *d_dweight;


__device__ __host__
double f(double alpha,double x)
{
  /* Integrand for the Gauss-Legendre quadrature demo: e^(alpha * x). */
  const double exponent = alpha * x;
  return exp(exponent);
}

__global__
void lege_inte2(int n, double alpha, double a, double b, double *lroots, double *weight, double *result)
{
  /*
    5-point Gauss-Legendre quadrature of f(alpha, x) over [a, b];
    each thread computes one complete quadrature independently.

    Parameters:
    n: Total number of quadratures
    alpha: parameter forwarded to the integrand f
    a: Lower integration limit (the call site passes a = -3.0; the
       original comment had a and b swapped)
    b: Upper integration limit (call site passes b = 3.0)
    lroots[]: roots for the quadrature (at least 5 entries)
    weight[]: weights for the quadrature (at least 5 entries)
    result[]: allocate the results for N quadratures.
   */
  double c1 = (b - a) / 2, c2 = (b + a) / 2, sum = 0; /* NOTE(review): `sum` is never used */
  int dummy;

  int i = blockIdx.x*blockDim.x + threadIdx.x;
  if (i < n)  /* bounds guard: the grid may be larger than n */
    {
      result[i] = 0.0;
      for (dummy = 0; dummy < 5; dummy++)  /* 5 = hard-coded node count */
    result[i] += weight[dummy] * f(alpha,c1 * lroots[dummy] + c2)*c1;
    }
}

__global__
void lege_inte2_shared(int n,double alpha, double a, double b,  double *result)
{
  /* BUG (the subject of this question): these extern __shared__
     declarations shadow the file-scope __device__ pointers of the same
     name (see the compiler warnings quoted in the answer), and no
     dynamic shared-memory size is passed as the third kernel launch
     parameter — so d_droot/d_dweight are read here as uninitialized
     shared-memory pointers. */
  extern __shared__ double *d_droot;
  extern __shared__ double *d_dweight;
  /*
    Parameters:
    n: Total number of quadratures
    a: Lower integration limit (original comment had a/b swapped)
    b: Upper integration limit
    d_droot[]: roots for the quadrature
    d_dweight[]: weights for the quadrature
    result[]: allocate the results for N quadratures.
   */
  double c1 = (b - a) / 2, c2 = (b + a) / 2, sum = 0; /* NOTE(review): `sum` is never used */
  int dummy;

  int i = blockIdx.x*blockDim.x + threadIdx.x;
  if (i < n)
    {
      result[i] = 0.0;
      for (dummy = 0; dummy < 5; dummy++)
    {
      result[i] += d_dweight[dummy] * f(alpha,c1 * d_droot[dummy] + c2)*c1;
      printf(" Vale: %f \n", d_dweight[dummy]); /* device printf: debug only, serializes */
    }
    }
}


int main(void)
{
  /* Driver: runs N independent 5-node Gauss-Legendre quadratures of
     exp(x) over [-3, 3] and checks against the known value 20.03574985. */
  int N = 1<<23;      /* number of quadratures (2^23) */
  int N_nodes = 5;    /* nodes per quadrature */


  double *droot, *dweight, *dresult, *d_dresult;


  /*double version in host*/
  droot =(double*)malloc(N_nodes*sizeof(double));
  dweight =(double*)malloc(N_nodes*sizeof(double));
  dresult =(double*)malloc(N*sizeof(double)); /*will receive the results of N quadratures!*/


  /*double version in device*/
  /* BUG (explained in the answer): d_droot/d_dweight are __device__
     symbols — their address cannot be taken in host code, and they are
     not allocated with cudaMalloc. */
  cudaMalloc(&d_droot, N_nodes*sizeof(double));
  cudaMalloc(&d_dweight, N_nodes*sizeof(double));
  cudaMalloc(&d_dresult, N*sizeof(double)); /*results for N quadratures will be contained here*/


  /*double version of the roots and weights (5-point Gauss-Legendre)*/
  droot[0] = 0.90618;
  droot[1] = 0.538469;
  droot[2] = 0.0;
  droot[3] = -0.538469;
  droot[4] = -0.90618;


  dweight[0] = 0.236927;
  dweight[1] = 0.478629;
  dweight[2] = 0.568889;
  dweight[3] = 0.478629;
  dweight[4] = 0.236927;



  /*double copy host-> device*/
  /* BUG (explained in the answer): cudaMemcpy is not the correct API for
     __device__ symbols — cudaMemcpyToSymbol is. */
  cudaMemcpy(d_droot, droot, N_nodes*sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(d_dweight, dweight, N_nodes*sizeof(double), cudaMemcpyHostToDevice);


  /* launch one quadrature per thread, 256 threads per block
     (NOTE(review): the old "SAXPY" comment was left over from a template) */

  lege_inte2<<<(N+255)/256, 256>>>(N,1.0,  -3.0, 3.0, d_droot, d_dweight, d_dresult); /*This kernel works OK*/
  /* NOTE(review): the disabled call below also misses the `alpha`
     argument — the kernel takes (n, alpha, a, b, result). */
  //lege_inte2_shared<<<(N+255)/256, 256>>>(N,  -3.0, 3.0,  d_dresult); /*why this one does not work? */





  cudaMemcpy(dresult, d_dresult, N*sizeof(double), cudaMemcpyDeviceToHost); 

  double maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = max(maxError, abs(dresult[i]-20.03574985)); /* NOTE(review): fabs() would be safer than abs() for doubles — confirm which overload is picked */
  printf("Max error: %f in %i quadratures \n", maxError, N);
  printf("integral: %f  \n" ,dresult[0]);


  /* BUG (review): `dresult` is a host malloc() pointer — it must not be
     passed to cudaFree (should be cudaFree(d_dresult)); the three host
     buffers are never free()d. */

  cudaFree(dresult);
  cudaFree(d_droot);
  cudaFree(d_dweight);

}

和编译它的 makefile:

# Objects to device-link into the final executable.
objects = main.o 

# Link step: nvcc performs the device link and produces `gauss`.
# NOTE(review): -Xcompiler -std=c99 forwards a C-only flag to the host
# compiler even though nvcc compiles this source as C++ — confirm it is
# intended.  Also, make requires recipe lines to begin with a TAB; the
# spaces here look like a copy/paste artifact — verify in the real file.
all: $(objects)
        nvcc   -Xcompiler -std=c99 -arch=sm_20 $(objects) -o gauss

# Compile each .cpp as CUDA (-x cu) with relocatable device code (-dc),
# as required for separate compilation before the device link above.
%.o: %.cpp
        nvcc -x cu -arch=sm_20  -I. -dc $< -o $@

clean:
        rm -f *.o gauss

提前感谢您的任何建议

您对 d_droot 和 d_dweight 的处理存在多种错误。当我编译你的代码时,我收到了这样的各种警告:

t640.cu(86): warning: address of a __shared__ variable "d_droot" cannot be directly taken in a host function

t640.cu(87): warning: address of a __shared__ variable "d_dweight" cannot be directly taken in a host function

t640.cu(108): warning: a __shared__ variable "d_droot" cannot be directly read in a host function

t640.cu(109): warning: a __shared__ variable "d_dweight" cannot be directly read in a host function

不容忽视。

  1. 这些声明:

    __device__  double *d_droot, *d_dweight;
    

    不定义 __shared__ 个变量,所以这些行:

    extern __shared__ double *d_droot;
    extern __shared__ double *d_dweight;
    

    没有意义。此外,如果您确实希望这些是动态分配的共享变量(这才是 extern __shared__ 的用途),您需要将分配大小作为第三个内核启动参数传递,而您没有这样做。

  2. 这些说法是不正确的:

    cudaMalloc(&d_droot, N_nodes*sizeof(double));
    cudaMalloc(&d_dweight, N_nodes*sizeof(double));
    

    您不能在主机代码中获取 __device__ 变量的地址,而且我们也不使用 cudaMalloc 分配 __device__ 变量;根据定义,它是静态分配。

  3. 我建议进行适当的 cuda 错误检查。作为快速测试,您还可以 运行 您的代码 cuda-memcheck。这两种方法都表明您的代码中存在 运行 时间错误(尽管不是任何问题的关键)。

  4. 这些说法也不正确:

    cudaMemcpy(d_droot, droot, N_nodes*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_dweight, dweight, N_nodes*sizeof(double), cudaMemcpyHostToDevice);
    

    对于 __device__ 变量,cudaMemcpy 不是正确的 API。请改用 cudaMemcpyToSymbol。

以下代码修复了这些各种使用错误,可以干净地编译,并且似乎 运行 正确。它表明没有必要将 __device__ 变量作为内核参数传递:

#include <math.h>
#include <stdlib.h>
#include <stdio.h>


/* Statically-allocated __device__ pointer symbols holding the device
   addresses of the roots/weights arrays; the host publishes the values
   with cudaMemcpyToSymbol (see main below). */
__device__  double *d_droot, *d_dweight;


__device__ __host__
double f(double alpha,double x)
{
  /* Integrand to be integrated by Gauss-Legendre quadrature: exp(alpha*x). */
  return exp(x * alpha);
}

__global__
void lege_inte2(int n, double alpha, double a, double b, double *result)
{
  /*
    5-point Gauss-Legendre quadrature of f(alpha, x) over [a, b]; one
    complete quadrature per thread.  Roots and weights are read through
    the file-scope __device__ pointers d_droot / d_dweight, which the
    host must set via cudaMemcpyToSymbol before launching this kernel.

    Parameters:
    n:      total number of quadratures
    alpha:  parameter forwarded to the integrand f
    a:      lower integration limit
    b:      upper integration limit
    result: device array with n entries receiving the results
   */
  double c1 = (b - a) / 2, c2 = (b + a) / 2;  /* interval rescaling to [-1, 1] */

  int i = blockIdx.x*blockDim.x + threadIdx.x;
  if (i < n)  /* grid may overshoot n */
    {
      result[i] = 0.0;
      for (int dummy = 0; dummy < 5; dummy++)  /* 5 quadrature nodes */
        result[i] += d_dweight[dummy] * f(alpha,c1 * d_droot[dummy] + c2)*c1;
    }
}

__global__
void lege_inte2_shared(int n,double alpha, double a, double b,  double *result)
{
  /*
    Same quadrature as lege_inte2, retained for comparison with the
    question's broken shared-memory version; reads the file-scope
    __device__ pointers d_droot / d_dweight directly and prints every
    weight (debug aid only — device printf serializes and is slow).

    Parameters:
    n:      total number of quadratures
    alpha:  parameter forwarded to the integrand f
    a:      lower integration limit
    b:      upper integration limit
    result: device array with n entries receiving the results
   */
  double c1 = (b - a) / 2, c2 = (b + a) / 2;

  int i = blockIdx.x*blockDim.x + threadIdx.x;
  if (i < n)
    {
      result[i] = 0.0;
      for (int dummy = 0; dummy < 5; dummy++)
        {
          result[i] += d_dweight[dummy] * f(alpha,c1 * d_droot[dummy] + c2)*c1;
          printf(" Vale: %f \n", d_dweight[dummy]);
        }
    }
}


int main(void)
{
  /* Driver: N independent 5-node Gauss-Legendre quadratures of exp(x)
     over [-3, 3]; result compared against the reference 20.03574985. */
  int N = 1<<23;      /* number of quadratures */
  int N_nodes = 5;    /* nodes per quadrature */


  double *droot, *dweight, *dresult, *d_dresult, *d_droot_temp, *d_dweight_temp;


  /* host-side buffers */
  droot =(double*)malloc(N_nodes*sizeof(double));
  dweight =(double*)malloc(N_nodes*sizeof(double));
  dresult =(double*)malloc(N*sizeof(double)); /* receives the results of N quadratures */


  /* device-side buffers */
  cudaMalloc(&d_droot_temp, N_nodes*sizeof(double));
  cudaMalloc(&d_dweight_temp, N_nodes*sizeof(double));
  cudaMalloc(&d_dresult, N*sizeof(double)); /* results for N quadratures */


  /* 5-point Gauss-Legendre roots and weights */
  droot[0] = 0.90618;
  droot[1] = 0.538469;
  droot[2] = 0.0;
  droot[3] = -0.538469;
  droot[4] = -0.90618;


  dweight[0] = 0.236927;
  dweight[1] = 0.478629;
  dweight[2] = 0.568889;
  dweight[3] = 0.478629;
  dweight[4] = 0.236927;



  /* copy host -> device, then publish the device addresses through the
     __device__ symbols (cudaMemcpyToSymbol — not cudaMemcpy — is the
     correct API for __device__ variables) */
  cudaMemcpy(d_droot_temp, droot, N_nodes*sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(d_dweight_temp, dweight, N_nodes*sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpyToSymbol(d_droot, &d_droot_temp, sizeof(double *));
  cudaMemcpyToSymbol(d_dweight, &d_dweight_temp, sizeof(double *));

  /* launch one quadrature per thread, 256 threads per block */
  lege_inte2<<<(N+255)/256, 256>>>(N,1.0,  -3.0, 3.0, d_dresult);
  cudaError_t err = cudaGetLastError();  /* catches bad launch configurations */
  if (err != cudaSuccess)
    printf("kernel launch failed: %s\n", cudaGetErrorString(err));
  /* the disabled variant needs the alpha argument too: */
  //lege_inte2_shared<<<(N+255)/256, 256>>>(N, 1.0, -3.0, 3.0,  d_dresult);

  /* blocking copy: also synchronizes with the kernel */
  cudaMemcpy(dresult, d_dresult, N*sizeof(double), cudaMemcpyDeviceToHost);

  double maxError = 0.0;
  for (int i = 0; i < N; i++)
    maxError = max(maxError, fabs(dresult[i]-20.03574985)); /* fabs: abs() risks the int overload */
  printf("Max error: %f in %i quadratures \n", maxError, N);
  printf("integral: %f  \n" ,dresult[0]);


  /* release device and host memory (the original leaked the host buffers) */
  cudaFree(d_dresult);
  cudaFree(d_droot_temp);
  cudaFree(d_dweight_temp);
  free(droot);
  free(dweight);
  free(dresult);

  return 0;
}

(我不能保证结果。)

现在,关于这个问题:

Even more, aiming to have available more free memory on the device, is it possible to burn the roots and weights to some constant portion of the memory of the device?

由于您对 d_dweightd_droot 的访问似乎是一致的:

result[i] += d_dweight[dummy] * f(alpha,c1 * d_droot[dummy] + c2)*c1;

那么将它们定义为 __constant__ 内存空间变量可能会很有用。当 warp 中的每个线程都在常量内存中请求相同的值(相同的位置)时,常量内存访问是最佳的。但是,__constant__ 内存不能动态分配,并且(仅)将指针存储在常量内存中是没有意义的;这不会带来常量缓存机制的任何好处。

因此,以下对代码的进一步修改演示了如何将这些值存储在 __constant__ 内存中,但它需要静态分配。此外,这实际上并不会“节省”任何设备内存。无论您是使用 cudaMalloc 动态分配、使用 __device__ 变量静态分配,还是通过 __constant__ 变量定义(同样是静态分配),所有这些方法都需要设备内存(板载 DRAM)中的全局内存后备存储。

演示可能的常量内存使用的代码:

#include <math.h>
#include <stdlib.h>
#include <stdio.h>

/* Number of Gauss-Legendre nodes per quadrature (compile-time constant,
   required because __constant__ arrays must be statically sized). */
#define N_nodes 5

/* Roots and weights in __constant__ memory: statically allocated and
   served by the constant cache, which broadcasts when all threads of a
   warp read the same element — exactly the access pattern of the
   kernels below. */
__constant__   double d_droot[N_nodes], d_dweight[N_nodes];


__device__ __host__
double f(double alpha,double x)
{
  /* The quadrature integrand: returns exp(alpha * x). */
  const double t = x * alpha;
  return exp(t);
}

__global__
void lege_inte2(int n, double alpha, double a, double b, double *result)
{
  /*
    5-point Gauss-Legendre quadrature of f(alpha, x) over [a, b]; one
    complete quadrature per thread.  Roots and weights come from the
    file-scope __constant__ arrays d_droot / d_dweight, which the host
    fills with cudaMemcpyToSymbol before launch; every thread in a warp
    reads the same element, the optimal constant-cache pattern.

    Parameters:
    n:      total number of quadratures
    alpha:  parameter forwarded to the integrand f
    a:      lower integration limit
    b:      upper integration limit
    result: device array with n entries receiving the results
   */
  double c1 = (b - a) / 2, c2 = (b + a) / 2;  /* rescale [a,b] to [-1,1] */

  int i = blockIdx.x*blockDim.x + threadIdx.x;
  if (i < n)  /* grid may overshoot n */
    {
      result[i] = 0.0;
      for (int dummy = 0; dummy < 5; dummy++)  /* 5 quadrature nodes */
        result[i] += d_dweight[dummy] * f(alpha,c1 * d_droot[dummy] + c2)*c1;
    }
}

__global__
void lege_inte2_shared(int n,double alpha, double a, double b,  double *result)
{
  /*
    NOTE(review): despite the leftover "_shared" name, this kernel no
    longer uses shared memory — it reads the __constant__ arrays
    d_droot / d_dweight directly.

    Parameters:
    n: Total number of quadratures
    a: Lower integration limit (original comment had a/b swapped)
    b: Upper integration limit
    d_droot[]: roots for the quadrature (__constant__)
    d_dweight[]: weights for the quadrature (__constant__)
    result[]: allocate the results for N quadratures.
   */
  double c1 = (b - a) / 2, c2 = (b + a) / 2, sum = 0; /* NOTE(review): `sum` is never used */
  int dummy;

  int i = blockIdx.x*blockDim.x + threadIdx.x;
  if (i < n)
    {
      result[i] = 0.0;
      for (dummy = 0; dummy < 5; dummy++)
    {
      result[i] += d_dweight[dummy] * f(alpha,c1 * d_droot[dummy] + c2)*c1;
      printf(" Vale: %f \n", d_dweight[dummy]); /* debug only: device printf serializes */
    }
    }
}


int main(void)
{
  /* Driver for the __constant__-memory version: N independent 5-node
     Gauss-Legendre quadratures of exp(x) over [-3, 3], checked against
     the reference value 20.03574985.  N_nodes comes from the #define. */
  int N = 1<<23;   /* number of quadratures */


  double *droot, *dweight, *dresult, *d_dresult;


  /* host-side buffers */
  droot =(double*)malloc(N_nodes*sizeof(double));
  dweight =(double*)malloc(N_nodes*sizeof(double));
  dresult =(double*)malloc(N*sizeof(double)); /* receives the results of N quadratures */


  /* device-side result buffer (roots/weights live in __constant__ memory) */
  cudaMalloc(&d_dresult, N*sizeof(double));


  /* 5-point Gauss-Legendre roots and weights */
  droot[0] = 0.90618;
  droot[1] = 0.538469;
  droot[2] = 0.0;
  droot[3] = -0.538469;
  droot[4] = -0.90618;


  dweight[0] = 0.236927;
  dweight[1] = 0.478629;
  dweight[2] = 0.568889;
  dweight[3] = 0.478629;
  dweight[4] = 0.236927;



  /* fill the __constant__ arrays (cudaMemcpyToSymbol is the correct API
     for __constant__/__device__ symbols) */
  cudaMemcpyToSymbol(d_droot, droot, N_nodes*sizeof(double));
  cudaMemcpyToSymbol(d_dweight, dweight, N_nodes*sizeof(double));

  /* launch one quadrature per thread, 256 threads per block */
  lege_inte2<<<(N+255)/256, 256>>>(N,1.0,  -3.0, 3.0, d_dresult);
  cudaError_t err = cudaGetLastError();  /* catches bad launch configurations */
  if (err != cudaSuccess)
    printf("kernel launch failed: %s\n", cudaGetErrorString(err));
  /* the disabled variant needs the alpha argument too: */
  //lege_inte2_shared<<<(N+255)/256, 256>>>(N, 1.0, -3.0, 3.0,  d_dresult);

  /* blocking copy: also synchronizes with the kernel */
  cudaMemcpy(dresult, d_dresult, N*sizeof(double), cudaMemcpyDeviceToHost);

  double maxError = 0.0;
  for (int i = 0; i < N; i++)
    maxError = max(maxError, fabs(dresult[i]-20.03574985)); /* fabs: abs() risks the int overload */
  printf("Max error: %f in %i quadratures \n", maxError, N);
  printf("integral: %f  \n" ,dresult[0]);


  /* release device and host memory (the original leaked the host buffers) */
  cudaFree(d_dresult);
  free(droot);
  free(dweight);
  free(dresult);

  return 0;
}