CUDA optimization for a vector tensor product using a custom kernel or CUBLAS

I have two vectors a and b. Each vector contains the x, y, z coordinates of a 3D point, stored as a Vector3f:

struct Vector3f
{ 
    float x;
    float y;
    float z;
};

Vector a holds n = 5000 points and vector b holds m = 4000 points. I need to compute a tensor product between them like the one shown on the right side of the picture: the result has a length of 5000 * 4000, contains floats, and is stored in c.
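For reference, the computation I want is equivalent to this plain CPU loop (just a sketch to make the layout of c explicit, not the code I actually run):

// CPU reference: c has n * m entries; i indexes a, j indexes b, and j is the
// fast-moving index, so element (i, j) lives at c[j + m * i]
void tensor3dProductCPU(const int n, const int m, const Vector3f* a, const Vector3f* b, float* c)
{
    for (int i = 0; i < n; i++)
        for (int j = 0; j < m; j++)
            c[j + m * i] = a[i].x * b[j].x + a[i].y * b[j].y + a[i].z * b[j].z;
}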

__global__ void tensor3dProdcutClassic(const int n, const int m, const Vector3f *a, const Vector3f *b, float *c) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // int j = blockIdy.y * blockDim.y + threadIdx.y;

    // check if the index is out of range
    if (i < n) {
        for (int j = 0; j < m; j++) {
            int idx = j + m * i;
            c[idx] = a[i].x * b[j].x + a[i].y * b[j].y + a[i].z * b[j].z;
        } 
    }
} 

dim3 blockSize(32, 1, 1);
dim3 gridSize((n + blockSize.x - 1) / blockSize.x, 1, 1);

tensor3dProdcutClassic<<<gridSize, blockSize>>>(n, m, x, y, out);

The execution time on the Volta architecture is very long.
My question is how to optimize the kernel to reduce that time; it is mostly spent in the for loop inside the kernel. I also know that none of the global reads and writes here are coalesced.

You can have the kernel walk over a and b at the same time, like this:

__global__ void tensor3dProdcutClassic(const int n, const int m, const Vector3f *a, const Vector3f *b, float *c)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;

    if (i < n && j < m)
    {
        int idx = j + m * i;
        c[idx] = a[i].x * b[j].x + a[i].y * b[j].y + a[i].z * b[j].z;
    }
}

dim3 blockSize(32, 32);
dim3 gridSize((int)ceil(n / 32.0), (int)ceil(m / 32.0));

tensor3dProdcutClassic<<<gridSize, blockSize>>>(n, m, x, y, out);
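One more detail worth checking: with i coming from threadIdx.x, consecutive threads in a warp write to c with a stride of m, so the stores are not coalesced. Swapping the roles of the two indices makes consecutive threads write consecutive elements of c. A sketch of that variant (my naming, not benchmarked here):

__global__ void tensor3dProductCoalesced(const int n, const int m, const Vector3f *a, const Vector3f *b, float *c)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x; // fast-moving index over b
    int i = blockIdx.y * blockDim.y + threadIdx.y; // slow-moving index over a

    if (i < n && j < m)
    {
        // j is contiguous in memory, so this store is coalesced within a warp
        c[j + m * i] = a[i].x * b[j].x + a[i].y * b[j].y + a[i].z * b[j].z;
    }
}

dim3 blockSize(32, 32);
dim3 gridSize((int)ceil(m / 32.0), (int)ceil(n / 32.0));

tensor3dProductCoalesced<<<gridSize, blockSize>>>(n, m, x, y, out);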

Update
I tried modifying the code to use plain float arrays, with and without shared memory; the version without shared memory is consistently 3 or 4 times faster.

With shared memory:

#define BLOCK_SIZE 32

__global__ void tensor3dProdcutKernel(float* a, float* b, float* c, int n, int m);

void tensor3dProdcut(const int n, const int m, const float* a, const float* b, float* c)
{
    float* d_a;
    size_t size = (uint64_t)n * 3 * sizeof(float);
    cudaMalloc(&d_a, size);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    float* d_b;
    size = (uint64_t)m * 3 * sizeof(float);
    cudaMalloc(&d_b, size);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    float* d_c;
    size = (uint64_t)n * m * sizeof(float);
    cudaMalloc(&d_c, size);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((int)ceil((double)n / BLOCK_SIZE), (int)ceil((double)m / BLOCK_SIZE));
    tensor3dProdcutKernel<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n, m);
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}

__global__ void tensor3dProdcutKernel(float* a, float* b, float* c, int n, int m)
{
    int i, blockRow, blockCol, row, col;
    float Cvalue;
    blockRow = blockIdx.x;
    blockCol = blockIdx.y;
    row = threadIdx.x;
    col = threadIdx.y;
    // threads outside the n x m range exit here, before the __syncthreads below
    if (blockRow * BLOCK_SIZE + row >= n || blockCol * BLOCK_SIZE + col >= m)
        return;
    // stage this block's points in shared memory (note: declared double while the data is float)
    __shared__ double as[BLOCK_SIZE][3];
    __shared__ double bs[BLOCK_SIZE][3];
    // every thread with the same row stores the same as[row][i], and likewise for bs[col][i]
    for (i = 0; i < 3; i++)
    {
        as[row][i] = a[(BLOCK_SIZE * blockRow + row) * 3 + i];
        bs[col][i] = b[(BLOCK_SIZE * blockCol + col) * 3 + i];
    }
    __syncthreads();
    Cvalue = 0;
    for (i = 0; i < 3; i++)
        Cvalue += as[row][i] * bs[col][i];
    c[(BLOCK_SIZE * blockRow + row) * m + BLOCK_SIZE * blockCol + col] = Cvalue;
}

Without shared memory:

__global__ void tensor3dProdcutKernel(float* a, float* b, float* c, int n, int m)
{
    int i, blockRow, blockCol, row, col;
    float Cvalue;
    blockRow = blockIdx.x;
    blockCol = blockIdx.y;
    row = threadIdx.x;
    col = threadIdx.y;
    if (blockRow * BLOCK_SIZE + row >= n || blockCol * BLOCK_SIZE + col >= m)
        return;
    Cvalue = 0;
    for (i = 0; i < 3; i++)
        Cvalue += a[(BLOCK_SIZE * blockRow + row) * 3 + i] * b[(BLOCK_SIZE * blockCol + col) * 3 + i];
    c[(BLOCK_SIZE * blockRow + row) * m + BLOCK_SIZE * blockCol + col] = Cvalue;
}
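Since each element of c is just a 3-component dot product, the whole result is also the matrix product of the n x 3 array with the transpose of the m x 3 array, so the kernel launch in the host code above could be replaced by a single cuBLAS GEMM call. A sketch that slots into tensor3dProdcut once d_a, d_b and d_c are allocated (it assumes a cublasHandle_t created with cublasCreate elsewhere; not benchmarked here, and with an inner dimension of only 3 it may or may not beat the custom kernel):

#include <cublas_v2.h>

// The row-major n x 3 array d_a is a column-major 3 x n matrix (lda = 3),
// and the row-major m x 3 array d_b is a column-major 3 x m matrix (ldb = 3).
// c[j + m * i] = dot(a[i], b[j]) is then the column-major m x n product B^T * A.
void tensor3dProdcutCublas(cublasHandle_t handle, const int n, const int m,
                           const float* d_a, const float* d_b, float* d_c)
{
    const float alpha = 1.0f;
    const float beta = 0.0f;
    // C (m x n, ldc = m) = B^T (m x 3) * A (3 x n)
    cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N,
                m, n, 3,
                &alpha,
                d_b, 3,
                d_a, 3,
                &beta,
                d_c, m);
}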