clrobj(<class name>) does not have llvm when passing array of struct to GPU Kernel (ALEA Library)
I'm getting a "Fody/Alea.CUDA: clrobj(cGPU) does not have llvm" build error in code that tries to pass an array of structs to an NVIDIA kernel using the ALEA library. Below is a simplified version of my code; to keep it simple I removed the output collection. For now I only need to be able to send the array of structs to the GPU.
using System;
using Alea.CUDA;
using Alea.CUDA.Utilities;
using Alea.CUDA.IL;

namespace GPUProgramming
{
    public class cGPU
    {
        public int Slice;
        public float FloatValue;
    }

    [AOTCompile(AOTOnly = true)]
    public class TestModule : ILGPUModule
    {
        public TestModule(GPUModuleTarget target) : base(target)
        {
        }

        const int blockSize = 64;

        [Kernel]
        public void Kernel2(deviceptr<cGPU> Data, int n)
        {
            var start = blockIdx.x * blockDim.x + threadIdx.x;
            int ind = threadIdx.x;

            // Stage each element's fields into shared memory.
            var sharedSlice = __shared__.Array<int>(64);
            var sharedFloatValue = __shared__.Array<float>(64);
            if (ind < n && start < n)
            {
                sharedSlice[ind] = Data[start].Slice;
                sharedFloatValue[ind] = Data[start].FloatValue;
                Intrinsic.__syncthreads();
            }
        }

        public void Test2(deviceptr<cGPU> Data, int n, int NumOfBlocks)
        {
            var GridDim = new dim3(NumOfBlocks, 1);
            var BlockDim = new dim3(64, 1);
            try
            {
                var lp = new LaunchParam(GridDim, BlockDim);
                GPULaunch(Kernel2, lp, Data, n);
            }
            catch (CUDAInterop.CUDAException x)
            {
                var code = x.Data0;
                Console.WriteLine("ErrorCode = {0}", code);
            }
        }

        public void Test2(cGPU[] Data)
        {
            int NumOfBlocks = Common.divup(Data.Length, blockSize);

            // Copy the host array to device memory and launch the kernel.
            using (var d_Slice = GPUWorker.Malloc(Data))
            {
                try
                {
                    Test2(d_Slice.Ptr, Data.Length, NumOfBlocks);
                }
                catch (CUDAInterop.CUDAException x)
                {
                    var code = x.Data0;
                    Console.WriteLine("ErrorCode = {0}", code);
                }
            }
        }
    }
}
Your data is a class, i.e. a reference type. Try using a struct instead. Reference types are a poor fit for the GPU because each instance needs its own small heap allocation.
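For illustration, a minimal sketch of that fix, assuming the rest of the module stays the same: declare cGPU as a value type. The explicit [StructLayout] attribute is an assumption added here to keep the memory layout predictable; it is not in the original code.

using System.Runtime.InteropServices;

// Sketch of the suggested change: cGPU as a blittable struct
// with the same fields as the original class.
[StructLayout(LayoutKind.Sequential)]
public struct cGPU
{
    public int Slice;
    public float FloatValue;
}

With cGPU defined as a struct, each array element is plain value data rather than a heap-allocated object, and the kernel and launch code above should compile unchanged.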