Creating dynamically sized MPI file views

I'd like to write out a binary file using collective MPI I/O. My plan is to create an MPI derived type resembling
struct soln_dynamic_t
{
    int int_data[2];
    double *u;   /* Length constant for all instances of this struct */
};

Each processor then creates a view based on the derived type and writes into that view.

I have everything working for the case where *u is replaced by u[10] (see the complete code below), but ultimately I'd like a dynamically sized array u. (In case it matters: for any given run, the length of u is the same for all instances of soln_dynamic_t, but it is not known at compile time.)

What is the best way to handle this?

I have read several posts on why I can't use soln_dynamic_t directly as an MPI structure. The problem is that processors are not guaranteed to have the same offset between u[0] and int_data[0]. (Is that right?)
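A minimal sketch of the issue (the function name dynamic_disp is hypothetical, and <stdlib.h> is assumed for malloc/free): with a heap-allocated u, the displacement between the two members is simply whatever address malloc returned, so it can vary from rank to rank and from run to run.

/* Hypothetical sketch: the displacement MPI would need for soln_dynamic_t.
   'n' is the run-time array length. */
MPI_Aint dynamic_disp(int n)
{
    struct soln_dynamic_t s;
    s.u = malloc(n * sizeof(double));

    MPI_Aint base, addr;
    MPI_Get_address(s.int_data, &base);
    MPI_Get_address(s.u, &addr);

    free(s.u);
    return addr - base;   /* depends on the heap address: not reproducible */
}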

On the other hand, the struct

struct soln_static_t
{
    int int_data[2];
    double u[10];     /* fixed at compile time */
};

works, because the offsets are guaranteed to be the same across processors.

I have considered several approaches, but I suspect there must be a standard way to do this. Any suggestions would be helpful.

Several other posts on this question were helpful, although they deal mainly with communication rather than file I/O.

Here is the complete code:

#include <mpi.h>

typedef struct 
{
    int int_data[2];
    double u[10];  /* Make this a dynamic length (but fixed) */
} soln_static_t;


void build_soln_type(int n, int* int_data, double *u, MPI_Datatype *soln_t)
{
    int block_lengths[2] = {2,n};
    MPI_Datatype typelist[2] = {MPI_INT, MPI_DOUBLE};

    MPI_Aint disp[2], start_address, address;
    MPI_Get_address(int_data, &start_address);  /* MPI_Address is deprecated */
    MPI_Get_address(u, &address);
    disp[0] = 0;
    disp[1] = address - start_address;

    MPI_Datatype tmp_type;
    MPI_Type_create_struct(2, block_lengths, disp, typelist, &tmp_type);

    /* Resize so consecutive structs are laid out back to back in the file */
    MPI_Aint extent = block_lengths[0]*sizeof(int) + block_lengths[1]*sizeof(double);
    MPI_Type_create_resized(tmp_type, 0, extent, soln_t);
    MPI_Type_free(&tmp_type);
    MPI_Type_commit(soln_t);
}

int main(int argc, char** argv)
{
    MPI_File   file;
    int globalsize, localsize, starts, order;

    MPI_Datatype localarray, soln_t;
    int rank, nprocs, nsize = 10;  /* must match size in struct above */

    /* --- Initialize MPI */
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    /* --- Set up data to write out */
    soln_static_t data;
    data.int_data[0] = nsize;
    data.int_data[1] = rank;
    data.u[0] = 3.14159;  /* To check that data is written as expected */
    build_soln_type(nsize, data.int_data, data.u, &soln_t);

    MPI_File_open(MPI_COMM_WORLD, "bin.out", 
                  MPI_MODE_CREATE|MPI_MODE_WRONLY,
                  MPI_INFO_NULL, &file);

    /* --- Create file view for this processor */
    globalsize = nprocs;  
    localsize = 1;
    starts = rank;
    order = MPI_ORDER_C;

    MPI_Type_create_subarray(1, &globalsize, &localsize, &starts, order, 
                             soln_t, &localarray);
    MPI_Type_commit(&localarray);

    MPI_File_set_view(file, 0, soln_t, localarray, 
                           "native", MPI_INFO_NULL);

    /* --- Write data into view */
    MPI_File_write_all(file, data.int_data, 1, soln_t, MPI_STATUS_IGNORE);

    /* --- Clean up */
    MPI_File_close(&file);

    MPI_Type_free(&localarray);
    MPI_Type_free(&soln_t);

    MPI_Finalize();
    return 0;
}

Since the size of the u array of the soln_dynamic_t type is known at run time and does not change afterwards, I would rather suggest another approach.

Basically, you store all the data contiguously in memory, using a flexible array member:

typedef struct
{
    int int_data[2];
    double u[];  /* Make this a dynamic length (but fixed) */
} soln_dynamic_t;

You then have to allocate this struct manually:

soln_dynamic_t *alloc_soln(int nsize, int count) {
    return (soln_dynamic_t *)calloc(count, offsetof(soln_dynamic_t, u) + nsize*sizeof(double));
}

Note that you cannot index an array of soln_dynamic_t directly, because the element size is unknown at compile time (sizeof(soln_dynamic_t) does not include the flexible array). Instead, you have to compute the pointers manually:

soln_dynamic_t *p = alloc_soln(10, 2);
p[0].int_data[0] = 1;  // OK
p[0].u[0] = 2;         // OK
p[1].int_data[0] = 3;  // Wrong! sizeof(soln_dynamic_t) does not include u,
                       // so p[1] lands inside the first element's data.
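One way to make the manual address computation explicit is a small helper along these lines (the name soln_at is hypothetical, not part of any API; <stddef.h> is assumed for offsetof):

/* Hypothetical helper: address of element i in an array from alloc_soln() */
soln_dynamic_t *soln_at(soln_dynamic_t *base, int nsize, int i)
{
    size_t stride = offsetof(soln_dynamic_t, u) + nsize * sizeof(double);
    return (soln_dynamic_t *)((char *)base + (size_t)i * stride);
}

With that, soln_at(p, 10, 1)->int_data[0] = 3; does what p[1].int_data[0] above cannot.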

Here is a complete rewritten version of your program:

#include <mpi.h>
#include <stddef.h>   /* offsetof */
#include <stdlib.h>   /* calloc */

typedef struct 
{
    int int_data[2];
    double u[];  /* Make this a dynamic length (but fixed) */
} soln_dynamic_t;


void build_soln_type(int n, MPI_Datatype *soln_t)
{
    int block_lengths[2] = {2,n};
    MPI_Datatype typelist[2] = {MPI_INT, MPI_DOUBLE};
    MPI_Aint disp[2];

    disp[0] = offsetof(soln_dynamic_t, int_data);  /* always 0 */
    disp[1] = offsetof(soln_dynamic_t, u);         /* identical on every rank */

    MPI_Datatype tmp_type;
    MPI_Type_create_struct(2,block_lengths,disp,typelist,&tmp_type);

    /* Extent of one record: the header plus the run-time-sized u array */
    MPI_Aint extent = offsetof(soln_dynamic_t, u) + block_lengths[1]*sizeof(double);
    MPI_Type_create_resized(tmp_type, 0, extent, soln_t);
    MPI_Type_free(&tmp_type);
    MPI_Type_commit(soln_t);
}

soln_dynamic_t *alloc_soln(int nsize, int count) {
    return (soln_dynamic_t *)calloc(count, offsetof(soln_dynamic_t, u) + nsize*sizeof(double));
}

int main(int argc, char** argv)
{
    MPI_File   file;
    int globalsize, localsize, starts, order;

    MPI_Datatype localarray, soln_t;
    int rank, nprocs, nsize = 10;  /* chosen at run time; no longer fixed in the struct */

    /* --- Initialize MPI */
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    /* --- Set up data to write out */
    soln_dynamic_t *data = alloc_soln(nsize,1);
    data->int_data[0] = nsize;
    data->int_data[1] = rank;
    data->u[0] = 3.14159;  /* To check that data is written as expected */
    build_soln_type(nsize, &soln_t);

    MPI_File_open(MPI_COMM_WORLD, "bin2.out", 
                  MPI_MODE_CREATE|MPI_MODE_WRONLY,
                  MPI_INFO_NULL, &file);

    /* --- Create file view for this processor */
    globalsize = nprocs;  
    localsize = 1;
    starts = rank;
    order = MPI_ORDER_C;

    MPI_Type_create_subarray(1, &globalsize, &localsize, &starts, order, 
                             soln_t, &localarray);
    MPI_Type_commit(&localarray);

    MPI_File_set_view(file, 0, soln_t, localarray, 
                           "native", MPI_INFO_NULL);

    /* --- Write data into view */
    MPI_File_write_all(file, data, 1, soln_t, MPI_STATUS_IGNORE);

    /* --- Clean up */
    MPI_File_close(&file);

    MPI_Type_free(&localarray);
    MPI_Type_free(&soln_t);
    free(data);

    MPI_Finalize();
    return 0;
}
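As a quick sanity check (a sketch only, reusing soln_t, localarray, and alloc_soln from above before the types are freed; <stdio.h> assumed), each rank can read its record back through the same view and verify the values it wrote:

/* Read-back check: same view, same datatype */
soln_dynamic_t *check = alloc_soln(nsize, 1);
MPI_File_open(MPI_COMM_WORLD, "bin2.out", MPI_MODE_RDONLY,
              MPI_INFO_NULL, &file);
MPI_File_set_view(file, 0, soln_t, localarray, "native", MPI_INFO_NULL);
MPI_File_read_all(file, check, 1, soln_t, MPI_STATUS_IGNORE);
MPI_File_close(&file);

if (check->int_data[1] != rank || check->u[0] != 3.14159)
    fprintf(stderr, "rank %d: read-back mismatch\n", rank);
free(check);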