LCOV - code coverage report
Current view: top level - src/Util - MPI.H (source / functions)
Test: coverage_merged.info
Test Date: 2025-02-27 04:17:48
Coverage: Lines: 0.0 % (0 of 21)    Functions: 0.0 % (0 of 2)

Source code

#ifndef UTIL_MPI_H
#define UTIL_MPI_H

#include <vector>

#include <mpi.h>

#include <AMReX_ParallelDescriptor.H>
#include <AMReX_ParallelReduce.H>

namespace Util
{
namespace MPI
{

template <class T>
int Allgather(std::vector<T>& a_data)
{
    // Gather information about how many sites were found on each processor
    int nprocs;
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    int my_num = static_cast<int>(a_data.size());
    int num = my_num;

    // Communicate the total array size to everyone
    amrex::ParallelAllReduce::Sum(num, MPI_COMM_WORLD);
    // Temporary buffer to receive data from all procs
    std::vector<T> a_data_all(num);
    // Send information about how many sites are on each proc to all procs
    std::vector<int> nsites_procs(nprocs);
    MPI_Allgather(&my_num, 1, amrex::ParallelDescriptor::Mpi_typemap<int>::type(),
        nsites_procs.data(), 1, amrex::ParallelDescriptor::Mpi_typemap<int>::type(),
        MPI_COMM_WORLD);
    // Calculate the receive offset for each proc (exclusive prefix sum of the counts)
    std::vector<int> nsites_disp(nprocs);
    for (int i = 0; i < nprocs; i++)
    {
        nsites_disp[i] = 0;
        for (int j = 0; j < i; j++) nsites_disp[i] += nsites_procs[j];
    }
    // Look up the MPI datatype corresponding to T
    MPI_Datatype mpi_type = amrex::ParallelDescriptor::Mpi_typemap<T>::type();
    MPI_Allgatherv(
        a_data.data(), my_num, mpi_type,
        a_data_all.data(), nsites_procs.data(), nsites_disp.data(), mpi_type,
        MPI_COMM_WORLD);
    // Swap in the gathered data so the temporary buffer is no longer needed.
    a_data.swap(a_data_all);
    a_data_all.clear();
    return 0;
}

}
}

#endif
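
For reference, a minimal usage sketch of Util::MPI::Allgather. The include path "Util/MPI.H", the use of amrex::Initialize/amrex::Finalize for setup, and the per-rank contribution sizes are illustrative assumptions, not part of the header above.

// Hypothetical usage sketch: each rank contributes a different number of ints;
// after the call, every rank holds the concatenation of all contributions,
// ordered by rank.
#include <cstdio>
#include <vector>

#include <mpi.h>
#include <AMReX.H>

#include "Util/MPI.H"   // assumed include path for the header shown above

int main(int argc, char* argv[])
{
    // amrex::Initialize calls MPI_Init if MPI has not been initialized yet.
    amrex::Initialize(argc, argv);
    {
        int rank = 0;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        // Illustrative data: rank r sends r + 1 copies of the value r.
        std::vector<int> data(rank + 1, rank);

        // Gather everyone's entries onto every rank.
        Util::MPI::Allgather(data);

        std::printf("rank %d now holds %zu entries\n", rank, data.size());
    }
    amrex::Finalize();
    return 0;
}

Because the gathered vector is ordered by rank, the displacement (prefix-sum) step in the header determines exactly where each rank's contribution lands in the result.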
        

Generated by: LCOV version 2.0-1