#ifndef AMREX_ML_LINOP_H_
#define AMREX_ML_LINOP_H_
#include <AMReX_Config.H>

#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
#include <AMReX_Hypre.H>
#include <AMReX_HypreNodeLap.H>
#endif

#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
#include <AMReX_PETSc.H>
#endif

#ifdef AMREX_USE_EB
#include <AMReX_EBMultiFabUtil.H>
#include <AMReX_MultiCutFab.H>
#endif

#include <AMReX_Any.H>
#include <AMReX_BndryRegister.H>
#include <AMReX_FabDataType.H>
#include <AMReX_MLMGBndry.H>
#include <AMReX_MultiFabUtil.H>

#include <algorithm>
#include <string>

namespace amrex {

enum class BottomSolver : int {
    Default, smoother, bicgstab, cg, bicgcg, cgbicg, hypre, petsc
};
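
// A hedged usage sketch: the bottom solver is normally selected on the MLMG
// driver rather than on the operator itself (`linop` below stands for any
// concrete MLLinOpT-derived operator):
//
//     MLMG mlmg(linop);
//     mlmg.setBottomSolver(BottomSolver::bicgstab); // hypre/petsc require the
//                                                   // corresponding AMReX build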

struct LPInfo
{
    bool do_agglomeration = true;
    bool do_consolidation = true;
    bool do_semicoarsening = false;
    int agg_grid_size = -1;
    int con_grid_size = -1;
    int con_ratio = 2;
    int con_strategy = 3;
    bool has_metric_term = true;
    int max_coarsening_level = 30;
    int max_semicoarsening_level = 0;
    int semicoarsening_direction = -1;
    int hidden_direction = -1;

    LPInfo& setAgglomeration (bool x) noexcept { do_agglomeration = x; return *this; }
    LPInfo& setConsolidation (bool x) noexcept { do_consolidation = x; return *this; }
    LPInfo& setSemicoarsening (bool x) noexcept { do_semicoarsening = x; return *this; }
    LPInfo& setAgglomerationGridSize (int x) noexcept { agg_grid_size = x; return *this; }
    LPInfo& setConsolidationGridSize (int x) noexcept { con_grid_size = x; return *this; }
    LPInfo& setConsolidationRatio (int x) noexcept { con_ratio = x; return *this; }
    LPInfo& setConsolidationStrategy (int x) noexcept { con_strategy = x; return *this; }
    LPInfo& setMetricTerm (bool x) noexcept { has_metric_term = x; return *this; }
    LPInfo& setMaxCoarseningLevel (int n) noexcept { max_coarsening_level = n; return *this; }
    LPInfo& setMaxSemicoarseningLevel (int n) noexcept { max_semicoarsening_level = n; return *this; }
    LPInfo& setSemicoarseningDirection (int n) noexcept { semicoarsening_direction = n; return *this; }
    LPInfo& setHiddenDirection (int n) noexcept { hidden_direction = n; return *this; }

    [[nodiscard]] bool hasHiddenDimension () const noexcept {
        return hidden_direction >= 0 && hidden_direction < AMREX_SPACEDIM;
    }

    static constexpr int getDefaultAgglomerationGridSize () {
#ifdef AMREX_USE_GPU
        return 32;
#else
        return AMREX_D_PICK(32, 16, 8);
#endif
    }

    static constexpr int getDefaultConsolidationGridSize () {
#ifdef AMREX_USE_GPU
        return 32;
#else
        return AMREX_D_PICK(32, 16, 8);
#endif
    }
};
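
// A minimal sketch of the intended builder-style use (values hypothetical;
// each setter returns *this, so the calls chain):
//
//     LPInfo info = LPInfo().setAgglomeration(true)
//                           .setConsolidation(true)
//                           .setMaxCoarseningLevel(10);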

struct LinOpEnumType
{
    enum struct BCMode { Homogeneous, Inhomogeneous };
    enum struct StateMode { Solution, Correction };
    enum struct Location { FaceCenter, FaceCentroid, CellCenter, CellCentroid };
};

template <typename T> class MLMGT;
template <typename T> class MLCGSolverT;
template <typename T> class MLPoissonT;
template <typename T> class MLABecLaplacianT;
template <typename T> class GMRESMLMGT;

template <typename MF>
class MLLinOpT
{
public:

    template <typename T> friend class MLMGT;
    template <typename T> friend class MLCGSolverT;
    template <typename T> friend class MLPoissonT;
    template <typename T> friend class MLABecLaplacianT;
    template <typename T> friend class GMRESMLMGT;

    using MFType = MF;
    using FAB = typename FabDataType<MF>::fab_type;
    using RT = typename FabDataType<MF>::value_type;

    using BCType = LinOpBCType;
    using BCMode = LinOpEnumType::BCMode;
    using StateMode = LinOpEnumType::StateMode;
    using Location = LinOpEnumType::Location;

    MLLinOpT () = default;
    virtual ~MLLinOpT () = default;

    MLLinOpT (const MLLinOpT<MF>&) = delete;
    MLLinOpT (MLLinOpT<MF>&&) = delete;
    MLLinOpT<MF>& operator= (const MLLinOpT<MF>&) = delete;
    MLLinOpT<MF>& operator= (MLLinOpT<MF>&&) = delete;

    void define (const Vector<Geometry>& a_geom,
                 const Vector<BoxArray>& a_grids,
                 const Vector<DistributionMapping>& a_dmap,
                 const LPInfo& a_info,
                 const Vector<FabFactory<FAB> const*>& a_factory,
                 bool eb_limit_coarsening = true);

    [[nodiscard]] virtual std::string name () const { return std::string("Unspecified"); }

    /**
     * \brief Boundary of the whole domain.
     *
     * This function must be called, and it must be called before other bc
     * functions. This version is for single-component solves, or for when
     * all the components have the same BC types.
     *
     * \param lobc lower boundaries
     * \param hibc upper boundaries
     */
    void setDomainBC (const Array<BCType,AMREX_SPACEDIM>& lobc,
                      const Array<BCType,AMREX_SPACEDIM>& hibc) noexcept;
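
    // For example, a sketch for a single-component solve with Dirichlet
    // walls in x and y and a z direction that the Geometry marks periodic
    // (`linop` is a hypothetical concrete operator):
    //
    //     linop.setDomainBC({AMREX_D_DECL(LinOpBCType::Dirichlet,
    //                                     LinOpBCType::Dirichlet,
    //                                     LinOpBCType::Periodic)},
    //                       {AMREX_D_DECL(LinOpBCType::Dirichlet,
    //                                     LinOpBCType::Dirichlet,
    //                                     LinOpBCType::Periodic)});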

    /**
     * \brief Boundary of the whole domain.
     *
     * This function must be called, and it must be called before other bc
     * functions. This version is for multi-component solves.
     *
     * \param lobc lower boundaries
     * \param hibc upper boundaries
     */
    void setDomainBC (const Vector<Array<BCType,AMREX_SPACEDIM> >& lobc,
                      const Vector<Array<BCType,AMREX_SPACEDIM> >& hibc) noexcept;

    /**
     * \brief Set location of domain boundaries.
     *
     * By default, the domain BC is on the domain face. If that's the case,
     * this function doesn't need to be called. However, one can use this
     * function to set non-zero domain BC locations. Note that all values
     * should be >= 0. If this function is called, it MUST be called before
     * setLevelBC.
     */
    void setDomainBCLoc (const Array<Real,AMREX_SPACEDIM>& lo_bcloc,
                         const Array<Real,AMREX_SPACEDIM>& hi_bcloc) noexcept;

    /**
     * \brief Needs coarse data for bc?
     *
     * If the lowest level grids do not cover the entire domain, coarse
     * level data are needed for supplying Dirichlet bc at the coarse/fine
     * boundary, even when the domain bc is not Dirichlet.
     */
    [[nodiscard]] bool needsCoarseDataForBC () const noexcept { return m_needs_coarse_data_for_bc; }

    /**
     * \brief Set coarse/fine boundary conditions. For cell-centered solves
     * only.
     *
     * If we want to do a linear solve where the boundary conditions on the
     * coarsest AMR level of the solve come from a coarser level (e.g., the
     * base AMR level of the solve is > 0 and does not cover the entire
     * domain), we must explicitly provide the coarser data. Boundary
     * conditions from a coarser level are Dirichlet by default. The MultiFab
     * crse does not need to have ghost cells and is at a coarser resolution
     * than the coarsest AMR level of the solve; it is used to supply
     * (interpolated) boundary conditions for the solve. NOTE: If this is
     * called, it must be called before `setLevelBC`. If crse is nullptr,
     * then the bc values are assumed to be zero. The coarse/fine BC type
     * can be changed to homogeneous Neumann by the bc_type argument. In that
     * case, use nullptr for the crse argument.
     *
     * \param crse the coarse AMR level data
     * \param crse_ratio the coarsening ratio between the fine and coarse AMR levels
     * \param bc_type optional. It's Dirichlet by default, and can be Neumann.
     */
    void setCoarseFineBC (const MF* crse, int crse_ratio,
                          LinOpBCType bc_type = LinOpBCType::Dirichlet) noexcept;

    void setCoarseFineBC (const MF* crse, IntVect const& crse_ratio,
                          LinOpBCType bc_type = LinOpBCType::Dirichlet) noexcept;

    template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>,int> = 0>
    void setCoarseFineBC (const AMF* crse, int crse_ratio,
                          LinOpBCType bc_type = LinOpBCType::Dirichlet) noexcept;

    template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>,int> = 0>
    void setCoarseFineBC (const AMF* crse, IntVect const& crse_ratio,
                          LinOpBCType bc_type = LinOpBCType::Dirichlet) noexcept;
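
    // For example, a sketch for a solve whose base level is AMR level 1,
    // where `phi_crse` is a hypothetical MultiFab holding AMR level 0 data
    // and the refinement ratio between the two levels is 2:
    //
    //     linop.setCoarseFineBC(&phi_crse, 2); // Dirichlet c/f BC by default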

    /**
     * \brief Set boundary conditions for a given level. For cell-centered
     * solves only.
     *
     * This must be called for each level. Argument `levelbcdata` is used to
     * supply Dirichlet or Neumann bc at the physical domain; if those data
     * are homogeneous, we can pass nullptr instead of levelbcdata.
     * Regardless, this function must be called. If used, the MultiFab
     * levelbcdata must have one ghost cell. Only the data outside the
     * physical domain will be used. It is assumed that the data in those
     * ghost cells outside the domain live exactly on the face of the
     * physical domain. Argument `amrlev` is the relative level, such that
     * the lowest level of the solve is always 0. The optional arguments
     * robinbc_[a|b|f] provide the Robin boundary condition `a*phi + b*dphi/dn
     * = f`. Note that `d./dn` is `d./dx` at the upper boundary and `-d./dx`
     * at the lower boundary, for Robin BC. However, for inhomogeneous
     * Neumann BC, the value in levelbcdata is assumed to be `d./dx`.
     */
    virtual void setLevelBC (int /*amrlev*/, const MF* /*levelbcdata*/,
                             const MF* /*robinbc_a*/ = nullptr,
                             const MF* /*robinbc_b*/ = nullptr,
                             const MF* /*robinbc_f*/ = nullptr) = 0;

    template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>,int> = 0>
    void setLevelBC (int amrlev, const AMF* levelbcdata,
                     const AMF* robinbc_a = nullptr,
                     const AMF* robinbc_b = nullptr,
                     const AMF* robinbc_f = nullptr);
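
    // For example, a sketch for a two-level solve with homogeneous physical
    // BC (nullptr suffices), or with inhomogeneous values stored in the
    // ghost cells of hypothetical solution MultiFabs `phi[ilev]`:
    //
    //     linop.setLevelBC(0, nullptr);   // homogeneous
    //     linop.setLevelBC(1, nullptr);
    //
    //     for (int ilev = 0; ilev < nlevels; ++ilev) {
    //         linop.setLevelBC(ilev, &phi[ilev]);  // inhomogeneous
    //     }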

    //! Set verbosity
    void setVerbose (int v) noexcept { verbose = v; }

    //! Set order of interpolation at coarse/fine boundary
    void setMaxOrder (int o) noexcept { maxorder = o; }
    //! Get order of interpolation at coarse/fine boundary
    [[nodiscard]] int getMaxOrder () const noexcept { return maxorder; }

    //! Set the flag for whether the solver should try to make a singular
    //! problem solvable, which is on by default.
    void setEnforceSingularSolvable (bool o) noexcept { enforceSingularSolvable = o; }
    //! Get the flag for whether the solver should try to make a singular
    //! problem solvable.
    [[nodiscard]] bool getEnforceSingularSolvable () const noexcept { return enforceSingularSolvable; }

    [[nodiscard]] virtual BottomSolver getDefaultBottomSolver () const { return BottomSolver::bicgstab; }

    //! Return the number of components
    [[nodiscard]] virtual int getNComp () const { return 1; }

    [[nodiscard]] virtual int getNGrow (int /*a_lev*/ = 0, int /*mg_lev*/ = 0) const { return 0; }

    //! Does it need an update if it's reused?
    [[nodiscard]] virtual bool needsUpdate () const { return false; }
    //! Update for reuse.
    virtual void update () {}

    /**
     * \brief Restriction onto coarse MG level
     *
     * \param amrlev AMR level
     * \param cmglev coarse MG level
     * \param crse coarse data. This is the output.
     * \param fine fine data. This is the input. Some operators might need to fill
     *             ghost cells. This is why it's not a const reference.
     */
    virtual void restriction (int amrlev, int cmglev, MF& crse, MF& fine) const = 0;

    /**
     * \brief Add interpolated coarse MG level data to fine MG level data
     *
     * \param amrlev AMR level
     * \param fmglev fine MG level
     * \param crse coarse data
     * \param fine fine data
     */
    virtual void interpolation (int amrlev, int fmglev, MF& fine, const MF& crse) const = 0;

    /**
     * \brief Overwrite fine MG level data with interpolated coarse data.
     *
     * \param amrlev AMR level
     * \param fmglev fine MG level
     * \param fine fine MG level data
     * \param crse coarse MG level data
     */
    virtual void interpAssign (int amrlev, int fmglev, MF& fine, MF& crse) const
    {
        amrex::ignore_unused(amrlev, fmglev, fine, crse);
        amrex::Abort("MLLinOpT::interpAssign: Must be implemented for FMG cycle");
    }

    /**
     * \brief Interpolation between AMR levels
     *
     * \param famrlev fine AMR level
     * \param fine fine level data
     * \param crse coarse level data
     * \param nghost number of ghost cells
     */
    virtual void interpolationAmr (int famrlev, MF& fine, const MF& crse,
                                   IntVect const& nghost) const
    {
        amrex::ignore_unused(famrlev, fine, crse, nghost);
        amrex::Abort("MLLinOpT::interpolationAmr: Must be implemented for composite solves across multiple AMR levels");
    }

    /**
     * \brief Average-down data from fine AMR level to coarse AMR level.
     *
     * \param camrlev coarse AMR level
     * \param crse_sol solution on coarse AMR level
     * \param crse_rhs RHS on coarse AMR level
     * \param fine_sol solution on fine AMR level
     * \param fine_rhs RHS on fine AMR level
     */
    virtual void averageDownSolutionRHS (int camrlev, MF& crse_sol, MF& crse_rhs,
                                         const MF& fine_sol, const MF& fine_rhs)
    {
        amrex::ignore_unused(camrlev, crse_sol, crse_rhs, fine_sol, fine_rhs);
        amrex::Abort("MLLinOpT::averageDownSolutionRHS: Must be implemented for composite solves across multiple AMR levels");
    }

    /**
     * \brief Apply the linear operator, out = L(in)
     *
     * \param amrlev AMR level
     * \param mglev MG level
     * \param out output
     * \param in input
     * \param bc_mode Is the BC homogeneous or inhomogeneous?
     * \param s_mode Are the data solution or correction?
     * \param bndry object for handling coarse/fine and physical boundaries
     */
    virtual void apply (int amrlev, int mglev, MF& out, MF& in, BCMode bc_mode,
                        StateMode s_mode, const MLMGBndryT<MF>* bndry=nullptr) const = 0;

    /**
     * \brief Smooth
     *
     * \param amrlev AMR level
     * \param mglev MG level
     * \param sol unknowns
     * \param rhs RHS
     * \param skip_fillboundary flag controlling whether ghost cell filling can be skipped
     */
    virtual void smooth (int amrlev, int mglev, MF& sol, const MF& rhs,
                         bool skip_fillboundary=false) const = 0;

    //! Divide mf by the diagonal component of the operator. Used by bicgstab.
    virtual void normalize (int amrlev, int mglev, MF& mf) const {
        amrex::ignore_unused(amrlev, mglev, mf);
    }

    /**
     * \brief Compute the residual for the given solution
     *
     * \param amrlev AMR level
     * \param resid residual
     * \param x solution
     * \param b RHS
     * \param crse_bcdata optional argument providing BC at the coarse/fine boundary
     */
    virtual void solutionResidual (int amrlev, MF& resid, MF& x, const MF& b,
                                   const MF* crse_bcdata=nullptr) = 0;

    /**
     * \brief Compute the residual for the residual-correction form, resid = b - L(x)
     *
     * \param amrlev AMR level
     * \param mglev MG level
     * \param resid residual
     * \param x unknown in the residual-correction form
     * \param b RHS in the residual-correction form
     * \param bc_mode Is the BC homogeneous or inhomogeneous?
     * \param crse_bcdata optional argument providing BC at the coarse/fine boundary
     */
    virtual void correctionResidual (int amrlev, int mglev, MF& resid, MF& x, const MF& b,
                                     BCMode bc_mode, const MF* crse_bcdata=nullptr) = 0;

    /**
     * \brief Reflux at AMR coarse/fine boundary
     *
     * \param crse_amrlev coarse AMR level
     * \param res coarse level residual
     * \param crse_sol coarse level solution
     * \param crse_rhs coarse level RHS
     * \param fine_res fine level residual
     * \param fine_sol fine level solution
     * \param fine_rhs fine level RHS
     */
    virtual void reflux (int crse_amrlev,
                         MF& res, const MF& crse_sol, const MF& crse_rhs,
                         MF& fine_res, MF& fine_sol, const MF& fine_rhs) const
    {
        amrex::ignore_unused(crse_amrlev, res, crse_sol, crse_rhs, fine_res,
                             fine_sol, fine_rhs);
        amrex::Abort("MLLinOpT::reflux: Must be implemented for composite solves across multiple AMR levels");
    }

    /**
     * \brief Compute fluxes
     *
     * \param amrlev AMR level
     * \param fluxes fluxes
     * \param sol solution
     * \param loc location of the fluxes
     */
    virtual void compFlux (int /*amrlev*/, const Array<MF*,AMREX_SPACEDIM>& /*fluxes*/,
                           MF& /*sol*/, Location /*loc*/) const
    {
        amrex::Abort("AMReX_MLLinOp::compFlux::How did we get here?");
    }

    /**
     * \brief Compute gradients of the solution
     *
     * \param amrlev AMR level
     * \param grad grad(sol)
     * \param sol solution
     * \param loc location of the gradients
     */
    virtual void compGrad (int /*amrlev*/, const Array<MF*,AMREX_SPACEDIM>& /*grad*/,
                           MF& /*sol*/, Location /*loc*/) const
    {
        amrex::Abort("AMReX_MLLinOp::compGrad::How did we get here?");
    }

    //! Apply metric terms if there are any
    virtual void applyMetricTerm (int /*amrlev*/, int /*mglev*/, MF& /*rhs*/) const {}
    //! Unapply metric terms if there are any
    virtual void unapplyMetricTerm (int /*amrlev*/, int /*mglev*/, MF& /*rhs*/) const {}

    //! This is needed for our nodal projection solver
    virtual void unimposeNeumannBC (int /*amrlev*/, MF& /*rhs*/) const {}

    //! Extra terms introduced when we treat inhomogeneous Neumann BC as homogeneous.
    virtual void applyInhomogNeumannTerm (int /*amrlev*/, MF& /*rhs*/) const {}

    //! For the overset solver only
    virtual void applyOverset (int /*amrlev*/, MF& /*rhs*/) const {}

    //! Scale RHS to fix solvability
    virtual void scaleRHS (int /*amrlev*/, MF& /*rhs*/) const {}

    //! Get the offset for fixing solvability
    virtual Vector<RT> getSolvabilityOffset (int /*amrlev*/, int /*mglev*/,
                                             MF const& /*rhs*/) const { return {}; }

    //! Fix solvability by subtracting the offset from the RHS
    virtual void fixSolvabilityByOffset (int /*amrlev*/, int /*mglev*/, MF& /*rhs*/,
                                         Vector<RT> const& /*offset*/) const {}

    virtual void prepareForSolve () = 0;

    virtual void prepareForGMRES () {}

    //! For GMRES to work, this might need to be implemented to mask out
    //! Dirichlet nodes or cells (e.g., EB covered cells or overset cells)
    virtual void setDirichletNodesToZero (int /*amrlev*/, int /*mglev*/,
                                          MF& /*mf*/) const
    {
        amrex::Warning("This function might need to be implemented for GMRES to work with this LinOp.");
    }

    //! Is it singular on the given AMR level?
    [[nodiscard]] virtual bool isSingular (int amrlev) const = 0;
    //! Is the bottom of MG singular?
    [[nodiscard]] virtual bool isBottomSingular () const = 0;

    //! x dot y, used by the bottom solver
    virtual RT xdoty (int amrlev, int mglev, const MF& x, const MF& y, bool local) const = 0;

    virtual std::unique_ptr<MLLinOpT<MF>> makeNLinOp (int /*grid_size*/) const
    {
        amrex::Abort("MLLinOp::makeNLinOp: N-Solve not supported");
        return nullptr;
    }

    virtual void getFluxes (const Vector<Array<MF*,AMREX_SPACEDIM> >& /*a_flux*/,
                            const Vector<MF*>& /*a_sol*/,
                            Location /*a_loc*/) const {
        amrex::Abort("MLLinOp::getFluxes: How did we get here?");
    }
    virtual void getFluxes (const Vector<MF*>& /*a_flux*/,
                            const Vector<MF*>& /*a_sol*/) const {
        amrex::Abort("MLLinOp::getFluxes: How did we get here?");
    }

#ifdef AMREX_USE_EB
    virtual void getEBFluxes (const Vector<MF*>& /*a_flux*/,
                              const Vector<MF*>& /*a_sol*/) const {
        amrex::Abort("MLLinOp::getEBFluxes: How did we get here?");
    }
#endif

#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
    [[nodiscard]] virtual std::unique_ptr<Hypre> makeHypre (Hypre::Interface /*hypre_interface*/) const {
        amrex::Abort("MLLinOp::makeHypre: How did we get here?");
        return {nullptr};
    }
    [[nodiscard]] virtual std::unique_ptr<HypreNodeLap> makeHypreNodeLap(
        int /*bottom_verbose*/,
        const std::string& /* options_namespace */) const
    {
        amrex::Abort("MLLinOp::makeHypreNodeLap: How did we get here?");
        return {nullptr};
    }
#endif

#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
    [[nodiscard]] virtual std::unique_ptr<PETScABecLap> makePETSc () const {
        amrex::Abort("MLLinOp::makePETSc: How did we get here?");
        return {nullptr};
    }
#endif

    [[nodiscard]] virtual bool supportNSolve () const { return false; }

    virtual void copyNSolveSolution (MF&, MF const&) const {}

    virtual void postSolve (Vector<MF>& /*sol*/) const {}

    [[nodiscard]] virtual RT normInf (int amrlev, MF const& mf, bool local) const = 0;

    virtual void averageDownAndSync (Vector<MF>& sol) const = 0;

    virtual void avgDownResAmr (int clev, MF& cres, MF const& fres) const
    {
        amrex::ignore_unused(clev, cres, fres);
        amrex::Abort("MLLinOpT::avgDownResAmr: Must be implemented for composite solves across multiple AMR levels");
    }

    // This function is needed for the FMG cycle, but not the V-cycle.
    virtual void avgDownResMG (int clev, MF& cres, MF const& fres) const;

    [[nodiscard]] bool isMFIterSafe (int amrlev, int mglev1, int mglev2) const;

    //! Return the number of AMR levels
    [[nodiscard]] int NAMRLevels () const noexcept { return m_num_amr_levels; }

    //! Return the number of MG levels at the given AMR level
    [[nodiscard]] int NMGLevels (int amrlev) const noexcept { return m_num_mg_levels[amrlev]; }

    [[nodiscard]] const Geometry& Geom (int amr_lev, int mglev=0) const noexcept { return m_geom[amr_lev][mglev]; }

    // BC
    Vector<Array<BCType, AMREX_SPACEDIM> > m_lobc;
    Vector<Array<BCType, AMREX_SPACEDIM> > m_hibc;
    // We need to save the original copies because we change the BC type to
    // Neumann for inhomogeneous Neumann and Robin.
    Vector<Array<BCType, AMREX_SPACEDIM> > m_lobc_orig;
    Vector<Array<BCType, AMREX_SPACEDIM> > m_hibc_orig;

protected:

    static constexpr int mg_coarsen_ratio = 2;
    static constexpr int mg_box_min_width = 2;
#ifdef AMREX_USE_EB
    static constexpr int mg_domain_min_width = 4;
#else
    static constexpr int mg_domain_min_width = 2;
#endif

    LPInfo info;

    int verbose = 0;

    int maxorder = 3;

    bool enforceSingularSolvable = true;

    int m_num_amr_levels = 0;
    Vector<int> m_amr_ref_ratio;

    Vector<int> m_num_mg_levels;
    const MLLinOpT<MF>* m_parent = nullptr;

    IntVect m_ixtype;

    bool m_do_agglomeration = false;
    bool m_do_consolidation = false;

    bool m_do_semicoarsening = false;
    Vector<IntVect> mg_coarsen_ratio_vec;

    //! The first Vector is indexed by AMR level and the second by MG level.
    Vector<Vector<Geometry> > m_geom;
    Vector<Vector<BoxArray> > m_grids;
    Vector<Vector<DistributionMapping> > m_dmap;
    Vector<Vector<std::unique_ptr<FabFactory<FAB> > > > m_factory;
    Vector<int> m_domain_covered;

    MPI_Comm m_default_comm = MPI_COMM_NULL;
    MPI_Comm m_bottom_comm = MPI_COMM_NULL;
    struct CommContainer {
        MPI_Comm comm;
        CommContainer (MPI_Comm m) noexcept : comm(m) {}
        CommContainer (const CommContainer&) = delete;
        CommContainer (CommContainer&&) = delete;
        void operator= (const CommContainer&) = delete;
        void operator= (CommContainer&&) = delete;
        ~CommContainer () { // NOLINT(modernize-use-equals-default)
#ifdef BL_USE_MPI
            if (comm != MPI_COMM_NULL) { MPI_Comm_free(&comm); }
#endif
        }
    };
    std::unique_ptr<CommContainer> m_raii_comm;

    Array<Real, AMREX_SPACEDIM> m_domain_bloc_lo {{AMREX_D_DECL(0._rt,0._rt,0._rt)}};
    Array<Real, AMREX_SPACEDIM> m_domain_bloc_hi {{AMREX_D_DECL(0._rt,0._rt,0._rt)}};

    bool m_needs_coarse_data_for_bc = false;
    LinOpBCType m_coarse_fine_bc_type = LinOpBCType::Dirichlet;
    IntVect m_coarse_data_crse_ratio = IntVect(-1);
    RealVect m_coarse_bc_loc;
    const MF* m_coarse_data_for_bc = nullptr;
    MF m_coarse_data_for_bc_raii;

    //! Return AMR refinement ratios
    [[nodiscard]] const Vector<int>& AMRRefRatio () const noexcept { return m_amr_ref_ratio; }

    //! Return AMR refinement ratio at given AMR level
    [[nodiscard]] int AMRRefRatio (int amr_lev) const noexcept { return m_amr_ref_ratio[amr_lev]; }

    [[nodiscard]] FabFactory<FAB> const* Factory (int amr_lev, int mglev=0) const noexcept {
        return m_factory[amr_lev][mglev].get();
    }

    [[nodiscard]] GpuArray<BCType,AMREX_SPACEDIM> LoBC (int icomp = 0) const noexcept {
        return GpuArray<BCType,AMREX_SPACEDIM>{{AMREX_D_DECL(m_lobc[icomp][0],
                                                             m_lobc[icomp][1],
                                                             m_lobc[icomp][2])}};
    }
    [[nodiscard]] GpuArray<BCType,AMREX_SPACEDIM> HiBC (int icomp = 0) const noexcept {
        return GpuArray<BCType,AMREX_SPACEDIM>{{AMREX_D_DECL(m_hibc[icomp][0],
                                                             m_hibc[icomp][1],
                                                             m_hibc[icomp][2])}};
    }

    [[nodiscard]] bool hasBC (BCType bct) const noexcept;
    [[nodiscard]] bool hasInhomogNeumannBC () const noexcept;
    [[nodiscard]] bool hasRobinBC () const noexcept;

    [[nodiscard]] virtual bool supportRobinBC () const noexcept { return false; }
    [[nodiscard]] virtual bool supportInhomogNeumannBC () const noexcept { return false; }

#ifdef BL_USE_MPI
    [[nodiscard]] bool isBottomActive () const noexcept { return m_bottom_comm != MPI_COMM_NULL; }
#else
    [[nodiscard]] bool isBottomActive () const noexcept { return true; }
#endif
    [[nodiscard]] MPI_Comm BottomCommunicator () const noexcept { return m_bottom_comm; }
    [[nodiscard]] MPI_Comm Communicator () const noexcept { return m_default_comm; }

    void setCoarseFineBCLocation (const RealVect& cloc) noexcept { m_coarse_bc_loc = cloc; }

    [[nodiscard]] bool doAgglomeration () const noexcept { return m_do_agglomeration; }
    [[nodiscard]] bool doConsolidation () const noexcept { return m_do_consolidation; }
    [[nodiscard]] bool doSemicoarsening () const noexcept { return m_do_semicoarsening; }

    [[nodiscard]] bool isCellCentered () const noexcept { return m_ixtype == 0; }

    [[nodiscard]] virtual IntVect getNGrowVectRestriction () const {
        return isCellCentered() ? IntVect(0) : IntVect(1);
    }

    virtual void make (Vector<Vector<MF> >& mf, IntVect const& ng) const;

    [[nodiscard]] virtual MF make (int amrlev, int mglev, IntVect const& ng) const;

    [[nodiscard]] virtual MF makeAlias (MF const& mf) const;

    [[nodiscard]] virtual MF makeCoarseMG (int amrlev, int mglev, IntVect const& ng) const;

    [[nodiscard]] virtual MF makeCoarseAmr (int famrlev, IntVect const& ng) const;

    [[nodiscard]] virtual std::unique_ptr<FabFactory<FAB> > makeFactory (int /*amrlev*/, int /*mglev*/) const {
        return std::make_unique<DefaultFabFactory<FAB>>();
    }

    virtual void resizeMultiGrid (int new_size);

    [[nodiscard]] bool hasHiddenDimension () const noexcept { return info.hasHiddenDimension(); }
    [[nodiscard]] int hiddenDirection () const noexcept { return info.hidden_direction; }
    [[nodiscard]] Box compactify (Box const& b) const noexcept;

    template <typename T>
    [[nodiscard]] Array4<T> compactify (Array4<T> const& a) const noexcept
    {
        if (info.hidden_direction == 0) {
            return Array4<T>(a.dataPtr(), {a.begin.y,a.begin.z,0}, {a.end.y,a.end.z,1}, a.nComp());
        } else if (info.hidden_direction == 1) {
            return Array4<T>(a.dataPtr(), {a.begin.x,a.begin.z,0}, {a.end.x,a.end.z,1}, a.nComp());
        } else if (info.hidden_direction == 2) {
            return Array4<T>(a.dataPtr(), {a.begin.x,a.begin.y,0}, {a.end.x,a.end.y,1}, a.nComp());
        } else {
            return a;
        }
    }

    template <typename T>
    [[nodiscard]] T get_d0 (T const& dx, T const& dy, T const&) const noexcept
    {
        if (info.hidden_direction == 0) {
            return dy;
        } else {
            return dx;
        }
    }

    template <typename T>
    [[nodiscard]] T get_d1 (T const&, T const& dy, T const& dz) const noexcept
    {
        if (info.hidden_direction == 0 || info.hidden_direction == 1) {
            return dz;
        } else {
            return dy;
        }
    }
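
    // A worked example of the two helpers above: in a 3D solve with
    // info.hidden_direction == 1 (the y direction hidden), get_d0(dx,dy,dz)
    // returns dx and get_d1(dx,dy,dz) returns dz, i.e., the two cell sizes
    // of the remaining 2D problem.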

private:

    void defineGrids (const Vector<Geometry>& a_geom,
                      const Vector<BoxArray>& a_grids,
                      const Vector<DistributionMapping>& a_dmap,
                      const Vector<FabFactory<FAB> const*>& a_factory);
    void defineBC ();
    static void makeAgglomeratedDMap (const Vector<BoxArray>& ba, Vector<DistributionMapping>& dm);
    static void makeConsolidatedDMap (const Vector<BoxArray>& ba, Vector<DistributionMapping>& dm,
                                      int ratio, int strategy);
    [[nodiscard]] MPI_Comm makeSubCommunicator (const DistributionMapping& dm);

    virtual void checkPoint (std::string const& /*file_name*/) const {
        amrex::Abort("MLLinOp:checkPoint: not implemented");
    }

    Vector<std::unique_ptr<MF>> levelbc_raii;
    Vector<std::unique_ptr<MF>> robin_a_raii;
    Vector<std::unique_ptr<MF>> robin_b_raii;
    Vector<std::unique_ptr<MF>> robin_f_raii;
};

template <typename MF>
void
MLLinOpT<MF>::define (const Vector<Geometry>& a_geom,
                      const Vector<BoxArray>& a_grids,
                      const Vector<DistributionMapping>& a_dmap,
                      const LPInfo& a_info,
                      const Vector<FabFactory<FAB> const*>& a_factory,
                      [[maybe_unused]] bool eb_limit_coarsening)
{
    BL_PROFILE("MLLinOp::define()");

    info = a_info;
#ifdef AMREX_USE_GPU
    if (Gpu::notInLaunchRegion())
    {
        if (info.agg_grid_size <= 0) { info.agg_grid_size = AMREX_D_PICK(32, 16, 8); }
        if (info.con_grid_size <= 0) { info.con_grid_size = AMREX_D_PICK(32, 16, 8); }
    }
    else
#endif
    {
        if (info.agg_grid_size <= 0) { info.agg_grid_size = LPInfo::getDefaultAgglomerationGridSize(); }
        if (info.con_grid_size <= 0) { info.con_grid_size = LPInfo::getDefaultConsolidationGridSize(); }
    }

#ifdef AMREX_USE_EB
    if (!a_factory.empty() && eb_limit_coarsening) {
        const auto *f = dynamic_cast<EBFArrayBoxFactory const*>(a_factory[0]);
        if (f) {
            info.max_coarsening_level = std::min(info.max_coarsening_level,
                                                 f->maxCoarseningLevel());
        }
    }
#endif
    defineGrids(a_geom, a_grids, a_dmap, a_factory);
    defineBC();
}
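
// A sketch of how define() is typically reached: a concrete operator forwards
// its constructor arguments here (names hypothetical), e.g.
//
//     MLABecLaplacian mlabec({geom}, {grids}, {dmap},
//                            LPInfo().setMaxCoarseningLevel(20));
//
// after which the domain, coarse/fine, and level BCs are attached.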

template <typename MF>
void
MLLinOpT<MF>::defineGrids (const Vector<Geometry>& a_geom,
                           const Vector<BoxArray>& a_grids,
                           const Vector<DistributionMapping>& a_dmap,
                           const Vector<FabFactory<FAB> const*>& a_factory)
{
    BL_PROFILE("MLLinOp::defineGrids()");

    m_num_amr_levels = 0;
    for (int amrlev = 0; amrlev < a_geom.size(); amrlev++) {
        if (!a_grids[amrlev].empty()) {
            m_num_amr_levels++;
        }
    }

    m_amr_ref_ratio.resize(m_num_amr_levels);
    m_num_mg_levels.resize(m_num_amr_levels);

    m_geom.resize(m_num_amr_levels);
    m_grids.resize(m_num_amr_levels);
    m_dmap.resize(m_num_amr_levels);
    m_factory.resize(m_num_amr_levels);

    m_default_comm = ParallelContext::CommunicatorSub();

    const RealBox& rb = a_geom[0].ProbDomain();
    const int coord = a_geom[0].Coord();
    const Array<int,AMREX_SPACEDIM>& is_per = a_geom[0].isPeriodic();

    IntVect mg_coarsen_ratio_v(mg_coarsen_ratio);
    IntVect mg_box_min_width_v(mg_box_min_width);
    IntVect mg_domain_min_width_v(mg_domain_min_width);
    if (hasHiddenDimension()) {
        AMREX_ASSERT_WITH_MESSAGE(AMREX_SPACEDIM == 3 && m_num_amr_levels == 1,
                                  "Hidden direction only supported for 3d level solve");
        mg_coarsen_ratio_v[info.hidden_direction] = 1;
        mg_box_min_width_v[info.hidden_direction] = 0;
        mg_domain_min_width_v[info.hidden_direction] = 0;
    }

    // fine amr levels
    for (int amrlev = m_num_amr_levels-1; amrlev > 0; --amrlev)
    {
        m_num_mg_levels[amrlev] = 1;
        m_geom[amrlev].push_back(a_geom[amrlev]);
        m_grids[amrlev].push_back(a_grids[amrlev]);
        m_dmap[amrlev].push_back(a_dmap[amrlev]);
        if (amrlev < a_factory.size()) {
            m_factory[amrlev].emplace_back(a_factory[amrlev]->clone());
        } else {
            m_factory[amrlev].push_back(std::make_unique<DefaultFabFactory<FAB>>());
        }

        IntVect rr = mg_coarsen_ratio_v;
        const Box& dom = a_geom[amrlev].Domain();
        for (int i = 0; i < 2; ++i)
        {
            if (!dom.coarsenable(rr)) { amrex::Abort("MLLinOp: Uncoarsenable domain"); }

            const Box& cdom = amrex::coarsen(dom,rr);
            if (cdom == a_geom[amrlev-1].Domain()) { break; }

            ++(m_num_mg_levels[amrlev]);

            m_geom[amrlev].emplace_back(cdom, rb, coord, is_per);

            m_grids[amrlev].push_back(a_grids[amrlev]);
            AMREX_ASSERT(m_grids[amrlev].back().coarsenable(rr));
            m_grids[amrlev].back().coarsen(rr);

            m_dmap[amrlev].push_back(a_dmap[amrlev]);

            rr *= mg_coarsen_ratio_v;
        }

#if (AMREX_SPACEDIM > 1)
        if (hasHiddenDimension()) {
            m_amr_ref_ratio[amrlev-1] = rr[AMREX_SPACEDIM-info.hidden_direction];
        } else
#endif
        {
            m_amr_ref_ratio[amrlev-1] = rr[0];
        }
    }

    // coarsest amr level
    m_num_mg_levels[0] = 1;
    m_geom[0].push_back(a_geom[0]);
    m_grids[0].push_back(a_grids[0]);
    m_dmap[0].push_back(a_dmap[0]);
    if (!a_factory.empty()) {
        m_factory[0].emplace_back(a_factory[0]->clone());
    } else {
        m_factory[0].push_back(std::make_unique<DefaultFabFactory<FAB>>());
    }

    m_domain_covered.resize(m_num_amr_levels, false);
    auto npts0 = m_grids[0][0].numPts();
    m_domain_covered[0] = (npts0 == compactify(m_geom[0][0].Domain()).numPts());
    for (int amrlev = 1; amrlev < m_num_amr_levels; ++amrlev)
    {
        if (!m_domain_covered[amrlev-1]) { break; }
        m_domain_covered[amrlev] = (m_grids[amrlev][0].numPts() ==
                                    compactify(m_geom[amrlev][0].Domain()).numPts());
    }

    Box aggbox;
    bool aggable = false;

    if (m_grids[0][0].size() > 1 && info.do_agglomeration)
    {
        if (m_domain_covered[0])
        {
            aggbox = m_geom[0][0].Domain();
            if (hasHiddenDimension()) {
                aggbox.makeSlab(hiddenDirection(), m_grids[0][0][0].smallEnd(hiddenDirection()));
            }
            aggable = true;
        }
        else
        {
            aggbox = m_grids[0][0].minimalBox();
            aggable = (aggbox.numPts() == npts0);
        }
    }

    bool agged = false;
    bool coned = false;
    int agg_lev = 0, con_lev = 0;

    AMREX_ALWAYS_ASSERT( ! (info.do_semicoarsening && info.hasHiddenDimension())
                         && info.semicoarsening_direction >= -1
                         && info.semicoarsening_direction < AMREX_SPACEDIM );

    if (info.do_agglomeration && aggable)
    {
        Box dbx = m_geom[0][0].Domain();
        Box bbx = aggbox;
        Real const nbxs = static_cast<Real>(m_grids[0][0].size());
        Real const threshold_npts = static_cast<Real>(AMREX_D_TERM(info.agg_grid_size,
                                                                   *info.agg_grid_size,
                                                                   *info.agg_grid_size));
        Vector<Box> domainboxes{dbx};
        Vector<Box> boundboxes{bbx};
        Vector<int> agg_flag{false};
        Vector<IntVect> accum_coarsen_ratio{IntVect(1)};
        int numsclevs = 0;

        for (int lev = 0; lev < info.max_coarsening_level; ++lev)
        {
            IntVect rr_level = mg_coarsen_ratio_v;
            bool const do_semicoarsening_level = info.do_semicoarsening
                && numsclevs < info.max_semicoarsening_level;
            if (do_semicoarsening_level
                && info.semicoarsening_direction != -1)
            {
                rr_level[info.semicoarsening_direction] = 1;
            }
            IntVect is_coarsenable;
            for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
                IntVect rr_dir(1);
                rr_dir[idim] = rr_level[idim];
                is_coarsenable[idim] = dbx.coarsenable(rr_dir, mg_domain_min_width_v)
                    && bbx.coarsenable(rr_dir, mg_box_min_width_v);
                if (!is_coarsenable[idim] && do_semicoarsening_level
                    && info.semicoarsening_direction == -1)
                {
                    is_coarsenable[idim] = true;
                    rr_level[idim] = 1;
                }
            }
            if (is_coarsenable != IntVect(1) || rr_level == IntVect(1)) {
                break;
            }
            if (do_semicoarsening_level && info.semicoarsening_direction == -1) {
                // make sure there is at most one direction that is not coarsened
                int n_ones = AMREX_D_TERM( static_cast<int>(rr_level[0] == 1),
                                         + static_cast<int>(rr_level[1] == 1),
                                         + static_cast<int>(rr_level[2] == 1));
                if (n_ones > 1) { break; }
            }
            if (rr_level != mg_coarsen_ratio_v) {
                ++numsclevs;
            }

            accum_coarsen_ratio.push_back(accum_coarsen_ratio.back()*rr_level);
            domainboxes.push_back(dbx.coarsen(rr_level));
            boundboxes.push_back(bbx.coarsen(rr_level));
            bool to_agg = (bbx.d_numPts() / nbxs) < 0.999*threshold_npts;
            agg_flag.push_back(to_agg);
        }

        for (int lev = 1, nlevs = static_cast<int>(domainboxes.size()); lev < nlevs; ++lev) {
            if (!agged && !agg_flag[lev] &&
                a_grids[0].coarsenable(accum_coarsen_ratio[lev], mg_box_min_width_v))
            {
                m_grids[0].push_back(amrex::coarsen(a_grids[0], accum_coarsen_ratio[lev]));
                m_dmap[0].push_back(a_dmap[0]);
            } else {
                IntVect cr = domainboxes[lev-1].length() / domainboxes[lev].length();
                if (!m_grids[0].back().coarsenable(cr)) {
                    break; // average_down would fail if the fine boxarray is not coarsenable.
                }
                m_grids[0].emplace_back(boundboxes[lev]);
                IntVect max_grid_size(info.agg_grid_size);
                if (info.do_semicoarsening && info.max_semicoarsening_level >= lev
                    && info.semicoarsening_direction != -1)
                {
                    IntVect blen = amrex::enclosedCells(boundboxes[lev]).size();
                    AMREX_D_TERM(int mgs_0 = (max_grid_size[0]+blen[0]-1) / blen[0];,
                                 int mgs_1 = (max_grid_size[1]+blen[1]-1) / blen[1];,
                                 int mgs_2 = (max_grid_size[2]+blen[2]-1) / blen[2]);
                    max_grid_size[info.semicoarsening_direction]
                        *= AMREX_D_TERM(mgs_0, *mgs_1, *mgs_2);
                }
                m_grids[0].back().maxSize(max_grid_size);
                m_dmap[0].push_back(DistributionMapping());
                if (!agged) {
                    agged = true;
                    agg_lev = lev;
                }
            }
            m_geom[0].emplace_back(domainboxes[lev],rb,coord,is_per);
        }
    }
    else
    {
        Long consolidation_threshold = 0;
        Real avg_npts = 0.0;
        if (info.do_consolidation) {
            avg_npts = static_cast<Real>(a_grids[0].d_numPts()) / static_cast<Real>(ParallelContext::NProcsSub());
            consolidation_threshold = AMREX_D_TERM(Long(info.con_grid_size),
                                                   *info.con_grid_size,
                                                   *info.con_grid_size);
        }

        Box const& dom0 = a_geom[0].Domain();
        IntVect rr_vec(1);
        int numsclevs = 0;
        for (int lev = 0; lev < info.max_coarsening_level; ++lev)
        {
            IntVect rr_level = mg_coarsen_ratio_v;
            bool do_semicoarsening_level = info.do_semicoarsening
                && numsclevs < info.max_semicoarsening_level;
            if (do_semicoarsening_level
                && info.semicoarsening_direction != -1)
            {
                rr_level[info.semicoarsening_direction] = 1;
            }
            IntVect is_coarsenable;
            for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
                IntVect rr_dir(1);
                rr_dir[idim] = rr_vec[idim] * rr_level[idim];
                is_coarsenable[idim] = dom0.coarsenable(rr_dir, mg_domain_min_width_v)
                    && a_grids[0].coarsenable(rr_dir, mg_box_min_width_v);
                if (!is_coarsenable[idim] && do_semicoarsening_level
                    && info.semicoarsening_direction == -1)
                {
                    is_coarsenable[idim] = true;
                    rr_level[idim] = 1;
                }
            }
            if (is_coarsenable != IntVect(1) || rr_level == IntVect(1)) {
                break;
            }
            if (do_semicoarsening_level && info.semicoarsening_direction == -1) {
                // make sure there is at most one direction that is not coarsened
                int n_ones = AMREX_D_TERM( static_cast<int>(rr_level[0] == 1),
                                         + static_cast<int>(rr_level[1] == 1),
                                         + static_cast<int>(rr_level[2] == 1));
                if (n_ones > 1) { break; }
            }
            if (rr_level != mg_coarsen_ratio_v) {
                ++numsclevs;
            }
            rr_vec *= rr_level;

            m_geom[0].emplace_back(amrex::coarsen(dom0, rr_vec), rb, coord, is_per);
            m_grids[0].push_back(amrex::coarsen(a_grids[0], rr_vec));

            if (info.do_consolidation)
            {
                if (avg_npts/static_cast<Real>(AMREX_D_TERM(rr_vec[0], *rr_vec[1], *rr_vec[2]))
                    < Real(0.999)*static_cast<Real>(consolidation_threshold))
                {
                    coned = true;
                    con_lev = m_dmap[0].size();
                    m_dmap[0].push_back(DistributionMapping());
                }
                else
                {
                    m_dmap[0].push_back(m_dmap[0].back());
                }
            }
            else
            {
                m_dmap[0].push_back(a_dmap[0]);
            }
        }
    }

    m_num_mg_levels[0] = m_grids[0].size();

    for (int mglev = 0; mglev < m_num_mg_levels[0] - 1; mglev++){
        const Box& fine_domain = m_geom[0][mglev].Domain();
        const Box& crse_domain = m_geom[0][mglev+1].Domain();
        mg_coarsen_ratio_vec.push_back(fine_domain.length()/crse_domain.length());
    }

    for (int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev) {
        if (AMRRefRatio(amrlev) == 4 && mg_coarsen_ratio_vec.empty()) {
            mg_coarsen_ratio_vec.push_back(IntVect(2));
        }
    }

    if (agged)
    {
        makeAgglomeratedDMap(m_grids[0], m_dmap[0]);
    }
    else if (coned)
    {
        makeConsolidatedDMap(m_grids[0], m_dmap[0], info.con_ratio, info.con_strategy);
    }

    if (agged || coned)
    {
        m_bottom_comm = makeSubCommunicator(m_dmap[0].back());
    }
    else
    {
        m_bottom_comm = m_default_comm;
    }

    m_do_agglomeration = agged;
    m_do_consolidation = coned;

    if (verbose > 1) {
        if (agged) {
            Print() << "MLLinOp::defineGrids(): agglomerated AMR level 0 starting at MG level "
                    << agg_lev << " of " << m_num_mg_levels[0] << "\n";
        } else if (coned) {
            Print() << "MLLinOp::defineGrids(): consolidated AMR level 0 starting at MG level "
                    << con_lev << " of " << m_num_mg_levels[0]
                    << " (ratio = " << info.con_ratio << ")" << "\n";
        } else {
            Print() << "MLLinOp::defineGrids(): no agglomeration or consolidation of AMR level 0\n";
        }
    }

    for (int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev)
    {
        for (int mglev = 1; mglev < m_num_mg_levels[amrlev]; ++mglev)
        {
            m_factory[amrlev].emplace_back(makeFactory(amrlev,mglev));
        }
    }

    for (int amrlev = 1; amrlev < m_num_amr_levels; ++amrlev)
    {
        AMREX_ASSERT_WITH_MESSAGE(m_grids[amrlev][0].coarsenable(m_amr_ref_ratio[amrlev-1]),
                                  "MLLinOp: grids not coarsenable between AMR levels");
    }
}

template <typename MF>
void
MLLinOpT<MF>::defineBC ()
{
    m_needs_coarse_data_for_bc = !m_domain_covered[0];

    levelbc_raii.resize(m_num_amr_levels);
    robin_a_raii.resize(m_num_amr_levels);
    robin_b_raii.resize(m_num_amr_levels);
    robin_f_raii.resize(m_num_amr_levels);
}

template <typename MF>
void
MLLinOpT<MF>::setDomainBC (const Array<BCType,AMREX_SPACEDIM>& a_lobc,
                           const Array<BCType,AMREX_SPACEDIM>& a_hibc) noexcept
{
    const int ncomp = getNComp();
    setDomainBC(Vector<Array<BCType,AMREX_SPACEDIM> >(ncomp,a_lobc),
                Vector<Array<BCType,AMREX_SPACEDIM> >(ncomp,a_hibc));
}

template <typename MF>
void
MLLinOpT<MF>::setDomainBC (const Vector<Array<BCType,AMREX_SPACEDIM> >& a_lobc,
                           const Vector<Array<BCType,AMREX_SPACEDIM> >& a_hibc) noexcept
{
    const int ncomp = getNComp();
    AMREX_ASSERT_WITH_MESSAGE(ncomp == a_lobc.size() && ncomp == a_hibc.size(),
                              "MLLinOp::setDomainBC: wrong size");
    m_lobc = a_lobc;
    m_hibc = a_hibc;
    m_lobc_orig = m_lobc;
    m_hibc_orig = m_hibc;
    for (int icomp = 0; icomp < ncomp; ++icomp) {
        for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
            if (m_geom[0][0].isPeriodic(idim)) {
                AMREX_ALWAYS_ASSERT(m_lobc[icomp][idim] == BCType::Periodic &&
                                    m_hibc[icomp][idim] == BCType::Periodic);
            } else {
                AMREX_ALWAYS_ASSERT(m_lobc[icomp][idim] != BCType::Periodic &&
                                    m_hibc[icomp][idim] != BCType::Periodic);
            }

            if (m_lobc[icomp][idim] == LinOpBCType::inhomogNeumann ||
                m_lobc[icomp][idim] == LinOpBCType::Robin)
            {
                m_lobc[icomp][idim] = LinOpBCType::Neumann;
            }

            if (m_hibc[icomp][idim] == LinOpBCType::inhomogNeumann ||
                m_hibc[icomp][idim] == LinOpBCType::Robin)
            {
                m_hibc[icomp][idim] = LinOpBCType::Neumann;
            }
        }
    }

    if (hasHiddenDimension()) {
        const int hd = hiddenDirection();
        for (int n = 0; n < ncomp; ++n) {
            m_lobc[n][hd] = LinOpBCType::Neumann;
            m_hibc[n][hd] = LinOpBCType::Neumann;
        }
    }

    if (hasInhomogNeumannBC() && !supportInhomogNeumannBC()) {
        amrex::Abort("Inhomogeneous Neumann BC not supported");
    }
    if (hasRobinBC() && !supportRobinBC()) {
        amrex::Abort("Robin BC not supported");
    }
}

template <typename MF>
bool
MLLinOpT<MF>::hasBC (BCType bct) const noexcept
{
    int ncomp = m_lobc_orig.size();
    for (int n = 0; n < ncomp; ++n) {
        for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
            if (m_lobc_orig[n][idim] == bct || m_hibc_orig[n][idim] == bct) {
                return true;
            }
        }
    }
    return false;
}

template <typename MF>
bool
MLLinOpT<MF>::hasInhomogNeumannBC () const noexcept
{
    return hasBC(BCType::inhomogNeumann);
}

template <typename MF>
bool
MLLinOpT<MF>::hasRobinBC () const noexcept
{
    return hasBC(BCType::Robin);
}

template <typename MF>
Box
MLLinOpT<MF>::compactify (Box const& b) const noexcept
{
#if (AMREX_SPACEDIM == 3)
    if (info.hasHiddenDimension()) {
        const auto& lo = b.smallEnd();
        const auto& hi = b.bigEnd();
        if (info.hidden_direction == 0) {
            return Box(IntVect(lo[1],lo[2],0), IntVect(hi[1],hi[2],0), b.ixType());
        } else if (info.hidden_direction == 1) {
            return Box(IntVect(lo[0],lo[2],0), IntVect(hi[0],hi[2],0), b.ixType());
        } else {
            return Box(IntVect(lo[0],lo[1],0), IntVect(hi[0],hi[1],0), b.ixType());
        }
    } else
#endif
    {
        return b;
    }
}

template <typename MF>
void
MLLinOpT<MF>::makeAgglomeratedDMap (const Vector<BoxArray>& ba,
                                    Vector<DistributionMapping>& dm)
{
    BL_PROFILE("MLLinOp::makeAgglomeratedDMap");

    BL_ASSERT(!dm[0].empty());
    for (int i = 1, N=static_cast<int>(ba.size()); i < N; ++i)
    {
        if (dm[i].empty())
        {
            const std::vector< std::vector<int> >& sfc = DistributionMapping::makeSFC(ba[i]);

            const int nprocs = ParallelContext::NProcsSub();
            AMREX_ASSERT(static_cast<int>(sfc.size()) == nprocs);

            Vector<int> pmap(ba[i].size());
            for (int iproc = 0; iproc < nprocs; ++iproc) {
                int grank = ParallelContext::local_to_global_rank(iproc);
                for (int ibox : sfc[iproc]) {
                    pmap[ibox] = grank;
                }
            }
            dm[i].define(std::move(pmap));
        }
    }
}

template <typename MF>
void
MLLinOpT<MF>::makeConsolidatedDMap (const Vector<BoxArray>& ba,
                                    Vector<DistributionMapping>& dm,
                                    int ratio, int strategy)
{
    BL_PROFILE("MLLinOp::makeConsolidatedDMap()");

    int factor = 1;
    BL_ASSERT(!dm[0].empty());
    for (int i = 1, N=static_cast<int>(ba.size()); i < N; ++i)
    {
        if (dm[i].empty())
        {
            factor *= ratio;

            const int nprocs = ParallelContext::NProcsSub();
            const auto& pmap_fine = dm[i-1].ProcessorMap();
            Vector<int> pmap(pmap_fine.size());
            ParallelContext::global_to_local_rank(pmap.data(), pmap_fine.data(), static_cast<int>(pmap.size()));
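            // Consolidation strategies, as implemented below:
            //   1: divide every rank in the current map by the ratio;
            //   2: fold the ranks onto the first ceil(nprocs/factor) ranks via modulo;
            //   3: on the first consolidation, rebuild an SFC-based map, then
            //      divide the ranks by the ratio.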
            if (strategy == 1) {
                for (auto& x: pmap) {
                    x /= ratio;
                }
            } else if (strategy == 2) {
                int nprocs_con = static_cast<int>(std::ceil(static_cast<Real>(nprocs)
                                                            / static_cast<Real>(factor)));
                for (auto& x: pmap) {
                    auto d = std::div(x,nprocs_con);
                    x = d.rem;
                }
            } else if (strategy == 3) {
                if (factor == ratio) {
                    const std::vector< std::vector<int> >& sfc = DistributionMapping::makeSFC(ba[i]);
                    for (int iproc = 0; iproc < nprocs; ++iproc) {
                        for (int ibox : sfc[iproc]) {
                            pmap[ibox] = iproc;
                        }
                    }
                }
                for (auto& x: pmap) {
                    x /= ratio;
                }
            }

            if (ParallelContext::CommunicatorSub() == ParallelDescriptor::Communicator()) {
                dm[i].define(std::move(pmap));
            } else {
                Vector<int> pmap_g(pmap.size());
                ParallelContext::local_to_global_rank(pmap_g.data(), pmap.data(), static_cast<int>(pmap.size()));
                dm[i].define(std::move(pmap_g));
            }
        }
    }
}

template <typename MF>
MPI_Comm
MLLinOpT<MF>::makeSubCommunicator (const DistributionMapping& dm)
{
    BL_PROFILE("MLLinOp::makeSubCommunicator()");

#ifdef BL_USE_MPI

    Vector<int> newgrp_ranks = dm.ProcessorMap();
    std::sort(newgrp_ranks.begin(), newgrp_ranks.end());
    auto last = std::unique(newgrp_ranks.begin(), newgrp_ranks.end());
    newgrp_ranks.erase(last, newgrp_ranks.end());

    MPI_Comm newcomm;
    MPI_Group defgrp, newgrp;
    MPI_Comm_group(m_default_comm, &defgrp);
    if (ParallelContext::CommunicatorSub() == ParallelDescriptor::Communicator()) {
        MPI_Group_incl(defgrp, static_cast<int>(newgrp_ranks.size()), newgrp_ranks.data(), &newgrp);
    } else {
        Vector<int> local_newgrp_ranks(newgrp_ranks.size());
        ParallelContext::global_to_local_rank(local_newgrp_ranks.data(),
                                              newgrp_ranks.data(), static_cast<int>(newgrp_ranks.size()));
        MPI_Group_incl(defgrp, static_cast<int>(local_newgrp_ranks.size()), local_newgrp_ranks.data(), &newgrp);
    }

    MPI_Comm_create(m_default_comm, newgrp, &newcomm);

    m_raii_comm = std::make_unique<CommContainer>(newcomm);

    MPI_Group_free(&defgrp);
    MPI_Group_free(&newgrp);

    return newcomm;
#else
    amrex::ignore_unused(dm);
    return m_default_comm;
#endif
}

template <typename MF>
void
MLLinOpT<MF>::setDomainBCLoc (const Array<Real,AMREX_SPACEDIM>& lo_bcloc,
                              const Array<Real,AMREX_SPACEDIM>& hi_bcloc) noexcept
{
    m_domain_bloc_lo = lo_bcloc;
    m_domain_bloc_hi = hi_bcloc;
}

template <typename MF>
void
MLLinOpT<MF>::setCoarseFineBC (const MF* crse, int crse_ratio,
                               LinOpBCType bc_type) noexcept
{
    setCoarseFineBC(crse, IntVect(crse_ratio), bc_type);
}

template <typename MF>
void
MLLinOpT<MF>::setCoarseFineBC (const MF* crse, IntVect const& crse_ratio,
                               LinOpBCType bc_type) noexcept
{
    m_coarse_data_for_bc = crse;
    m_coarse_data_crse_ratio = crse_ratio;
    m_coarse_fine_bc_type = bc_type;
}

template <typename MF>
template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>,int>>
void
MLLinOpT<MF>::setCoarseFineBC (const AMF* crse, int crse_ratio,
                               LinOpBCType bc_type) noexcept
{
    setCoarseFineBC(crse, IntVect(crse_ratio), bc_type);
}

template <typename MF>
template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>,int>>
void
MLLinOpT<MF>::setCoarseFineBC (const AMF* crse, IntVect const& crse_ratio,
                               LinOpBCType bc_type) noexcept
{
    m_coarse_data_for_bc_raii = MF(crse->boxArray(), crse->DistributionMap(),
                                   crse->nComp(), crse->nGrowVect());
    m_coarse_data_for_bc_raii.LocalCopy(*crse, 0, 0, crse->nComp(),
                                        crse->nGrowVect());
    m_coarse_data_for_bc = &m_coarse_data_for_bc_raii;
    m_coarse_data_crse_ratio = crse_ratio;
    m_coarse_fine_bc_type = bc_type;
}

template <typename MF>
void
MLLinOpT<MF>::make (Vector<Vector<MF> >& mf, IntVect const& ng) const
{
    mf.clear();
    mf.resize(m_num_amr_levels);
    for (int alev = 0; alev < m_num_amr_levels; ++alev) {
        mf[alev].resize(m_num_mg_levels[alev]);
        for (int mlev = 0; mlev < m_num_mg_levels[alev]; ++mlev) {
            mf[alev][mlev] = make(alev, mlev, ng);
        }
    }
}

template <typename MF>
MF
MLLinOpT<MF>::make (int amrlev, int mglev, IntVect const& ng) const
{
    if constexpr (IsMultiFabLike_v<MF>) {
        return MF(amrex::convert(m_grids[amrlev][mglev], m_ixtype),
                  m_dmap[amrlev][mglev], getNComp(), ng, MFInfo(),
                  *m_factory[amrlev][mglev]);
    } else {
        amrex::ignore_unused(amrlev, mglev, ng);
        amrex::Abort("MLLinOpT::make: how did we get here?");
        return {};
    }
}

template <typename MF>
MF
MLLinOpT<MF>::makeAlias (MF const& mf) const
{
    if constexpr (IsMultiFabLike_v<MF>) {
        return MF(mf, amrex::make_alias, 0, mf.nComp());
    } else {
        amrex::ignore_unused(mf);
        amrex::Abort("MLLinOpT::makeAlias: how did we get here?");
        return {};
    }
}

template <typename MF>
MF
MLLinOpT<MF>::makeCoarseMG (int amrlev, int mglev, IntVect const& ng) const
{
    if constexpr (IsMultiFabLike_v<MF>) {
        BoxArray cba = m_grids[amrlev][mglev];
        IntVect ratio = (amrlev > 0) ? IntVect(2) : mg_coarsen_ratio_vec[mglev];
        cba.coarsen(ratio);
        cba.convert(m_ixtype);
        return MF(cba, m_dmap[amrlev][mglev], getNComp(), ng);
    } else {
        amrex::ignore_unused(amrlev, mglev, ng);
        amrex::Abort("MLLinOpT::makeCoarseMG: how did we get here?");
        return {};
    }
}

template <typename MF>
MF
MLLinOpT<MF>::makeCoarseAmr (int famrlev, IntVect const& ng) const
{
    if constexpr (IsMultiFabLike_v<MF>) {
        BoxArray cba = m_grids[famrlev][0];
        IntVect ratio(AMRRefRatio(famrlev-1));
        cba.coarsen(ratio);
        cba.convert(m_ixtype);
        return MF(cba, m_dmap[famrlev][0], getNComp(), ng);
    } else {
        amrex::ignore_unused(famrlev, ng);
        amrex::Abort("MLLinOpT::makeCoarseAmr: how did we get here?");
        return {};
    }
}

template <typename MF>
void
MLLinOpT<MF>::resizeMultiGrid (int new_size)
{
    if (new_size <= 0 || new_size >= m_num_mg_levels[0]) { return; }

    m_num_mg_levels[0] = new_size;

    m_geom[0].resize(new_size);
    m_grids[0].resize(new_size);
    m_dmap[0].resize(new_size);
    m_factory[0].resize(new_size);

    if (m_bottom_comm != m_default_comm) {
        m_bottom_comm = makeSubCommunicator(m_dmap[0].back());
    }
}

template <typename MF>
void
MLLinOpT<MF>::avgDownResMG (int clev, MF& cres, MF const& fres) const
{
    amrex::ignore_unused(clev, cres, fres);
    if constexpr (amrex::IsFabArray<MF>::value) {
        const int ncomp = this->getNComp();
#ifdef AMREX_USE_EB
        if (!fres.isAllRegular()) {
            if constexpr (std::is_same<MF,MultiFab>()) {
                amrex::EB_average_down(fres, cres, 0, ncomp,
                                       mg_coarsen_ratio_vec[clev-1]);
            } else {
                amrex::Abort("EB_average_down only works with MultiFab");
            }
        } else
#endif
        {
            amrex::average_down(fres, cres, 0, ncomp, mg_coarsen_ratio_vec[clev-1]);
        }
    } else {
        amrex::Abort("For non-FabArray, MLLinOpT<MF>::avgDownResMG should be overridden.");
    }
}

template <typename MF>
bool
MLLinOpT<MF>::isMFIterSafe (int amrlev, int mglev1, int mglev2) const
{
    return m_dmap[amrlev][mglev1] == m_dmap[amrlev][mglev2]
        && BoxArray::SameRefs(m_grids[amrlev][mglev1], m_grids[amrlev][mglev2]);
}

template <typename MF>
template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>,int>>
void
MLLinOpT<MF>::setLevelBC (int amrlev, const AMF* levelbcdata,
                          const AMF* robinbc_a, const AMF* robinbc_b,
                          const AMF* robinbc_f)
{
    const int ncomp = this->getNComp();
    if (levelbcdata) {
        levelbc_raii[amrlev] = std::make_unique<MF>(levelbcdata->boxArray(),
                                                    levelbcdata->DistributionMap(),
                                                    ncomp, levelbcdata->nGrowVect());
        levelbc_raii[amrlev]->LocalCopy(*levelbcdata, 0, 0, ncomp,
                                        levelbcdata->nGrowVect());
    } else {
        levelbc_raii[amrlev].reset();
    }

    if (robinbc_a) {
        robin_a_raii[amrlev] = std::make_unique<MF>(robinbc_a->boxArray(),
                                                    robinbc_a->DistributionMap(),
                                                    ncomp, robinbc_a->nGrowVect());
        robin_a_raii[amrlev]->LocalCopy(*robinbc_a, 0, 0, ncomp,
                                        robinbc_a->nGrowVect());
    } else {
        robin_a_raii[amrlev].reset();
    }

    if (robinbc_b) {
        robin_b_raii[amrlev] = std::make_unique<MF>(robinbc_b->boxArray(),
                                                    robinbc_b->DistributionMap(),
                                                    ncomp, robinbc_b->nGrowVect());
        robin_b_raii[amrlev]->LocalCopy(*robinbc_b, 0, 0, ncomp,
                                        robinbc_b->nGrowVect());
    } else {
        robin_b_raii[amrlev].reset();
    }

    if (robinbc_f) {
        robin_f_raii[amrlev] = std::make_unique<MF>(robinbc_f->boxArray(),
                                                    robinbc_f->DistributionMap(),
                                                    ncomp, robinbc_f->nGrowVect());
        robin_f_raii[amrlev]->LocalCopy(*robinbc_f, 0, 0, ncomp,
                                        robinbc_f->nGrowVect());
    } else {
        robin_f_raii[amrlev].reset();
    }

    this->setLevelBC(amrlev, levelbc_raii[amrlev].get(), robin_a_raii[amrlev].get(),
                     robin_b_raii[amrlev].get(), robin_f_raii[amrlev].get());
}

extern template class MLLinOpT<MultiFab>;

using MLLinOp = MLLinOpT<MultiFab>;

}

#endif
|