Line data Source code
1 : #ifndef AMREX_ML_MG_H_
2 : #define AMREX_ML_MG_H_
3 : #include <AMReX_Config.H>
4 :
5 : #include <AMReX_MLLinOp.H>
6 : #include <AMReX_MLCGSolver.H>
7 :
8 : namespace amrex {
9 :
10 : template <typename MF>
11 : class MLMGT
12 : {
13 : public:
14 :
15 : class error
16 : : public std::runtime_error
17 : {
18 : public:
19 : using std::runtime_error::runtime_error;
20 : };
21 :
22 : template <typename T> friend class MLCGSolverT;
23 : template <typename M> friend class GMRESMLMGT;
24 :
25 : using MFType = MF;
26 : using FAB = typename MLLinOpT<MF>::FAB;
27 : using RT = typename MLLinOpT<MF>::RT;
28 :
29 : using BCMode = typename MLLinOpT<MF>::BCMode;
30 : using Location = typename MLLinOpT<MF>::Location;
31 :
32 : using BottomSolver = amrex::BottomSolver;
33 : enum class CFStrategy : int {none,ghostnodes};
34 :
35 : MLMGT (MLLinOpT<MF>& a_lp);
36 : ~MLMGT ();
37 :
38 : MLMGT (MLMGT<MF> const&) = delete;
39 : MLMGT (MLMGT<MF> &&) = delete;
40 : MLMGT<MF>& operator= (MLMGT<MF> const&) = delete;
41 : MLMGT<MF>& operator= (MLMGT<MF> &&) = delete;
42 :
43 : // Optional argument checkpoint_file is for debugging only.
44 : template <typename AMF>
45 : RT solve (const Vector<AMF*>& a_sol, const Vector<AMF const*>& a_rhs,
46 : RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file = nullptr);
47 :
48 : template <typename AMF>
49 : RT solve (std::initializer_list<AMF*> a_sol,
50 : std::initializer_list<AMF const*> a_rhs,
51 : RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file = nullptr);
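    //
    // A minimal usage sketch (assuming MLMG is the MultiFab instantiation
    // MLMGT<MultiFab>, and that an MLABecLaplacian named mlabec has already
    // been given its BCs and coefficients; solution and rhs are MultiFabs):
    //
    //     MLMG mlmg(mlabec);
    //     mlmg.setVerbose(2);
    //     mlmg.setMaxIter(100);
    //     mlmg.setBottomSolver(MLMG::BottomSolver::bicgstab);
    //     Real final_resnorm = mlmg.solve({&solution}, {&rhs},
    //                                     Real(1.e-10),  // relative tolerance
    //                                     Real(0.0));    // absolute tolerance
    //
    // The return value is the final composite residual norm, also available
    // afterwards through getFinalResidual().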
52 :
53 : template <typename AMF>
54 : void getGradSolution (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_grad_sol,
55 : Location a_loc = Location::FaceCenter);
56 :
57 : template <typename AMF>
58 : void getGradSolution (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_grad_sol,
59 : Location a_loc = Location::FaceCenter);
60 :
61 : /**
62 : * \brief For ``(alpha * a - beta * (del dot b grad)) phi = rhs``, flux means ``-b grad phi``
63 : */
64 : template <typename AMF>
65 : void getFluxes (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_flux,
66 : Location a_loc = Location::FaceCenter);
67 :
68 : template <typename AMF>
69 : void getFluxes (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_flux,
70 : Location a_loc = Location::FaceCenter);
71 :
72 : template <typename AMF>
73 : void getFluxes (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_flux,
74 : const Vector<AMF*> & a_sol,
75 : Location a_loc = Location::FaceCenter);
76 :
77 : template <typename AMF>
78 : void getFluxes (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_flux,
79 : std::initializer_list<AMF*> a_sol,
80 : Location a_loc = Location::FaceCenter);
81 :
82 : template <typename AMF>
83 : void getFluxes (const Vector<AMF*> & a_flux,
84 : Location a_loc = Location::CellCenter);
85 :
86 : template <typename AMF>
87 : void getFluxes (std::initializer_list<AMF*> a_flux,
88 : Location a_loc = Location::CellCenter);
89 :
90 : template <typename AMF>
91 : void getFluxes (const Vector<AMF*> & a_flux,
92 : const Vector<AMF*> & a_sol,
93 : Location a_loc = Location::CellCenter);
94 :
95 : template <typename AMF>
96 : void getFluxes (std::initializer_list<AMF*> a_flux,
97 : std::initializer_list<AMF*> a_sol,
98 : Location a_loc = Location::CellCenter);
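    //
    // A sketch of retrieving face-centered fluxes (i.e. -b grad phi) after a
    // single-level, cell-centered solve; grids, dmap, and ncomp are assumed to
    // match the solution MultiFab, and mlmg is the solver used in solve():
    //
    //     Array<MultiFab,AMREX_SPACEDIM> flux;
    //     for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
    //         flux[idim].define(amrex::convert(grids, IntVect::TheDimensionVector(idim)),
    //                           dmap, ncomp, 0);
    //     }
    //     mlmg.getFluxes({amrex::GetArrOfPtrs(flux)}, MLMG::Location::FaceCenter);
    //
    // These overloads reuse the solution stored internally by the preceding
    // solve(); the overloads taking a_sol compute fluxes of a caller-provided
    // solution instead.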
99 :
100 : void compResidual (const Vector<MF*>& a_res, const Vector<MF*>& a_sol,
101 : const Vector<MF const*>& a_rhs);
102 :
103 : #ifdef AMREX_USE_EB
104 : // Flux into the EB wall
105 : void getEBFluxes (const Vector<MF*>& a_eb_flux);
106 : void getEBFluxes (const Vector<MF*>& a_eb_flux, const Vector<MF*> & a_sol);
107 : #endif
108 :
109 : /**
110 : * \brief ``out = L(in)``. Note that, if no actual solve is needed, one could
111 :  * turn off multigrid coarsening by constructing an MLLinOp object
112 : * with an appropriate LPInfo object (e.g., with LPInfo().setMaxCoarseningLevel(0)).
113 : */
114 : void apply (const Vector<MF*>& out, const Vector<MF*>& in);
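    //
    // A sketch of the pattern described above, assuming the operator is only
    // used to evaluate L(in) (no solve), here with an MLPoisson built from a
    // user-provided geom, grids, and dmap:
    //
    //     MLPoisson mlpoisson({geom}, {grids}, {dmap},
    //                         LPInfo().setMaxCoarseningLevel(0));
    //     // ... set domain and level BCs on mlpoisson ...
    //     MLMG mlmg(mlpoisson);
    //     mlmg.apply({&Lphi}, {&phi}); // Lphi = L(phi)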
115 :
116 : void setThrowException (bool t) noexcept { throw_exception = t; }
117 : void setVerbose (int v) noexcept { verbose = v; }
118 : void setMaxIter (int n) noexcept { max_iters = n; }
119 : void setMaxFmgIter (int n) noexcept { max_fmg_iters = n; }
120 : void setFixedIter (int nit) noexcept { do_fixed_number_of_iters = nit; }
121 :
122 : void setPreSmooth (int n) noexcept { nu1 = n; }
123 : void setPostSmooth (int n) noexcept { nu2 = n; }
124 : void setFinalSmooth (int n) noexcept { nuf = n; }
125 : void setBottomSmooth (int n) noexcept { nub = n; }
126 :
127 : void setBottomSolver (BottomSolver s) noexcept { bottom_solver = s; }
128 : void setCFStrategy (CFStrategy a_cf_strategy) noexcept {cf_strategy = a_cf_strategy;}
129 : void setBottomVerbose (int v) noexcept { bottom_verbose = v; }
130 : void setBottomMaxIter (int n) noexcept { bottom_maxiter = n; }
131 : void setBottomTolerance (RT t) noexcept { bottom_reltol = t; }
132 : void setBottomToleranceAbs (RT t) noexcept { bottom_abstol = t; }
133 : RT getBottomToleranceAbs () noexcept { return bottom_abstol; }
134 :
135 : void setAlwaysUseBNorm (int flag) noexcept { always_use_bnorm = flag; }
136 :
137 : void setFinalFillBC (int flag) noexcept { final_fill_bc = flag; }
138 :
139 : [[nodiscard]] int numAMRLevels () const noexcept { return namrlevs; }
140 :
141 : void setNSolve (int flag) noexcept { do_nsolve = flag; }
142 : void setNSolveGridSize (int s) noexcept { nsolve_grid_size = s; }
143 :
144 : #if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
145 : void setHypreInterface (Hypre::Interface f) noexcept {
146 : // must use ij interface for EB
147 : #ifndef AMREX_USE_EB
148 : hypre_interface = f;
149 : #else
150 : amrex::ignore_unused(f);
151 : #endif
152 : }
153 :
154 : //! Set the namespace in input file for parsing HYPRE specific options
155 : void setHypreOptionsNamespace(const std::string& prefix) noexcept
156 : {
157 : hypre_options_namespace = prefix;
158 : }
159 :
160 : void setHypreOldDefault (bool l) noexcept {hypre_old_default = l;}
161 : void setHypreRelaxType (int n) noexcept {hypre_relax_type = n;}
162 : void setHypreRelaxOrder (int n) noexcept {hypre_relax_order = n;}
163 : void setHypreNumSweeps (int n) noexcept {hypre_num_sweeps = n;}
164 : void setHypreStrongThreshold (Real t) noexcept {hypre_strong_threshold = t;}
165 : #endif
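    //
    // When built with HYPRE (and AMREX_SPACEDIM > 1), hypre's solvers
    // (BoomerAMG for the default ij interface) can take over the bottom solve.
    // A sketch, with mlmg set up as above; HYPRE-specific options are
    // additionally read from the inputs file under the chosen namespace:
    //
    //     mlmg.setBottomSolver(MLMG::BottomSolver::hypre);
    //     mlmg.setBottomVerbose(1);
    //     mlmg.setHypreOptionsNamespace("hypre");
    //     mlmg.setHypreStrongThreshold(Real(0.5)); // e.g. a common choice for 3D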
166 :
167 : template <typename AMF>
168 : void prepareForSolve (Vector<AMF*> const& a_sol, Vector<AMF const*> const& a_rhs);
169 :
170 : void prepareForNSolve ();
171 :
172 : void prepareLinOp ();
173 :
174 : void prepareMGcycle ();
175 :
176 : void prepareForGMRES ();
177 :
178 : void oneIter (int iter);
179 :
180 : void miniCycle (int amrlev);
181 :
182 : void mgVcycle (int amrlev, int mglev);
183 : void mgFcycle ();
184 :
185 : void bottomSolve ();
186 : void NSolve (MLMGT<MF>& a_solver, MF& a_sol, MF& a_rhs);
187 : void actualBottomSolve ();
188 :
189 : void computeMLResidual (int amrlevmax);
190 : void computeResidual (int alev);
191 : void computeResWithCrseSolFineCor (int calev, int falev);
192 : void computeResWithCrseCorFineCor (int falev);
193 : void interpCorrection (int alev);
194 : void interpCorrection (int alev, int mglev);
195 : void addInterpCorrection (int alev, int mglev);
196 :
197 : void computeResOfCorrection (int amrlev, int mglev);
198 :
199 : RT ResNormInf (int alev, bool local = false);
200 : RT MLResNormInf (int alevmax, bool local = false);
201 : RT MLRhsNormInf (bool local = false);
202 :
203 : void makeSolvable ();
204 : void makeSolvable (int amrlev, int mglev, MF& mf);
205 :
206 : #if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
207 : template <class TMF=MF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int> = 0>
208 : void bottomSolveWithHypre (MF& x, const MF& b);
209 : #endif
210 :
211 : #if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
212 : template <class TMF=MF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int> = 0>
213 : void bottomSolveWithPETSc (MF& x, const MF& b);
214 : #endif
215 :
216 : int bottomSolveWithCG (MF& x, const MF& b, typename MLCGSolverT<MF>::Type type);
217 :
218 : [[nodiscard]] RT getInitRHS () const noexcept { return m_rhsnorm0; }
219 : // Initial composite residual
220 : [[nodiscard]] RT getInitResidual () const noexcept { return m_init_resnorm0; }
221 : // Final composite residual
222 : [[nodiscard]] RT getFinalResidual () const noexcept { return m_final_resnorm0; }
223 : // Residuals on the *finest* AMR level after each iteration
224 : [[nodiscard]] Vector<RT> const& getResidualHistory () const noexcept { return m_iter_fine_resnorm0; }
225 : [[nodiscard]] int getNumIters () const noexcept { return m_iter_fine_resnorm0.size(); }
226 : [[nodiscard]] Vector<int> const& getNumCGIters () const noexcept { return m_niters_cg; }
227 :
228 : MLLinOpT<MF>& getLinOp () { return linop; }
229 :
230 : private:
231 :
232 : bool throw_exception = false;
233 : int verbose = 1;
234 :
235 : int max_iters = 200;
236 : int do_fixed_number_of_iters = 0;
237 :
238 : int nu1 = 2; //!< pre
239 : int nu2 = 2; //!< post
240 : int nuf = 8; //!< when smoother is used as bottom solver
241 : int nub = 0; //!< additional smoothing after bottom cg solver
242 :
243 : int max_fmg_iters = 0;
244 :
245 : BottomSolver bottom_solver = BottomSolver::Default;
246 : CFStrategy cf_strategy = CFStrategy::none;
247 : int bottom_verbose = 0;
248 : int bottom_maxiter = 200;
249 : RT bottom_reltol = std::is_same<RT,double>() ? RT(1.e-4) : RT(1.e-3);
250 : RT bottom_abstol = RT(-1.0);
251 :
252 : int always_use_bnorm = 0;
253 :
254 : int final_fill_bc = 0;
255 :
256 : MLLinOpT<MF>& linop;
257 : int ncomp;
258 : int namrlevs;
259 : int finest_amr_lev;
260 :
261 : bool linop_prepared = false;
262 : Long solve_called = 0;
263 :
264 : //! N Solve
265 : int do_nsolve = false;
266 : int nsolve_grid_size = 16;
267 : std::unique_ptr<MLLinOpT<MF>> ns_linop;
268 : std::unique_ptr<MLMGT<MF>> ns_mlmg;
269 : std::unique_ptr<MF> ns_sol;
270 : std::unique_ptr<MF> ns_rhs;
271 :
272 : //! Hypre
273 : #if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
274 : // Hypre::Interface hypre_interface = Hypre::Interface::structed;
275 : // Hypre::Interface hypre_interface = Hypre::Interface::semi_structed;
276 : Hypre::Interface hypre_interface = Hypre::Interface::ij;
277 :
278 : std::unique_ptr<Hypre> hypre_solver;
279 : std::unique_ptr<MLMGBndryT<MF>> hypre_bndry;
280 : std::unique_ptr<HypreNodeLap> hypre_node_solver;
281 :
282 : std::string hypre_options_namespace = "hypre";
283 : bool hypre_old_default = true; // Falgout coarsening with modified classical interpolation
284 : int hypre_relax_type = 6; // G-S/Jacobi hybrid relaxation
285 : int hypre_relax_order = 1; // uses C/F relaxation
286 : int hypre_num_sweeps = 2; // Sweeps on each level
287 : Real hypre_strong_threshold = 0.25; // Hypre default is 0.25
288 : #endif
289 :
290 : //! PETSc
291 : #if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
292 : std::unique_ptr<PETScABecLap> petsc_solver;
293 : std::unique_ptr<MLMGBndryT<MF>> petsc_bndry;
294 : #endif
295 :
296 : /**
297 :  * \brief To avoid confusion, terms such as sol, cor, rhs, and res are
298 : * in the frame of the original equation, not the correction form
299 : */
300 : Vector<MF> sol; //!< Might be alias to argument a_sol
301 : Vector<MF> rhs; //!< Copy of original rhs
302 : //! L(sol) = rhs
303 :
304 : Vector<int> sol_is_alias;
305 :
306 : /**
307 :  * \brief First Vector: AMR levels. 0 is the coarsest level
308 : * Second Vector: MG levels. 0 is the finest level
309 : */
310 : Vector<Vector<MF> > res; //!< = rhs - L(sol)
311 : Vector<Vector<MF> > cor; //!< L(cor) = res
312 : Vector<Vector<MF> > cor_hold;
313 : Vector<Vector<MF> > rescor; //!< = res - L(cor)
314 : //! Residual of the correction form
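    // Within one V-cycle these containers are used as follows: cor[alev][mglev]
    // is smoothed against res[alev][mglev], rescor = res - L(cor) is formed and
    // restricted to become res on the next coarser MG level, interpolated
    // corrections are added back on the way up, and finally sol += cor[alev][0].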
315 :
316 : enum timer_types { solve_time=0, iter_time, bottom_time, ntimers };
317 : Vector<double> timer;
318 :
319 : RT m_rhsnorm0 = RT(-1.0);
320 : RT m_init_resnorm0 = RT(-1.0);
321 : RT m_final_resnorm0 = RT(-1.0);
322 : Vector<int> m_niters_cg;
323 : Vector<RT> m_iter_fine_resnorm0; // Residual for each iteration at the finest level
324 :
325 : void checkPoint (const Vector<MultiFab*>& a_sol,
326 : const Vector<MultiFab const*>& a_rhs,
327 : RT a_tol_rel, RT a_tol_abs, const char* a_file_name) const;
328 :
329 : };
330 :
331 : template <typename MF>
332 : MLMGT<MF>::MLMGT (MLLinOpT<MF>& a_lp)
333 : : linop(a_lp), ncomp(a_lp.getNComp()), namrlevs(a_lp.NAMRLevels()),
334 : finest_amr_lev(a_lp.NAMRLevels()-1)
335 : {}
336 :
337 : template <typename MF> MLMGT<MF>::~MLMGT () = default;
338 :
339 : template <typename MF>
340 : template <typename AMF>
341 : auto
342 : MLMGT<MF>::solve (std::initializer_list<AMF*> a_sol,
343 : std::initializer_list<AMF const*> a_rhs,
344 : RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file) -> RT
345 : {
346 : return solve(Vector<AMF*>(std::move(a_sol)),
347 : Vector<AMF const*>(std::move(a_rhs)),
348 : a_tol_rel, a_tol_abs, checkpoint_file);
349 : }
350 :
351 : template <typename MF>
352 : template <typename AMF>
353 : auto
354 300 : MLMGT<MF>::solve (const Vector<AMF*>& a_sol, const Vector<AMF const*>& a_rhs,
355 : RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file) -> RT
356 : {
357 : BL_PROFILE("MLMG::solve()");
358 :
359 : if constexpr (std::is_same<AMF,MultiFab>()) {
360 300 : if (checkpoint_file != nullptr) {
361 0 : checkPoint(a_sol, a_rhs, a_tol_rel, a_tol_abs, checkpoint_file);
362 : }
363 : }
364 :
365 300 : if (bottom_solver == BottomSolver::Default) {
366 0 : bottom_solver = linop.getDefaultBottomSolver();
367 : }
368 :
369 : #if (defined(AMREX_USE_HYPRE) || defined(AMREX_USE_PETSC)) && (AMREX_SPACEDIM > 1)
370 : if (bottom_solver == BottomSolver::hypre || bottom_solver == BottomSolver::petsc) {
371 : int mo = linop.getMaxOrder();
372 : if (a_sol[0]->hasEBFabFactory()) {
373 : linop.setMaxOrder(2);
374 : } else {
375 : linop.setMaxOrder(std::min(3,mo)); // maxorder = 4 not supported
376 : }
377 : }
378 : #endif
379 :
380 300 : bool is_nsolve = linop.m_parent;
381 :
382 300 : auto solve_start_time = amrex::second();
383 :
384 300 : RT& composite_norminf = m_final_resnorm0;
385 :
386 300 : m_niters_cg.clear();
387 300 : m_iter_fine_resnorm0.clear();
388 :
389 300 : prepareForSolve(a_sol, a_rhs);
390 :
391 300 : computeMLResidual(finest_amr_lev);
392 :
393 300 : bool local = true;
394 300 : RT resnorm0 = MLResNormInf(finest_amr_lev, local);
395 300 : RT rhsnorm0 = MLRhsNormInf(local);
396 300 : if (!is_nsolve) {
397 300 : ParallelAllReduce::Max<RT>({resnorm0, rhsnorm0}, ParallelContext::CommunicatorSub());
398 :
399 300 : if (verbose >= 1)
400 : {
401 600 : amrex::Print() << "MLMG: Initial rhs = " << rhsnorm0 << "\n"
402 300 : << "MLMG: Initial residual (resid0) = " << resnorm0 << "\n";
403 : }
404 : }
405 :
406 300 : m_init_resnorm0 = resnorm0;
407 300 : m_rhsnorm0 = rhsnorm0;
408 :
409 : RT max_norm;
410 300 : std::string norm_name;
411 300 : if (always_use_bnorm || rhsnorm0 >= resnorm0) {
412 300 : norm_name = "bnorm";
413 300 : max_norm = rhsnorm0;
414 : } else {
415 0 : norm_name = "resid0";
416 0 : max_norm = resnorm0;
417 : }
418 300 : const RT res_target = std::max(a_tol_abs, std::max(a_tol_rel,RT(1.e-16))*max_norm);
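                     // For example, with a_tol_rel = 1e-10, a_tol_abs = 0, and
                     // max_norm = bnorm = 3.2e2, iterations stop once the
                     // fine-level residual inf-norm drops below 3.2e-8; the
                     // RT(1.e-16) floor only guards against a zero (or tiny)
                     // relative tolerance.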
419 :
420 300 : if (!is_nsolve && resnorm0 <= res_target) {
421 1 : composite_norminf = resnorm0;
422 1 : if (verbose >= 1) {
423 1 : amrex::Print() << "MLMG: No iterations needed\n";
424 : }
425 : } else {
426 299 : auto iter_start_time = amrex::second();
427 299 : bool converged = false;
428 :
429 299 : const int niters = do_fixed_number_of_iters ? do_fixed_number_of_iters : max_iters;
430 4770 : for (int iter = 0; iter < niters; ++iter)
431 : {
432 4770 : oneIter(iter);
433 :
434 4770 : converged = false;
435 :
436 : // Test convergence on the fine amr level
437 4770 : computeResidual(finest_amr_lev);
438 :
439 4770 : if (is_nsolve) { continue; }
440 :
441 4770 : RT fine_norminf = ResNormInf(finest_amr_lev);
442 4770 : m_iter_fine_resnorm0.push_back(fine_norminf);
443 4770 : composite_norminf = fine_norminf;
444 4770 : if (verbose >= 2) {
445 9540 : amrex::Print() << "MLMG: Iteration " << std::setw(3) << iter+1 << " Fine resid/"
446 4770 : << norm_name << " = " << fine_norminf/max_norm << "\n";
447 : }
448 4770 : bool fine_converged = (fine_norminf <= res_target);
449 :
450 4770 : if (namrlevs == 1 && fine_converged) {
451 299 : converged = true;
452 4471 : } else if (fine_converged) {
453 : // finest level is converged, but we still need to test the coarse levels
454 0 : computeMLResidual(finest_amr_lev-1);
455 0 : RT crse_norminf = MLResNormInf(finest_amr_lev-1);
456 0 : if (verbose >= 2) {
457 0 : amrex::Print() << "MLMG: Iteration " << std::setw(3) << iter+1
458 0 : << " Crse resid/" << norm_name << " = "
459 0 : << crse_norminf/max_norm << "\n";
460 : }
461 0 : converged = (crse_norminf <= res_target);
462 0 : composite_norminf = std::max(fine_norminf, crse_norminf);
463 : } else {
464 4471 : converged = false;
465 : }
466 :
467 4770 : if (converged) {
468 299 : if (verbose >= 1) {
469 598 : amrex::Print() << "MLMG: Final Iter. " << iter+1
470 299 : << " resid, resid/" << norm_name << " = "
471 299 : << composite_norminf << ", "
472 299 : << composite_norminf/max_norm << "\n";
473 : }
474 299 : break;
475 : } else {
476 4471 : if (composite_norminf > RT(1.e20)*max_norm)
477 : {
478 0 : if (verbose > 0) {
479 0 : amrex::Print() << "MLMG: Failing to converge after " << iter+1 << " iterations."
480 0 : << " resid, resid/" << norm_name << " = "
481 0 : << composite_norminf << ", "
482 0 : << composite_norminf/max_norm << "\n";
483 : }
484 :
485 0 : if ( throw_exception ) {
486 0 : throw error("MLMG blew up.");
487 : } else {
488 : amrex::Abort("MLMG failing, so let's stop here");
489 : }
490 : }
491 : }
492 : }
493 :
494 299 : if (!converged && do_fixed_number_of_iters == 0) {
495 0 : if (verbose > 0) {
496 0 : amrex::Print() << "MLMG: Failed to converge after " << max_iters << " iterations."
497 0 : << " resid, resid/" << norm_name << " = "
498 0 : << composite_norminf << ", "
499 0 : << composite_norminf/max_norm << "\n";
500 : }
501 :
502 0 : if ( throw_exception ) {
503 0 : throw error("MLMG failed to converge.");
504 : } else {
505 : amrex::Abort("MLMG failed.");
506 : }
507 : }
508 299 : timer[iter_time] = amrex::second() - iter_start_time;
509 : }
510 :
511 300 : linop.postSolve(sol);
512 :
513 300 : IntVect ng_back = final_fill_bc ? IntVect(1) : IntVect(0);
514 300 : if (linop.hasHiddenDimension()) {
515 0 : ng_back[linop.hiddenDirection()] = 0;
516 : }
517 600 : for (int alev = 0; alev < namrlevs; ++alev)
518 : {
519 300 : if (!sol_is_alias[alev]) {
520 0 : LocalCopy(*a_sol[alev], sol[alev], 0, 0, ncomp, ng_back);
521 : }
522 : }
523 :
524 300 : timer[solve_time] = amrex::second() - solve_start_time;
525 300 : if (verbose >= 1) {
526 300 : ParallelReduce::Max<double>(timer.data(), timer.size(), 0,
527 : ParallelContext::CommunicatorSub());
528 300 : if (ParallelContext::MyProcSub() == 0)
529 : {
530 600 : amrex::AllPrint() << "MLMG: Timers: Solve = " << timer[solve_time]
531 300 : << " Iter = " << timer[iter_time]
532 300 : << " Bottom = " << timer[bottom_time] << "\n";
533 : }
534 : }
535 :
536 300 : ++solve_called;
537 :
538 600 : return composite_norminf;
539 : }
540 :
541 : template <typename MF>
542 : template <typename AMF>
543 : void
544 : MLMGT<MF>::getGradSolution (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_grad_sol, Location a_loc)
545 : {
546 : BL_PROFILE("MLMG::getGradSolution()");
547 : for (int alev = 0; alev <= finest_amr_lev; ++alev) {
548 : if constexpr (std::is_same<AMF,MF>()) {
549 : linop.compGrad(alev, a_grad_sol[alev], sol[alev], a_loc);
550 : } else {
551 : Array<MF,AMREX_SPACEDIM> grad_sol;
552 : for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
553 : auto const& amf = *(a_grad_sol[alev][idim]);
554 : grad_sol[idim].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
555 : }
556 : linop.compGrad(alev, GetArrOfPtrs(grad_sol), sol[alev], a_loc);
557 : for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
558 : LocalCopy(*a_grad_sol[alev][idim], grad_sol[idim], 0, 0, ncomp, IntVect(0));
559 : }
560 : }
561 : }
562 : }
563 :
564 : template <typename MF>
565 : template <typename AMF>
566 : void
567 : MLMGT<MF>::getGradSolution (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_grad_sol, Location a_loc)
568 : {
569 : getGradSolution(Vector<Array<AMF*,AMREX_SPACEDIM>>(std::move(a_grad_sol)), a_loc);
570 : }
571 :
572 : template <typename MF>
573 : template <typename AMF>
574 : void
575 : MLMGT<MF>::getFluxes (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_flux,
576 : Location a_loc)
577 : {
578 : if (!linop.isCellCentered()) {
579 : amrex::Abort("Calling wrong getFluxes for nodal solver");
580 : }
581 :
582 : AMREX_ASSERT(sol.size() == a_flux.size());
583 :
584 : if constexpr (std::is_same<AMF,MF>()) {
585 : getFluxes(a_flux, GetVecOfPtrs(sol), a_loc);
586 : } else {
587 : Vector<Array<MF,AMREX_SPACEDIM>> fluxes(namrlevs);
588 : for (int ilev = 0; ilev < namrlevs; ++ilev) {
589 : for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
590 : auto const& amf = *(a_flux[ilev][idim]);
591 : fluxes[ilev][idim].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
592 : }
593 : }
594 : getFluxes(GetVecOfArrOfPtrs(fluxes), GetVecOfPtrs(sol), a_loc);
595 : for (int ilev = 0; ilev < namrlevs; ++ilev) {
596 : for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
597 : LocalCopy(*a_flux[ilev][idim], fluxes[ilev][idim], 0, 0, ncomp, IntVect(0));
598 : }
599 : }
600 : }
601 : }
602 :
603 : template <typename MF>
604 : template <typename AMF>
605 : void
606 : MLMGT<MF>::getFluxes (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_flux,
607 : Location a_loc)
608 : {
609 : getFluxes(Vector<Array<AMF*,AMREX_SPACEDIM>>(std::move(a_flux)), a_loc);
610 : }
611 :
612 : template <typename MF>
613 : template <typename AMF>
614 : void
615 : MLMGT<MF>::getFluxes (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_flux,
616 : const Vector<AMF*>& a_sol, Location a_loc)
617 : {
618 : BL_PROFILE("MLMG::getFluxes()");
619 :
620 : if (!linop.isCellCentered()) {
621 : amrex::Abort("Calling wrong getFluxes for nodal solver");
622 : }
623 :
624 : if constexpr (std::is_same<AMF,MF>()) {
625 : linop.getFluxes(a_flux, a_sol, a_loc);
626 : } else {
627 : Vector<Array<MF,AMREX_SPACEDIM>> fluxes(namrlevs);
628 : for (int ilev = 0; ilev < namrlevs; ++ilev) {
629 : for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
630 : auto const& amf = *(a_flux[ilev][idim]);
631 : fluxes[ilev][idim].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
632 : }
633 : LocalCopy(sol[ilev], *a_sol[ilev], 0, 0, ncomp, nGrowVect(sol[ilev]));
634 : }
635 : linop.getFluxes(GetVecOfArrOfPtrs(fluxes), GetVecOfPtrs(sol), a_loc);
636 : for (int ilev = 0; ilev < namrlevs; ++ilev) {
637 : for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
638 : LocalCopy(*a_flux[ilev][idim], fluxes[ilev][idim], 0, 0, ncomp, IntVect(0));
639 : }
640 : }
641 : }
642 : }
643 :
644 : template <typename MF>
645 : template <typename AMF>
646 : void
647 : MLMGT<MF>::getFluxes (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_flux,
648 : std::initializer_list<AMF*> a_sol, Location a_loc)
649 : {
650 : getFluxes(Vector<Array<AMF*,AMREX_SPACEDIM>>(std::move(a_flux)),
651 : Vector<AMF*>(std::move(a_sol)), a_loc);
652 : }
653 :
654 : template <typename MF>
655 : template <typename AMF>
656 : void
657 : MLMGT<MF>::getFluxes (const Vector<AMF*> & a_flux, Location a_loc)
658 : {
659 : AMREX_ASSERT(sol.size() == a_flux.size());
660 : if constexpr (std::is_same<AMF,MF>()) {
661 : getFluxes(a_flux, GetVecOfPtrs(sol), a_loc);
662 : } else {
663 : Vector<MF> fluxes(namrlevs);
664 : for (int ilev = 0; ilev < namrlevs; ++ilev) {
665 : auto const& amf = *a_flux[ilev];
666 : fluxes[ilev].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
667 : }
668 : getFluxes(GetVecOfPtrs(fluxes), GetVecOfPtrs(sol), a_loc);
669 : for (int ilev = 0; ilev < namrlevs; ++ilev) {
670 : LocalCopy(*a_flux[ilev], fluxes[ilev], 0, 0, ncomp, IntVect(0));
671 : }
672 : }
673 : }
674 :
675 : template <typename MF>
676 : template <typename AMF>
677 : void
678 : MLMGT<MF>::getFluxes (std::initializer_list<AMF*> a_flux, Location a_loc)
679 : {
680 : getFluxes(Vector<AMF*>(std::move(a_flux)), a_loc);
681 : }
682 :
683 : template <typename MF>
684 : template <typename AMF>
685 : void
686 : MLMGT<MF>::getFluxes (const Vector<AMF*> & a_flux,
687 : const Vector<AMF*>& a_sol, Location /*a_loc*/)
688 : {
689 : AMREX_ASSERT(nComp(*a_flux[0]) >= AMREX_SPACEDIM);
690 :
691 : if constexpr (! std::is_same<AMF,MF>()) {
692 : for (int alev = 0; alev < namrlevs; ++alev) {
693 : LocalCopy(sol[alev], *a_sol[alev], 0, 0, ncomp, nGrowVect(sol[alev]));
694 : }
695 : }
696 :
697 : if (linop.isCellCentered())
698 : {
699 : Vector<Array<MF,AMREX_SPACEDIM> > ffluxes(namrlevs);
700 : for (int alev = 0; alev < namrlevs; ++alev) {
701 : for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
702 : const int mglev = 0;
703 : int nghost = 0;
704 : if (cf_strategy == CFStrategy::ghostnodes) { nghost = linop.getNGrow(alev); }
705 : ffluxes[alev][idim].define(amrex::convert(linop.m_grids[alev][mglev],
706 : IntVect::TheDimensionVector(idim)),
707 : linop.m_dmap[alev][mglev], ncomp, nghost, MFInfo(),
708 : *linop.m_factory[alev][mglev]);
709 : }
710 : }
711 : if constexpr (std::is_same<AMF,MF>()) {
712 : getFluxes(amrex::GetVecOfArrOfPtrs(ffluxes), a_sol, Location::FaceCenter);
713 : } else {
714 : getFluxes(amrex::GetVecOfArrOfPtrs(ffluxes), GetVecOfPtrs(sol), Location::FaceCenter);
715 : }
716 : for (int alev = 0; alev < namrlevs; ++alev) {
717 : #ifdef AMREX_USE_EB
718 : EB_average_face_to_cellcenter(*a_flux[alev], 0, amrex::GetArrOfConstPtrs(ffluxes[alev]));
719 : #else
720 : average_face_to_cellcenter(*a_flux[alev], 0, amrex::GetArrOfConstPtrs(ffluxes[alev]));
721 : #endif
722 : }
723 :
724 : } else {
725 : if constexpr (std::is_same<AMF,MF>()) {
726 : linop.getFluxes(a_flux, a_sol);
727 : } else {
728 : Vector<MF> fluxes(namrlevs);
729 : for (int ilev = 0; ilev < namrlevs; ++ilev) {
730 : auto const& amf = *a_flux[ilev];
731 : fluxes[ilev].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
732 : }
733 : linop.getFluxes(GetVecOfPtrs(fluxes), GetVecOfPtrs(sol));
734 : for (int ilev = 0; ilev < namrlevs; ++ilev) {
735 : LocalCopy(*a_flux[ilev], fluxes[ilev], 0, 0, ncomp, IntVect(0));
736 : }
737 : }
738 : }
739 : }
740 :
741 : template <typename MF>
742 : template <typename AMF>
743 : void
744 : MLMGT<MF>::getFluxes (std::initializer_list<AMF*> a_flux,
745 : std::initializer_list<AMF*> a_sol, Location a_loc)
746 : {
747 : getFluxes(Vector<AMF*>(std::move(a_flux)),
748 : Vector<AMF*>(std::move(a_sol)), a_loc);
749 : }
750 :
751 : #ifdef AMREX_USE_EB
752 : template <typename MF>
753 : void
754 : MLMGT<MF>::getEBFluxes (const Vector<MF*>& a_eb_flux)
755 : {
756 : if (!linop.isCellCentered()) {
757 : amrex::Abort("getEBFluxes is for cell-centered only");
758 : }
759 :
760 : AMREX_ASSERT(sol.size() == a_eb_flux.size());
761 : getEBFluxes(a_eb_flux, GetVecOfPtrs(sol));
762 : }
763 :
764 : template <typename MF>
765 : void
766 : MLMGT<MF>::getEBFluxes (const Vector<MF*>& a_eb_flux, const Vector<MF*>& a_sol)
767 : {
768 : BL_PROFILE("MLMG::getEBFluxes()");
769 :
770 : if (!linop.isCellCentered()) {
771 : amrex::Abort("getEBFluxes is for cell-centered only");
772 : }
773 :
774 : linop.getEBFluxes(a_eb_flux, a_sol);
775 : }
776 : #endif
777 :
778 : template <typename MF>
779 : void
780 : MLMGT<MF>::compResidual (const Vector<MF*>& a_res, const Vector<MF*>& a_sol,
781 : const Vector<MF const*>& a_rhs)
782 : {
783 : BL_PROFILE("MLMG::compResidual()");
784 :
785 : IntVect ng_sol(1);
786 : if (linop.hasHiddenDimension()) { ng_sol[linop.hiddenDirection()] = 0; }
787 :
788 : sol.resize(namrlevs);
789 : sol_is_alias.resize(namrlevs,true);
790 : for (int alev = 0; alev < namrlevs; ++alev)
791 : {
792 : if (cf_strategy == CFStrategy::ghostnodes || nGrowVect(*a_sol[alev]) == ng_sol)
793 : {
794 : sol[alev] = linop.makeAlias(*a_sol[alev]);
795 : sol_is_alias[alev] = true;
796 : }
797 : else
798 : {
799 : if (sol_is_alias[alev])
800 : {
801 : sol[alev] = linop.make(alev, 0, ng_sol);
802 : }
803 : LocalCopy(sol[alev], *a_sol[alev], 0, 0, ncomp, IntVect(0));
804 : }
805 : }
806 :
807 : prepareLinOp();
808 :
809 : const auto& amrrr = linop.AMRRefRatio();
810 :
811 : for (int alev = finest_amr_lev; alev >= 0; --alev) {
812 : const MF* crse_bcdata = (alev > 0) ? &(sol[alev-1]) : nullptr;
813 : const MF* prhs = a_rhs[alev];
814 : #if (AMREX_SPACEDIM != 3)
815 : int nghost = (cf_strategy == CFStrategy::ghostnodes) ? linop.getNGrow(alev) : 0;
816 : MF rhstmp(boxArray(*prhs), DistributionMap(*prhs), ncomp, nghost,
817 : MFInfo(), *linop.Factory(alev));
818 : LocalCopy(rhstmp, *prhs, 0, 0, ncomp, IntVect(nghost));
819 : linop.applyMetricTerm(alev, 0, rhstmp);
820 : linop.unimposeNeumannBC(alev, rhstmp);
821 : linop.applyInhomogNeumannTerm(alev, rhstmp);
822 : prhs = &rhstmp;
823 : #endif
824 : linop.solutionResidual(alev, *a_res[alev], sol[alev], *prhs, crse_bcdata);
825 : if (alev < finest_amr_lev) {
826 : linop.reflux(alev, *a_res[alev], sol[alev], *prhs,
827 : *a_res[alev+1], sol[alev+1], *a_rhs[alev+1]);
828 : if (linop.isCellCentered()) {
829 : #ifdef AMREX_USE_EB
830 : EB_average_down(*a_res[alev+1], *a_res[alev], 0, ncomp, amrrr[alev]);
831 : #else
832 : average_down(*a_res[alev+1], *a_res[alev], 0, ncomp, amrrr[alev]);
833 : #endif
834 : }
835 : }
836 : }
837 :
838 :
839 : #if (AMREX_SPACEDIM != 3)
840 : for (int alev = 0; alev <= finest_amr_lev; ++alev) {
841 : linop.unapplyMetricTerm(alev, 0, *a_res[alev]);
842 : }
843 : #endif
844 : }
845 :
846 : template <typename MF>
847 : void
848 : MLMGT<MF>::apply (const Vector<MF*>& out, const Vector<MF*>& a_in)
849 : {
850 : BL_PROFILE("MLMG::apply()");
851 :
852 : Vector<MF*> in(namrlevs);
853 : Vector<MF> in_raii(namrlevs);
854 : Vector<MF> rh(namrlevs);
855 : int nghost = 0;
856 : IntVect ng_sol(1);
857 : if (linop.hasHiddenDimension()) { ng_sol[linop.hiddenDirection()] = 0; }
858 :
859 : for (int alev = 0; alev < namrlevs; ++alev)
860 : {
861 : if (cf_strategy == CFStrategy::ghostnodes)
862 : {
863 : nghost = linop.getNGrow(alev);
864 : in[alev] = a_in[alev];
865 : }
866 : else if (nGrowVect(*a_in[alev]) == ng_sol)
867 : {
868 : in[alev] = a_in[alev];
869 : }
870 : else
871 : {
872 : IntVect ng = ng_sol;
873 : if (cf_strategy == CFStrategy::ghostnodes) { ng = IntVect(nghost); }
874 : in_raii[alev] = linop.make(alev, 0, ng);
875 : LocalCopy(in_raii[alev], *a_in[alev], 0, 0, ncomp, IntVect(nghost));
876 : in[alev] = &(in_raii[alev]);
877 : }
878 : rh[alev] = linop.make(alev, 0, IntVect(nghost));
879 : setVal(rh[alev], RT(0.0));
880 : }
881 :
882 : prepareLinOp();
883 :
884 : for (int alev = 0; alev < namrlevs; ++alev) {
885 : linop.applyInhomogNeumannTerm(alev, rh[alev]);
886 : }
887 :
888 : const auto& amrrr = linop.AMRRefRatio();
889 :
890 : for (int alev = finest_amr_lev; alev >= 0; --alev) {
891 : const MF* crse_bcdata = (alev > 0) ? in[alev-1] : nullptr;
892 : linop.solutionResidual(alev, *out[alev], *in[alev], rh[alev], crse_bcdata);
893 : if (alev < finest_amr_lev) {
894 : linop.reflux(alev, *out[alev], *in[alev], rh[alev],
895 : *out[alev+1], *in[alev+1], rh[alev+1]);
896 : if (linop.isCellCentered()) {
897 : if constexpr (IsMultiFabLike_v<MF>) {
898 : #ifdef AMREX_USE_EB
899 : EB_average_down(*out[alev+1], *out[alev], 0, nComp(*out[alev]), amrrr[alev]);
900 : #else
901 : average_down(*out[alev+1], *out[alev], 0, nComp(*out[alev]), amrrr[alev]);
902 : #endif
903 : } else {
904 : amrex::Abort("MLMG: TODO average_down for non-MultiFab");
905 : }
906 : }
907 : }
908 : }
909 :
910 : #if (AMREX_SPACEDIM != 3)
911 : for (int alev = 0; alev <= finest_amr_lev; ++alev) {
912 : linop.unapplyMetricTerm(alev, 0, *out[alev]);
913 : }
914 : #endif
915 :
916 : for (int alev = 0; alev <= finest_amr_lev; ++alev) {
917 : if (cf_strategy == CFStrategy::ghostnodes) { nghost = linop.getNGrow(alev); }
918 : Scale(*out[alev], RT(-1), 0, nComp(*out[alev]), nghost);
919 : }
920 : }
921 :
922 : template <typename MF>
923 : template <typename AMF>
924 : void
925 300 : MLMGT<MF>::prepareForSolve (Vector<AMF*> const& a_sol, Vector<AMF const*> const& a_rhs)
926 : {
927 : BL_PROFILE("MLMG::prepareForSolve()");
928 :
929 : AMREX_ASSERT(namrlevs <= a_sol.size());
930 : AMREX_ASSERT(namrlevs <= a_rhs.size());
931 :
932 300 : timer.assign(ntimers, 0.0);
933 :
934 300 : IntVect ng_rhs(0);
935 300 : IntVect ng_sol(1);
936 300 : if (linop.hasHiddenDimension()) { ng_sol[linop.hiddenDirection()] = 0; }
937 :
938 300 : if (!linop_prepared) {
939 300 : linop.prepareForSolve();
940 300 : linop_prepared = true;
941 0 : } else if (linop.needsUpdate()) {
942 0 : linop.update();
943 :
944 : #if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
945 : hypre_solver.reset();
946 : hypre_bndry.reset();
947 : hypre_node_solver.reset();
948 : #endif
949 :
950 : #ifdef AMREX_USE_PETSC
951 : petsc_solver.reset();
952 : petsc_bndry.reset();
953 : #endif
954 : }
955 :
956 300 : sol.resize(namrlevs);
957 300 : sol_is_alias.resize(namrlevs,false);
958 600 : for (int alev = 0; alev < namrlevs; ++alev)
959 : {
960 300 : if (cf_strategy == CFStrategy::ghostnodes)
961 : {
962 : if constexpr (std::is_same<AMF,MF>()) {
963 300 : sol[alev] = linop.makeAlias(*a_sol[alev]);
964 300 : sol_is_alias[alev] = true;
965 : } else {
966 : amrex::Abort("Type conversion not supported for CFStrategy::ghostnodes");
967 : }
968 : }
969 : else
970 : {
971 0 : if (nGrowVect(*a_sol[alev]) == ng_sol) {
972 : if constexpr (std::is_same<AMF,MF>()) {
973 0 : sol[alev] = linop.makeAlias(*a_sol[alev]);
974 0 : sol_is_alias[alev] = true;
975 : }
976 : }
977 0 : if (!sol_is_alias[alev]) {
978 0 : if (!solve_called) {
979 0 : sol[alev] = linop.make(alev, 0, ng_sol);
980 : }
981 0 : LocalCopy(sol[alev], *a_sol[alev], 0, 0, ncomp, IntVect(0));
982 0 : setBndry(sol[alev], RT(0.0), 0, ncomp);
983 : }
984 : }
985 : }
986 :
987 300 : rhs.resize(namrlevs);
988 600 : for (int alev = 0; alev < namrlevs; ++alev)
989 : {
990 300 : if (cf_strategy == CFStrategy::ghostnodes) { ng_rhs = IntVect(linop.getNGrow(alev)); }
991 300 : if (!solve_called) {
992 300 : rhs[alev] = linop.make(alev, 0, ng_rhs);
993 : }
994 300 : LocalCopy(rhs[alev], *a_rhs[alev], 0, 0, ncomp, ng_rhs);
995 300 : linop.applyMetricTerm(alev, 0, rhs[alev]);
996 300 : linop.unimposeNeumannBC(alev, rhs[alev]);
997 300 : linop.applyInhomogNeumannTerm(alev, rhs[alev]);
998 300 : linop.applyOverset(alev, rhs[alev]);
999 300 : linop.scaleRHS(alev, rhs[alev]);
1000 :
1001 : #ifdef AMREX_USE_EB
1002 : const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(linop.Factory(alev));
1003 : if (factory && !factory->isAllRegular()) {
1004 : if constexpr (std::is_same<MF,MultiFab>()) {
1005 : EB_set_covered(rhs[alev], 0, ncomp, 0, RT(0.0));
1006 : EB_set_covered(sol[alev], 0, ncomp, 0, RT(0.0));
1007 : } else {
1008 : amrex::Abort("TODO: MLMG with EB only works with MultiFab");
1009 : }
1010 : }
1011 : #endif
1012 : }
1013 :
1014 300 : for (int falev = finest_amr_lev; falev > 0; --falev)
1015 : {
1016 0 : linop.averageDownSolutionRHS(falev-1, sol[falev-1], rhs[falev-1], sol[falev], rhs[falev]);
1017 : }
1018 :
1019 : // enforce solvability if appropriate
1020 300 : if (linop.isSingular(0) && linop.getEnforceSingularSolvable())
1021 : {
1022 0 : makeSolvable();
1023 : }
1024 :
1025 300 : IntVect ng = linop.getNGrowVectRestriction();
1026 300 : if (cf_strategy == CFStrategy::ghostnodes) { ng = ng_rhs; }
1027 300 : if (!solve_called) {
1028 300 : linop.make(res, ng);
1029 300 : linop.make(rescor, ng);
1030 : }
1031 600 : for (int alev = 0; alev <= finest_amr_lev; ++alev)
1032 : {
1033 300 : const int nmglevs = linop.NMGLevels(alev);
1034 900 : for (int mglev = 0; mglev < nmglevs; ++mglev)
1035 : {
1036 600 : setVal(res [alev][mglev], RT(0.0));
1037 600 : setVal(rescor[alev][mglev], RT(0.0));
1038 : }
1039 : }
1040 :
1041 300 : if (cf_strategy != CFStrategy::ghostnodes) { ng = ng_sol; }
1042 300 : cor.resize(namrlevs);
1043 600 : for (int alev = 0; alev <= finest_amr_lev; ++alev)
1044 : {
1045 300 : const int nmglevs = linop.NMGLevels(alev);
1046 300 : cor[alev].resize(nmglevs);
1047 900 : for (int mglev = 0; mglev < nmglevs; ++mglev)
1048 : {
1049 600 : if (!solve_called) {
1050 600 : IntVect _ng = ng;
1051 600 : if (cf_strategy == CFStrategy::ghostnodes) { _ng=IntVect(linop.getNGrow(alev,mglev)); }
1052 600 : cor[alev][mglev] = linop.make(alev, mglev, _ng);
1053 : }
1054 600 : setVal(cor[alev][mglev], RT(0.0));
1055 : }
1056 : }
1057 :
1058 300 : cor_hold.resize(std::max(namrlevs-1,1));
1059 : {
1060 300 : const int alev = 0;
1061 300 : const int nmglevs = linop.NMGLevels(alev);
1062 300 : cor_hold[alev].resize(nmglevs);
1063 600 : for (int mglev = 0; mglev < nmglevs-1; ++mglev)
1064 : {
1065 300 : if (!solve_called) {
1066 300 : IntVect _ng = ng;
1067 300 : if (cf_strategy == CFStrategy::ghostnodes) { _ng=IntVect(linop.getNGrow(alev,mglev)); }
1068 300 : cor_hold[alev][mglev] = linop.make(alev, mglev, _ng);
1069 : }
1070 300 : setVal(cor_hold[alev][mglev], RT(0.0));
1071 : }
1072 : }
1073 300 : for (int alev = 1; alev < finest_amr_lev; ++alev)
1074 : {
1075 0 : cor_hold[alev].resize(1);
1076 0 : if (!solve_called) {
1077 0 : IntVect _ng = ng;
1078 0 : if (cf_strategy == CFStrategy::ghostnodes) { _ng=IntVect(linop.getNGrow(alev)); }
1079 0 : cor_hold[alev][0] = linop.make(alev, 0, _ng);
1080 : }
1081 0 : setVal(cor_hold[alev][0], RT(0.0));
1082 : }
1083 :
1084 600 : if (linop.m_parent // no embedded N-Solve
1085 300 : || !linop.supportNSolve())
1086 : {
1087 300 : do_nsolve = false;
1088 : }
1089 :
1090 300 : if (do_nsolve && ns_linop == nullptr)
1091 : {
1092 0 : prepareForNSolve();
1093 : }
1094 :
1095 300 : if (verbose >= 2) {
1096 300 : amrex::Print() << "MLMG: # of AMR levels: " << namrlevs << "\n"
1097 600 : << " # of MG levels on the coarsest AMR level: " << linop.NMGLevels(0)
1098 300 : << "\n";
1099 300 : if (ns_linop) {
1100 0 : amrex::Print() << " # of MG levels in N-Solve: " << ns_linop->NMGLevels(0) << "\n"
1101 0 : << " # of grids in N-Solve: " << ns_linop->m_grids[0][0].size() << "\n";
1102 : }
1103 : }
1104 300 : }
1105 :
1106 : template <typename MF>
1107 : void
1108 : MLMGT<MF>::prepareLinOp ()
1109 : {
1110 : if (!linop_prepared) {
1111 : linop.prepareForSolve();
1112 : linop_prepared = true;
1113 : } else if (linop.needsUpdate()) {
1114 : linop.update();
1115 : }
1116 : }
1117 :
1118 : template <typename MF>
1119 : void
1120 : MLMGT<MF>::prepareForGMRES ()
1121 : {
1122 : prepareLinOp();
1123 : linop.prepareForGMRES();
1124 : }
1125 :
1126 : template <typename MF>
1127 : void
1128 : MLMGT<MF>::prepareMGcycle ()
1129 : {
1130 : if (res.empty()) {
1131 : IntVect ng = linop.getNGrowVectRestriction();
1132 : linop.make(res, ng);
1133 : linop.make(rescor, ng);
1134 :
1135 : for (int alev = 0; alev <= finest_amr_lev; ++alev)
1136 : {
1137 : const int nmglevs = linop.NMGLevels(alev);
1138 : for (int mglev = 0; mglev < nmglevs; ++mglev)
1139 : {
1140 : setVal(res [alev][mglev], RT(0.0));
1141 : setVal(rescor[alev][mglev], RT(0.0));
1142 : }
1143 : }
1144 :
1145 : IntVect ng_sol(1);
1146 : if (linop.hasHiddenDimension()) { ng_sol[linop.hiddenDirection()] = 0; }
1147 : ng = ng_sol;
1148 :
1149 : cor.resize(namrlevs);
1150 : for (int alev = 0; alev <= finest_amr_lev; ++alev)
1151 : {
1152 : const int nmglevs = linop.NMGLevels(alev);
1153 : cor[alev].resize(nmglevs);
1154 : for (int mglev = 0; mglev < nmglevs; ++mglev)
1155 : {
1156 : cor[alev][mglev] = linop.make(alev, mglev, ng);
1157 : setVal(cor[alev][mglev], RT(0.0));
1158 : }
1159 : }
1160 :
1161 : cor_hold.resize(std::max(namrlevs-1,1));
1162 : {
1163 : const int alev = 0;
1164 : const int nmglevs = linop.NMGLevels(alev);
1165 : cor_hold[alev].resize(nmglevs);
1166 : for (int mglev = 0; mglev < nmglevs-1; ++mglev)
1167 : {
1168 : cor_hold[alev][mglev] = linop.make(alev, mglev, ng);
1169 : setVal(cor_hold[alev][mglev], RT(0.0));
1170 : }
1171 : }
1172 : for (int alev = 1; alev < finest_amr_lev; ++alev)
1173 : {
1174 : cor_hold[alev].resize(1);
1175 : cor_hold[alev][0] = linop.make(alev, 0, ng);
1176 : setVal(cor_hold[alev][0], RT(0.0));
1177 : }
1178 : }
1179 : }
1180 :
1181 : template <typename MF>
1182 : void
1183 : MLMGT<MF>::prepareForNSolve ()
1184 : {
1185 : if constexpr (IsMultiFabLike_v<MF>) {
1186 : ns_linop = linop.makeNLinOp(nsolve_grid_size);
1187 :
1188 : int nghost = 0;
1189 : if (cf_strategy == CFStrategy::ghostnodes) { nghost = linop.getNGrow(); }
1190 :
1191 : const BoxArray& ba = (*ns_linop).m_grids[0][0];
1192 : const DistributionMapping& dm =(*ns_linop).m_dmap[0][0];
1193 :
1194 : int ng = 1;
1195 : if (cf_strategy == CFStrategy::ghostnodes) { ng = nghost; }
1196 : ns_sol = std::make_unique<MF>(ba, dm, ncomp, ng, MFInfo(), *(ns_linop->Factory(0,0)));
1197 : ng = 0;
1198 : if (cf_strategy == CFStrategy::ghostnodes) { ng = nghost; }
1199 : ns_rhs = std::make_unique<MF>(ba, dm, ncomp, ng, MFInfo(), *(ns_linop->Factory(0,0)));
1200 : setVal(*ns_sol, RT(0.0));
1201 : setVal(*ns_rhs, RT(0.0));
1202 :
1203 : ns_linop->setLevelBC(0, ns_sol.get());
1204 :
1205 : ns_mlmg = std::make_unique<MLMGT<MF>>(*ns_linop);
1206 : ns_mlmg->setVerbose(0);
1207 : ns_mlmg->setFixedIter(1);
1208 : ns_mlmg->setMaxFmgIter(20);
1209 : ns_mlmg->setBottomSolver(BottomSolver::smoother);
1210 : }
1211 : }
1212 :
1213 : // in : Residual (res) on the finest AMR level
1214 : // out : sol on all AMR levels
1215 : template <typename MF>
1216 : void MLMGT<MF>::oneIter (int iter)
1217 : {
1218 : BL_PROFILE("MLMG::oneIter()");
1219 :
1220 : for (int alev = finest_amr_lev; alev > 0; --alev)
1221 : {
1222 : miniCycle(alev);
1223 :
1224 : IntVect nghost(0);
1225 : if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(alev)); }
1226 : LocalAdd(sol[alev], cor[alev][0], 0, 0, ncomp, nghost);
1227 :
1228 : // compute residual for the coarse AMR level
1229 : computeResWithCrseSolFineCor(alev-1,alev);
1230 :
1231 : if (alev != finest_amr_lev) {
1232 : std::swap(cor_hold[alev][0], cor[alev][0]); // save it for the up cycle
1233 : }
1234 : }
1235 :
1236 : // coarsest amr level
1237 : {
1238 : // enforce solvability if appropriate
1239 : if (linop.isSingular(0) && linop.getEnforceSingularSolvable())
1240 : {
1241 : makeSolvable(0,0,res[0][0]);
1242 : }
1243 :
1244 : if (iter < max_fmg_iters) {
1245 : mgFcycle();
1246 : } else {
1247 : mgVcycle(0, 0);
1248 : }
1249 :
1250 : IntVect nghost(0);
1251 : if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(0)); }
1252 : LocalAdd(sol[0], cor[0][0], 0, 0, ncomp, nghost);
1253 : }
1254 :
1255 : for (int alev = 1; alev <= finest_amr_lev; ++alev)
1256 : {
1257 : // (Fine AMR correction) = I(Coarse AMR correction)
1258 : interpCorrection(alev);
1259 :
1260 : IntVect nghost(0);
1261 : if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(alev)); }
1262 : LocalAdd(sol[alev], cor[alev][0], 0, 0, ncomp, nghost);
1263 :
1264 : if (alev != finest_amr_lev) {
1265 : LocalAdd(cor_hold[alev][0], cor[alev][0], 0, 0, ncomp, nghost);
1266 : }
1267 :
1268 : // Update fine AMR level correction
1269 : computeResWithCrseCorFineCor(alev);
1270 :
1271 : miniCycle(alev);
1272 :
1273 : LocalAdd(sol[alev], cor[alev][0], 0, 0, ncomp, nghost);
1274 :
1275 : if (alev != finest_amr_lev) {
1276 : LocalAdd(cor[alev][0], cor_hold[alev][0], 0, 0, ncomp, nghost);
1277 : }
1278 : }
1279 :
1280 : linop.averageDownAndSync(sol);
1281 : }
1282 :
1283 : template <typename MF>
1284 : void
1285 : MLMGT<MF>::miniCycle (int amrlev)
1286 : {
1287 : BL_PROFILE("MLMG::miniCycle()");
1288 : const int mglev = 0;
1289 : mgVcycle(amrlev, mglev);
1290 : }
1291 :
1292 : // in : Residual (res)
1293 : // out : Correction (cor) from bottom to this function's local top
1294 : template <typename MF>
1295 : void
1296 : MLMGT<MF>::mgVcycle (int amrlev, int mglev_top)
1297 : {
1298 : BL_PROFILE("MLMG::mgVcycle()");
1299 :
1300 : const int mglev_bottom = linop.NMGLevels(amrlev) - 1;
1301 :
1302 : for (int mglev = mglev_top; mglev < mglev_bottom; ++mglev)
1303 : {
1304 : BL_PROFILE_VAR("MLMG::mgVcycle_down::"+std::to_string(mglev), blp_mgv_down_lev);
1305 :
1306 : if (verbose >= 4)
1307 : {
1308 : RT norm = norminf(res[amrlev][mglev],0,ncomp,IntVect(0));
1309 : amrex::Print() << "AT LEVEL " << amrlev << " " << mglev
1310 : << " DN: Norm before smooth " << norm << "\n";
1311 : }
1312 :
1313 : setVal(cor[amrlev][mglev], RT(0.0));
1314 : bool skip_fillboundary = true;
1315 : for (int i = 0; i < nu1; ++i) {
1316 : linop.smooth(amrlev, mglev, cor[amrlev][mglev], res[amrlev][mglev], skip_fillboundary);
1317 : skip_fillboundary = false;
1318 : }
1319 :
1320 : // rescor = res - L(cor)
1321 : computeResOfCorrection(amrlev, mglev);
1322 :
1323 : if (verbose >= 4)
1324 : {
1325 : RT norm = norminf(rescor[amrlev][mglev],0,ncomp,IntVect(0));
1326 : amrex::Print() << "AT LEVEL " << amrlev << " " << mglev
1327 : << " DN: Norm after smooth " << norm << "\n";
1328 : }
1329 :
1330 : // res_crse = R(rescor_fine); this provides res/b to the level below
1331 : linop.restriction(amrlev, mglev+1, res[amrlev][mglev+1], rescor[amrlev][mglev]);
1332 : }
1333 :
1334 : BL_PROFILE_VAR("MLMG::mgVcycle_bottom", blp_bottom);
1335 : if (amrlev == 0)
1336 : {
1337 : if (verbose >= 4)
1338 : {
1339 : RT norm = norminf(res[amrlev][mglev_bottom],0,ncomp,IntVect(0));
1340 : amrex::Print() << "AT LEVEL " << amrlev << " " << mglev_bottom
1341 : << " DN: Norm before bottom " << norm << "\n";
1342 : }
1343 : bottomSolve();
1344 : if (verbose >= 4)
1345 : {
1346 : computeResOfCorrection(amrlev, mglev_bottom);
1347 : RT norm = norminf(rescor[amrlev][mglev_bottom],0,ncomp,IntVect(0));
1348 : amrex::Print() << "AT LEVEL " << amrlev << " " << mglev_bottom
1349 : << " UP: Norm after bottom " << norm << "\n";
1350 : }
1351 : }
1352 : else
1353 : {
1354 : if (verbose >= 4)
1355 : {
1356 : RT norm = norminf(res[amrlev][mglev_bottom],0,ncomp,IntVect(0));
1357 : amrex::Print() << "AT LEVEL " << amrlev << " " << mglev_bottom
1358 : << " Norm before smooth " << norm << "\n";
1359 : }
1360 : setVal(cor[amrlev][mglev_bottom], RT(0.0));
1361 : bool skip_fillboundary = true;
1362 : for (int i = 0; i < nu1; ++i) {
1363 : linop.smooth(amrlev, mglev_bottom, cor[amrlev][mglev_bottom],
1364 : res[amrlev][mglev_bottom], skip_fillboundary);
1365 : skip_fillboundary = false;
1366 : }
1367 : if (verbose >= 4)
1368 : {
1369 : computeResOfCorrection(amrlev, mglev_bottom);
1370 : RT norm = norminf(rescor[amrlev][mglev_bottom],0,ncomp,IntVect(0));
1371 : amrex::Print() << "AT LEVEL " << amrlev << " " << mglev_bottom
1372 : << " Norm after smooth " << norm << "\n";
1373 : }
1374 : }
1375 : BL_PROFILE_VAR_STOP(blp_bottom);
1376 :
1377 : for (int mglev = mglev_bottom-1; mglev >= mglev_top; --mglev)
1378 : {
1379 : BL_PROFILE_VAR("MLMG::mgVcycle_up::"+std::to_string(mglev), blp_mgv_up_lev);
1380 : // cor_fine += I(cor_crse)
1381 : addInterpCorrection(amrlev, mglev);
1382 : if (verbose >= 4)
1383 : {
1384 : computeResOfCorrection(amrlev, mglev);
1385 : RT norm = norminf(rescor[amrlev][mglev],0,ncomp,IntVect(0));
1386 : amrex::Print() << "AT LEVEL " << amrlev << " " << mglev
1387 : << " UP: Norm before smooth " << norm << "\n";
1388 : }
1389 : for (int i = 0; i < nu2; ++i) {
1390 : linop.smooth(amrlev, mglev, cor[amrlev][mglev], res[amrlev][mglev]);
1391 : }
1392 :
1393 : if (cf_strategy == CFStrategy::ghostnodes) { computeResOfCorrection(amrlev, mglev); }
1394 :
1395 : if (verbose >= 4)
1396 : {
1397 : computeResOfCorrection(amrlev, mglev);
1398 : RT norm = norminf(rescor[amrlev][mglev],0,ncomp,IntVect(0));
1399 : amrex::Print() << "AT LEVEL " << amrlev << " " << mglev
1400 : << " UP: Norm after smooth " << norm << "\n";
1401 : }
1402 : }
1403 : }
1404 :
1405 : // FMG cycle on the coarsest AMR level.
1406 : // in: Residual on the top MG level (i.e., 0)
1407 : // out: Correction (cor) on all MG levels
1408 : template <typename MF>
1409 : void
1410 : MLMGT<MF>::mgFcycle ()
1411 : {
1412 : BL_PROFILE("MLMG::mgFcycle()");
1413 :
1414 : #ifdef AMREX_USE_EB
1415 : AMREX_ASSERT(linop.isCellCentered());
1416 : #endif
1417 :
1418 : const int amrlev = 0;
1419 : const int mg_bottom_lev = linop.NMGLevels(amrlev) - 1;
1420 : IntVect nghost(0);
1421 : if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(amrlev)); }
1422 :
1423 : for (int mglev = 1; mglev <= mg_bottom_lev; ++mglev)
1424 : {
1425 : linop.avgDownResMG(mglev, res[amrlev][mglev], res[amrlev][mglev-1]);
1426 : }
1427 :
1428 : bottomSolve();
1429 :
1430 : for (int mglev = mg_bottom_lev-1; mglev >= 0; --mglev)
1431 : {
1432 : // cor_fine = I(cor_crse)
1433 : interpCorrection(amrlev, mglev);
1434 :
1435 : // rescor = res - L(cor)
1436 : computeResOfCorrection(amrlev, mglev);
1437 : // res = rescor; this provides b to the vcycle below
1438 : LocalCopy(res[amrlev][mglev], rescor[amrlev][mglev], 0, 0, ncomp, nghost);
1439 :
1440 : // save cor; do v-cycle; add the saved to cor
1441 : std::swap(cor[amrlev][mglev], cor_hold[amrlev][mglev]);
1442 : mgVcycle(amrlev, mglev);
1443 : LocalAdd(cor[amrlev][mglev], cor_hold[amrlev][mglev], 0, 0, ncomp, nghost);
1444 : }
1445 : }
1446 :
1447 : // At the true bottom of the coarsest AMR level.
1448 : // in : Residual (res) as b
1449 : // out : Correction (cor) as x
1450 : template <typename MF>
1451 : void
1452 : MLMGT<MF>::bottomSolve ()
1453 : {
1454 : if (do_nsolve)
1455 : {
1456 : NSolve(*ns_mlmg, *ns_sol, *ns_rhs);
1457 : }
1458 : else
1459 : {
1460 : actualBottomSolve();
1461 : }
1462 : }
1463 :
1464 : template <typename MF>
1465 : void
1466 : MLMGT<MF>::NSolve (MLMGT<MF>& a_solver, MF& a_sol, MF& a_rhs)
1467 : {
1468 : BL_PROFILE("MLMG::NSolve()");
1469 :
1470 : setVal(a_sol, RT(0.0));
1471 :
1472 : MF const& res_bottom = res[0].back();
1473 : if (BoxArray::SameRefs(boxArray(a_rhs),boxArray(res_bottom)) &&
1474 : DistributionMapping::SameRefs(DistributionMap(a_rhs),DistributionMap(res_bottom)))
1475 : {
1476 : LocalCopy(a_rhs, res_bottom, 0, 0, ncomp, IntVect(0));
1477 : } else {
1478 : setVal(a_rhs, RT(0.0));
1479 : ParallelCopy(a_rhs, res_bottom, 0, 0, ncomp);
1480 : }
1481 :
1482 : a_solver.solve(Vector<MF*>{&a_sol}, Vector<MF const*>{&a_rhs},
1483 : RT(-1.0), RT(-1.0));
1484 :
1485 : linop.copyNSolveSolution(cor[0].back(), a_sol);
1486 : }
1487 :
1488 : template <typename MF>
1489 : void
1490 : MLMGT<MF>::actualBottomSolve ()
1491 : {
1492 : BL_PROFILE("MLMG::actualBottomSolve()");
1493 :
1494 : if (!linop.isBottomActive()) { return; }
1495 :
1496 : auto bottom_start_time = amrex::second();
1497 :
1498 : ParallelContext::push(linop.BottomCommunicator());
1499 :
1500 : const int amrlev = 0;
1501 : const int mglev = linop.NMGLevels(amrlev) - 1;
1502 : auto& x = cor[amrlev][mglev];
1503 : auto& b = res[amrlev][mglev];
1504 :
1505 : setVal(x, RT(0.0));
1506 :
1507 : if (bottom_solver == BottomSolver::smoother)
1508 : {
1509 : bool skip_fillboundary = true;
1510 : for (int i = 0; i < nuf; ++i) {
1511 : linop.smooth(amrlev, mglev, x, b, skip_fillboundary);
1512 : skip_fillboundary = false;
1513 : }
1514 : }
1515 : else
1516 : {
1517 : MF* bottom_b = &b;
1518 : MF raii_b;
1519 : if (linop.isBottomSingular() && linop.getEnforceSingularSolvable())
1520 : {
1521 : const IntVect ng = nGrowVect(b);
1522 : raii_b = linop.make(amrlev, mglev, ng);
1523 : LocalCopy(raii_b, b, 0, 0, ncomp, ng);
1524 : bottom_b = &raii_b;
1525 :
1526 : makeSolvable(amrlev,mglev,*bottom_b);
1527 : }
1528 :
1529 : if (bottom_solver == BottomSolver::hypre)
1530 : {
1531 : #if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
1532 : if constexpr (std::is_same<MF,MultiFab>()) {
1533 : bottomSolveWithHypre(x, *bottom_b);
1534 : } else
1535 : #endif
1536 : {
1537 : amrex::Abort("Using Hypre as bottom solver not supported in this case");
1538 : }
1539 : }
1540 : else if (bottom_solver == BottomSolver::petsc)
1541 : {
1542 : #if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
1543 : if constexpr (std::is_same<MF,MultiFab>()) {
1544 : bottomSolveWithPETSc(x, *bottom_b);
1545 : } else
1546 : #endif
1547 : {
1548 : amrex::Abort("Using PETSc as bottom solver not supported in this case");
1549 : }
1550 : }
1551 : else
1552 : {
1553 : typename MLCGSolverT<MF>::Type cg_type;
1554 : if (bottom_solver == BottomSolver::cg ||
1555 : bottom_solver == BottomSolver::cgbicg) {
1556 : cg_type = MLCGSolverT<MF>::Type::CG;
1557 : } else {
1558 : cg_type = MLCGSolverT<MF>::Type::BiCGStab;
1559 : }
1560 :
1561 : int ret = bottomSolveWithCG(x, *bottom_b, cg_type);
1562 :
1563 : if (ret != 0 && (bottom_solver == BottomSolver::cgbicg ||
1564 : bottom_solver == BottomSolver::bicgcg))
1565 : {
1566 : if (bottom_solver == BottomSolver::cgbicg) {
1567 : cg_type = MLCGSolverT<MF>::Type::BiCGStab; // switch to bicg
1568 : } else {
1569 : cg_type = MLCGSolverT<MF>::Type::CG; // switch to cg
1570 : }
1571 : setVal(cor[amrlev][mglev], RT(0.0));
1572 : ret = bottomSolveWithCG(x, *bottom_b, cg_type);
1573 : if (ret == 0) { // switch permanently
1574 : if (cg_type == MLCGSolverT<MF>::Type::CG) {
1575 : bottom_solver = BottomSolver::cg;
1576 : } else {
1577 : bottom_solver = BottomSolver::bicgstab;
1578 : }
1579 : }
1580 : }
1581 :
1582 : // If the bottom solve failed then set the correction to zero
1583 : if (ret != 0 && ret != 9) {
1584 : setVal(cor[amrlev][mglev], RT(0.0));
1585 : }
1586 : const int n = (ret==0) ? nub : nuf;
1587 : for (int i = 0; i < n; ++i) {
1588 : linop.smooth(amrlev, mglev, x, b);
1589 : }
1590 : }
1591 : }
1592 :
1593 : ParallelContext::pop();
1594 :
1595 : if (! timer.empty()) {
1596 : timer[bottom_time] += amrex::second() - bottom_start_time;
1597 : }
1598 : }
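// A configuration sketch for the fallback logic above: starting with CG and
// switching permanently to BiCGStab if CG fails (or the reverse with bicgcg)
// is requested on the solver object, e.g.
//
//     mlmg.setBottomSolver(MLMG::BottomSolver::cgbicg);
//     mlmg.setBottomMaxIter(100);
//     mlmg.setBottomTolerance(Real(1.e-4)); // relative; see bottom_reltol above
//
// where mlmg is an MLMGT instance prepared as in solve().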
1599 :
1600 : template <typename MF>
1601 : int
1602 : MLMGT<MF>::bottomSolveWithCG (MF& x, const MF& b, typename MLCGSolverT<MF>::Type type)
1603 : {
1604 : MLCGSolverT<MF> cg_solver(linop);
1605 : cg_solver.setSolver(type);
1606 : cg_solver.setVerbose(bottom_verbose);
1607 : cg_solver.setMaxIter(bottom_maxiter);
1608 : cg_solver.setInitSolnZeroed(true);
1609 : if (cf_strategy == CFStrategy::ghostnodes) { cg_solver.setNGhost(linop.getNGrow()); }
1610 :
1611 : int ret = cg_solver.solve(x, b, bottom_reltol, bottom_abstol);
1612 : if (ret != 0 && verbose > 1) {
1613 : amrex::Print() << "MLMG: Bottom solve failed.\n";
1614 : }
1615 : m_niters_cg.push_back(cg_solver.getNumIters());
1616 : return ret;
1617 : }
1618 :
1619 : // Compute multi-level Residual (res) up to amrlevmax.
1620 : template <typename MF>
1621 : void
1622 : MLMGT<MF>::computeMLResidual (int amrlevmax)
1623 : {
1624 : BL_PROFILE("MLMG::computeMLResidual()");
1625 :
1626 : const int mglev = 0;
1627 : for (int alev = amrlevmax; alev >= 0; --alev) {
1628 : const MF* crse_bcdata = (alev > 0) ? &(sol[alev-1]) : nullptr;
1629 : linop.solutionResidual(alev, res[alev][mglev], sol[alev], rhs[alev], crse_bcdata);
1630 : if (alev < finest_amr_lev) {
1631 : linop.reflux(alev, res[alev][mglev], sol[alev], rhs[alev],
1632 : res[alev+1][mglev], sol[alev+1], rhs[alev+1]);
1633 : }
1634 : }
1635 : }
1636 :
1637 : // Compute single AMR level residual without masking.
1638 : template <typename MF>
1639 : void
1640 : MLMGT<MF>::computeResidual (int alev)
1641 : {
1642 : BL_PROFILE("MLMG::computeResidual()");
1643 : const MF* crse_bcdata = (alev > 0) ? &(sol[alev-1]) : nullptr;
1644 : linop.solutionResidual(alev, res[alev][0], sol[alev], rhs[alev], crse_bcdata);
1645 : }
1646 :
1647 : // Compute coarse AMR level composite residual with coarse solution and fine correction
1648 : template <typename MF>
1649 : void
1650 : MLMGT<MF>::computeResWithCrseSolFineCor (int calev, int falev)
1651 : {
1652 : BL_PROFILE("MLMG::computeResWithCrseSolFineCor()");
1653 :
1654 : IntVect nghost(0);
1655 : if (cf_strategy == CFStrategy::ghostnodes) {
1656 : nghost = IntVect(std::min(linop.getNGrow(falev),linop.getNGrow(calev)));
1657 : }
1658 :
1659 : MF& crse_sol = sol[calev];
1660 : const MF& crse_rhs = rhs[calev];
1661 : MF& crse_res = res[calev][0];
1662 :
1663 : MF& fine_sol = sol[falev];
1664 : const MF& fine_rhs = rhs[falev];
1665 : MF& fine_cor = cor[falev][0];
1666 : MF& fine_res = res[falev][0];
1667 : MF& fine_rescor = rescor[falev][0];
1668 :
1669 : const MF* crse_bcdata = (calev > 0) ? &(sol[calev-1]) : nullptr;
1670 : linop.solutionResidual(calev, crse_res, crse_sol, crse_rhs, crse_bcdata);
1671 :
1672 : linop.correctionResidual(falev, 0, fine_rescor, fine_cor, fine_res, BCMode::Homogeneous);
1673 : LocalCopy(fine_res, fine_rescor, 0, 0, ncomp, nghost);
1674 :
1675 : linop.reflux(calev, crse_res, crse_sol, crse_rhs, fine_res, fine_sol, fine_rhs);
1676 :
1677 : linop.avgDownResAmr(calev, crse_res, fine_res);
1678 : }
1679 :
1680 : // Compute fine AMR level residual fine_res = fine_res - L(fine_cor) with coarse providing BC.
1681 : template <typename MF>
1682 : void
1683 : MLMGT<MF>::computeResWithCrseCorFineCor (int falev)
1684 : {
1685 : BL_PROFILE("MLMG::computeResWithCrseCorFineCor()");
1686 :
1687 : IntVect nghost(0);
1688 : if (cf_strategy == CFStrategy::ghostnodes) {
1689 : nghost = IntVect(linop.getNGrow(falev));
1690 : }
1691 :
1692 : const MF& crse_cor = cor[falev-1][0];
1693 :
1694 : MF& fine_cor = cor [falev][0];
1695 : MF& fine_res = res [falev][0];
1696 : MF& fine_rescor = rescor[falev][0];
1697 :
1698 : // fine_rescor = fine_res - L(fine_cor)
1699 : linop.correctionResidual(falev, 0, fine_rescor, fine_cor, fine_res,
1700 : BCMode::Inhomogeneous, &crse_cor);
1701 : LocalCopy(fine_res, fine_rescor, 0, 0, ncomp, nghost);
1702 : }
1703 :
1704 : // Interpolate correction from coarse to fine AMR level.
1705 : template <typename MF>
1706 : void
1707 : MLMGT<MF>::interpCorrection (int alev)
1708 : {
1709 : BL_PROFILE("MLMG::interpCorrection_1");
1710 :
1711 : IntVect nghost(0);
1712 : if (cf_strategy == CFStrategy::ghostnodes) {
1713 : nghost = IntVect(linop.getNGrow(alev));
1714 : }
1715 :
1716 : MF const& crse_cor = cor[alev-1][0];
1717 : MF & fine_cor = cor[alev ][0];
1718 :
1719 : const Geometry& crse_geom = linop.Geom(alev-1,0);
1720 :
1721 : int ng_src = 0;
1722 : int ng_dst = linop.isCellCentered() ? 1 : 0;
1723 : if (cf_strategy == CFStrategy::ghostnodes)
1724 : {
1725 : ng_src = linop.getNGrow(alev-1);
1726 : ng_dst = linop.getNGrow(alev-1);
1727 : }
1728 :
1729 : MF cfine = linop.makeCoarseAmr(alev, IntVect(ng_dst));
1730 : setVal(cfine, RT(0.0));
1731 : ParallelCopy(cfine, crse_cor, 0, 0, ncomp, IntVect(ng_src), IntVect(ng_dst),
1732 : crse_geom.periodicity());
1733 :
1734 : linop.interpolationAmr(alev, fine_cor, cfine, nghost); // NOLINT(readability-suspicious-call-argument)
1735 : }
1736 :
1737 : // Interpolate correction between MG levels
1738 : // inout: Correction (cor) on coarse MG lev. (out due to FillBoundary)
1739 : // out : Correction (cor) on fine MG lev.
1740 : template <typename MF>
1741 : void
1742 : MLMGT<MF>::interpCorrection (int alev, int mglev)
1743 : {
1744 : BL_PROFILE("MLMG::interpCorrection_2");
1745 :
1746 : MF& crse_cor = cor[alev][mglev+1];
1747 : MF& fine_cor = cor[alev][mglev ];
1748 : linop.interpAssign(alev, mglev, fine_cor, crse_cor);
1749 : }
1750 :
1751 : // (Fine MG level correction) += I(Coarse MG level correction)
1752 : template <typename MF>
1753 : void
1754 : MLMGT<MF>::addInterpCorrection (int alev, int mglev)
1755 : {
1756 : BL_PROFILE("MLMG::addInterpCorrection()");
1757 :
1758 : const MF& crse_cor = cor[alev][mglev+1];
1759 : MF& fine_cor = cor[alev][mglev ];
1760 :
1761 : MF cfine;
1762 : const MF* cmf;
1763 :
1764 : if (linop.isMFIterSafe(alev, mglev, mglev+1))
1765 : {
1766 : cmf = &crse_cor;
1767 : }
1768 : else
1769 : {
1770 : cfine = linop.makeCoarseMG(alev, mglev, IntVect(0));
1771 : ParallelCopy(cfine, crse_cor, 0, 0, ncomp);
1772 : cmf = &cfine;
1773 : }
1774 :
1775 : linop.interpolation(alev, mglev, fine_cor, *cmf);
1776 : }
1777 :
1778 : // Compute rescor = res - L(cor)
1779 : // in : res
1780 : // inout: cor (out due to FillBoundary in linop.correctionResidual)
1781 : // out : rescor
1782 : template <typename MF>
1783 : void
1784 : MLMGT<MF>::computeResOfCorrection (int amrlev, int mglev)
1785 : {
1786 : BL_PROFILE("MLMG:computeResOfCorrection()");
1787 : MF & x = cor[amrlev][mglev];
1788 : const MF& b = res[amrlev][mglev];
1789 : MF & r = rescor[amrlev][mglev];
1790 : linop.correctionResidual(amrlev, mglev, r, x, b, BCMode::Homogeneous);
1791 : }
1792 :
1793 : // Compute single-level masked inf-norm of Residual (res).
1794 : template <typename MF>
1795 : auto
1796 : MLMGT<MF>::ResNormInf (int alev, bool local) -> RT
1797 : {
1798 : BL_PROFILE("MLMG::ResNormInf()");
1799 : return linop.normInf(alev, res[alev][0], local);
1800 : }
1801 :
1802 : // Compute multi-level masked inf-norm of Residual (res).
1803 : template <typename MF>
1804 : auto
1805 : MLMGT<MF>::MLResNormInf (int alevmax, bool local) -> RT
1806 : {
1807 : BL_PROFILE("MLMG::MLResNormInf()");
1808 : RT r = RT(0.0);
1809 : for (int alev = 0; alev <= alevmax; ++alev)
1810 : {
1811 : r = std::max(r, ResNormInf(alev,true));
1812 : }
1813 : if (!local) { ParallelAllReduce::Max(r, ParallelContext::CommunicatorSub()); }
1814 : return r;
1815 : }
1816 :
1817 : // Compute multi-level masked inf-norm of RHS (rhs).
1818 : template <typename MF>
1819 : auto
1820 : MLMGT<MF>::MLRhsNormInf (bool local) -> RT
1821 : {
1822 : BL_PROFILE("MLMG::MLRhsNormInf()");
1823 : RT r = RT(0.0);
1824 : for (int alev = 0; alev <= finest_amr_lev; ++alev) {
1825 : auto t = linop.normInf(alev, rhs[alev], true);
1826 : r = std::max(r, t);
1827 : }
1828 : if (!local) { ParallelAllReduce::Max(r, ParallelContext::CommunicatorSub()); }
1829 : return r;
1830 : }
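// Sketch of how these inf-norms typically enter the convergence test in solve()
// (not shown in this section); the guards involving always_use_bnorm and the
// initial residual norm are omitted here and assumed:
//
//     RT bnorm  = MLRhsNormInf();                         // scale set by the RHS
//     RT target = std::max(a_tol_abs, a_tol_rel * bnorm);
//     bool converged = (MLResNormInf(finest_amr_lev) <= target);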
1831 :
1832 : template <typename MF>
1833 : void
1834 : MLMGT<MF>::makeSolvable ()
1835 : {
1836 : auto const& offset = linop.getSolvabilityOffset(0, 0, rhs[0]);
1837 : if (verbose >= 4) {
1838 : for (int c = 0; c < ncomp; ++c) {
1839 : amrex::Print() << "MLMG: Subtracting " << offset[c] << " from rhs component "
1840 : << c << "\n";
1841 : }
1842 : }
1843 : for (int alev = 0; alev < namrlevs; ++alev) {
1844 : linop.fixSolvabilityByOffset(alev, 0, rhs[alev], offset);
1845 : }
1846 : }
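// makeSolvable handles singular problems (e.g. Poisson with all-Neumann or fully
// periodic boundaries), where a solution exists only if the right-hand side has
// zero (suitably weighted) mean. The offset returned by getSolvabilityOffset is
// that mean, and subtracting it projects rhs onto the solvable subspace. A sketch
// of the user-facing toggle, assuming the MLLinOp::setEnforceSingularSolvable
// setter exists with this signature:
//
//     linop.setEnforceSingularSolvable(false); // keep rhs as given; skip the offset fix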
1847 :
1848 : template <typename MF>
1849 : void
1850 : MLMGT<MF>::makeSolvable (int amrlev, int mglev, MF& mf)
1851 : {
1852 : auto const& offset = linop.getSolvabilityOffset(amrlev, mglev, mf);
1853 : if (verbose >= 4) {
1854 : for (int c = 0; c < ncomp; ++c) {
1855 : amrex::Print() << "MLMG: Subtracting " << offset[c]
1856 : << " from mf component c = " << c
1857 : << " on level (" << amrlev << ", " << mglev << ")\n";
1858 : }
1859 : }
1860 : linop.fixSolvabilityByOffset(amrlev, mglev, mf, offset);
1861 : }
1862 :
1863 : #if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
1864 : template <typename MF>
1865 : template <class TMF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int>>
1866 : void
1867 : MLMGT<MF>::bottomSolveWithHypre (MF& x, const MF& b)
1868 : {
1869 : const int amrlev = 0;
1870 : const int mglev = linop.NMGLevels(amrlev) - 1;
1871 :
1872 : AMREX_ALWAYS_ASSERT_WITH_MESSAGE(ncomp == 1, "bottomSolveWithHypre doesn't work with ncomp > 1");
1873 :
1874 : if (linop.isCellCentered())
1875 : {
1876 : if (hypre_solver == nullptr) // build once; reuse the setup on subsequent bottom solves
1877 : {
1878 : hypre_solver = linop.makeHypre(hypre_interface);
1879 :
1880 : hypre_solver->setVerbose(bottom_verbose);
1881 : if (hypre_interface == amrex::Hypre::Interface::ij) {
1882 : hypre_solver->setHypreOptionsNamespace(hypre_options_namespace);
1883 : } else {
1884 : hypre_solver->setHypreOldDefault(hypre_old_default);
1885 : hypre_solver->setHypreRelaxType(hypre_relax_type);
1886 : hypre_solver->setHypreRelaxOrder(hypre_relax_order);
1887 : hypre_solver->setHypreNumSweeps(hypre_num_sweeps);
1888 : hypre_solver->setHypreStrongThreshold(hypre_strong_threshold);
1889 : }
1890 :
1891 : const BoxArray& ba = linop.m_grids[amrlev].back();
1892 : const DistributionMapping& dm = linop.m_dmap[amrlev].back();
1893 : const Geometry& geom = linop.m_geom[amrlev].back();
1894 :
1895 : hypre_bndry = std::make_unique<MLMGBndryT<MF>>(ba, dm, ncomp, geom);
1896 : hypre_bndry->setHomogValues();
1897 : const Real* dx = linop.m_geom[0][0].CellSize();
1898 : IntVect crse_ratio = linop.m_coarse_data_crse_ratio.allGT(0) ? linop.m_coarse_data_crse_ratio : IntVect(1);
1899 : RealVect bclocation(AMREX_D_DECL(0.5*dx[0]*crse_ratio[0],
1900 : 0.5*dx[1]*crse_ratio[1],
1901 : 0.5*dx[2]*crse_ratio[2]));
1902 : hypre_bndry->setLOBndryConds(linop.m_lobc, linop.m_hibc, IntVect(-1), bclocation,
1903 : linop.m_coarse_fine_bc_type);
1904 : }
1905 :
1906 : // Only the IJ interface understands hypre's absolute-tolerance API; pass -1 otherwise.
1907 : amrex::Real hypre_abstol =
1908 : (hypre_interface == amrex::Hypre::Interface::ij)
1909 : ? bottom_abstol : Real(-1.0);
1910 : hypre_solver->solve(
1911 : x, b, bottom_reltol, hypre_abstol, bottom_maxiter, *hypre_bndry,
1912 : linop.getMaxOrder());
1913 : }
1914 : else
1915 : {
1916 : if (hypre_node_solver == nullptr)
1917 : {
1918 : hypre_node_solver =
1919 : linop.makeHypreNodeLap(bottom_verbose, hypre_options_namespace);
1920 : }
1921 : hypre_node_solver->solve(x, b, bottom_reltol, bottom_abstol, bottom_maxiter);
1922 : }
1923 :
1924 : // For singular problems, hypre may add a large constant to all values of the solution.
1925 : // For precision reasons we enforce that the average of the correction from hypre is zero.
1926 : if (linop.isSingular(amrlev) && linop.getEnforceSingularSolvable())
1927 : {
1928 : makeSolvable(amrlev, mglev, x);
1929 : }
1930 : }
1931 : #endif
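// Usage sketch, assuming setHypreInterface and setHypreOptionsNamespace are the
// public MLMG setters behind hypre_interface and hypre_options_namespace used above:
//
//     mlmg.setBottomSolver(MLMG::BottomSolver::hypre);
//     mlmg.setHypreInterface(Hypre::Interface::ij);   // ij, or a structured interface
//     mlmg.setHypreOptionsNamespace("hypre");         // read hypre.* ParmParse options
//
// BottomSolver::petsc instead routes the bottom solve through bottomSolveWithPETSc.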
1932 :
1933 : #if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
1934 : template <typename MF>
1935 : template <class TMF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int>>
1936 : void
1937 : MLMGT<MF>::bottomSolveWithPETSc (MF& x, const MF& b)
1938 : {
1939 : AMREX_ALWAYS_ASSERT_WITH_MESSAGE(ncomp == 1, "bottomSolveWithPETSc doesn't work with ncomp > 1");
1940 :
1941 : if (petsc_solver == nullptr)
1942 : {
1943 : petsc_solver = linop.makePETSc();
1944 : petsc_solver->setVerbose(bottom_verbose);
1945 :
1946 : const BoxArray& ba = linop.m_grids[0].back();
1947 : const DistributionMapping& dm = linop.m_dmap[0].back();
1948 : const Geometry& geom = linop.m_geom[0].back();
1949 :
1950 : petsc_bndry = std::make_unique<MLMGBndryT<MF>>(ba, dm, ncomp, geom);
1951 : petsc_bndry->setHomogValues();
1952 : const Real* dx = linop.m_geom[0][0].CellSize();
1953 : auto crse_ratio = linop.m_coarse_data_crse_ratio.allGT(0) ? linop.m_coarse_data_crse_ratio : IntVect(1);
1954 : RealVect bclocation(AMREX_D_DECL(0.5*dx[0]*crse_ratio[0],
1955 : 0.5*dx[1]*crse_ratio[1],
1956 : 0.5*dx[2]*crse_ratio[2]));
1957 : petsc_bndry->setLOBndryConds(linop.m_lobc, linop.m_hibc, IntVect(-1), bclocation,
1958 : linop.m_coarse_fine_bc_type);
1959 : }
1960 : petsc_solver->solve(x, b, bottom_reltol, Real(-1.), bottom_maxiter, *petsc_bndry,
1961 : linop.getMaxOrder());
1962 : }
1963 : #endif
1964 :
1965 : template <typename MF>
1966 : void
1967 : MLMGT<MF>::checkPoint (const Vector<MultiFab*>& a_sol,
1968 : const Vector<MultiFab const*>& a_rhs,
1969 : RT a_tol_rel, RT a_tol_abs, const char* a_file_name) const
1970 : {
1971 : std::string file_name(a_file_name);
1972 : UtilCreateCleanDirectory(file_name, false);
1973 :
1974 : if (ParallelContext::IOProcessorSub())
1975 : {
1976 : std::string HeaderFileName(std::string(a_file_name)+"/Header");
1977 : std::ofstream HeaderFile;
1978 : HeaderFile.open(HeaderFileName.c_str(), std::ofstream::out |
1979 : std::ofstream::trunc |
1980 : std::ofstream::binary);
1981 : if( ! HeaderFile.good()) {
1982 : FileOpenFailed(HeaderFileName);
1983 : }
1984 :
1985 : HeaderFile.precision(17);
1986 :
1987 : HeaderFile << linop.name() << "\n"
1988 : << "a_tol_rel = " << a_tol_rel << "\n"
1989 : << "a_tol_abs = " << a_tol_abs << "\n"
1990 : << "verbose = " << verbose << "\n"
1991 : << "max_iters = " << max_iters << "\n"
1992 : << "nu1 = " << nu1 << "\n"
1993 : << "nu2 = " << nu2 << "\n"
1994 : << "nuf = " << nuf << "\n"
1995 : << "nub = " << nub << "\n"
1996 : << "max_fmg_iters = " << max_fmg_iters << "\n"
1997 : << "bottom_solver = " << static_cast<int>(bottom_solver) << "\n"
1998 : << "bottom_verbose = " << bottom_verbose << "\n"
1999 : << "bottom_maxiter = " << bottom_maxiter << "\n"
2000 : << "bottom_reltol = " << bottom_reltol << "\n"
2001 : << "always_use_bnorm = " << always_use_bnorm << "\n"
2002 : << "namrlevs = " << namrlevs << "\n"
2003 : << "finest_amr_lev = " << finest_amr_lev << "\n"
2004 : << "linop_prepared = " << linop_prepared << "\n"
2005 : << "solve_called = " << solve_called << "\n";
2006 :
2007 : for (int ilev = 0; ilev <= finest_amr_lev; ++ilev) {
2008 : UtilCreateCleanDirectory(file_name+"/Level_"+std::to_string(ilev), false);
2009 : }
2010 : }
2011 :
2012 : ParallelContext::BarrierSub();
2013 :
2014 : for (int ilev = 0; ilev <= finest_amr_lev; ++ilev) {
2015 : VisMF::Write(*a_sol[ilev], file_name+"/Level_"+std::to_string(ilev)+"/sol");
2016 : VisMF::Write(*a_rhs[ilev], file_name+"/Level_"+std::to_string(ilev)+"/rhs");
2017 : }
2018 :
2019 : linop.checkPoint(file_name+"/linop");
2020 : }
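// Sketch: checkPoint is reached through the optional checkpoint_file argument of
// solve(); the file name below is illustrative only.
//
//     mlmg.solve({&phi}, {&rhs}, Real(1.e-10), Real(0.0), "mlmg_debug_chk");
//
// This writes a Header with the solver settings plus per-level sol and rhs
// MultiFabs and the linop state under the named directory.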
2021 :
2022 : extern template class MLMGT<MultiFab>;
2023 :
2024 : using MLMG = MLMGT<MultiFab>;
2025 :
2026 : }
2027 :
2028 : #endif