2 #include <AMReX_MLNodeLinOp.H>
3 #include <AMReX_MLCellLinOp.H>
4 #include <AMReX_MLNodeLap_K.H>
5 #include <AMReX_MultiFabUtil.H>
10 using namespace amrex;
// Out-of-class definition of the static constexpr stencil offset vectors
// (dx, dy, dz) declared in Operator<Grid::Cell>; AMREX_D_DECL expands to
// one entry per spatial dimension at compile time.
14 constexpr amrex::IntVect
AMREX_D_DECL(Operator<Grid::Cell>::dx, Operator<Grid::Cell>::dy, Operator<Grid::Cell>::dz);
// Lazily (re)computes the cached operator diagonal on every AMR and
// multigrid level.  No-op if already computed unless `recompute` forces it.
19 if (!recompute && m_diagonal_computed)
return;
// Mark done up-front; the per-level Diagonal() calls below fill m_diag.
20 m_diagonal_computed =
true;
22 for (
int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev)
24 for (
int mglev = 0; mglev < m_num_mg_levels[amrlev]; ++mglev)
// Delegates to the (amrlev, mglev, diag) overload for the actual work.
26 Diagonal(amrlev, mglev, *m_diag[amrlev][mglev]);
// Computes the diagonal of the (matrix-free) operator by probing:
// x is set to a comb of separated unit impulses, Fapply gives Ax, and
// masking Ax by x isolates diagonal entries, accumulated into `diag`.
33 BL_PROFILE(
"Operator::Diagonal()");
36 int ncomp = diag.nComp();
// `num` = sep^DIM probe patterns so impulses don't overlap within a stencil.
40 int num = AMREX_D_TERM(sep, *sep, *sep);
// Scratch MultiFabs laid out identically to the diagonal storage.
43 amrex::MultiFab x(m_diag[amrlev][mglev]->boxArray(), m_diag[amrlev][mglev]->DistributionMap(), ncomp, nghost);
44 amrex::MultiFab Ax(m_diag[amrlev][mglev]->boxArray(), m_diag[amrlev][mglev]->DistributionMap(), ncomp, nghost);
// Non-tiling MFIter: the probe/apply sequence needs whole valid boxes.
46 for (MFIter mfi(x,
false); mfi.isValid(); ++mfi)
48 const Box& bx = mfi.validbox();
49 amrex::FArrayBox& diagfab = diag[mfi];
50 amrex::FArrayBox& xfab = x[mfi];
51 amrex::FArrayBox& Axfab = Ax[mfi];
55 for (
int i = 0; i < num; i++)
57 for (
int n = 0; n < ncomp; n++)
// Build the i-th probe pattern over the valid box (dimension-expanded loops).
63 AMREX_D_TERM(
for (
int m1 = bx.loVect()[0]; m1 <= bx.hiVect()[0]; m1++),
64 for (
int m2 = bx.loVect()[1]; m2 <= bx.hiVect()[1]; m2++),
65 for (
int m3 = bx.loVect()[2]; m3 <= bx.hiVect()[2]; m3++))
// NOTE(review): selection uses only m1/m2 (i/sep, i%sep) — looks 2D-specific;
// confirm whether m3 should participate when AMREX_SPACEDIM == 3.
69 if (m1 % sep == i / sep && m2 % sep == i % sep) xfab(m, n) = 1.0;
70 else xfab(m, n) = 0.0;
74 BL_PROFILE_VAR(
"Operator::Part2", part2);
// Apply the operator to the probe vector.
76 Fapply(amrlev, mglev, Ax, x);
77 BL_PROFILE_VAR_STOP(part2);
// Mask Ax by x (zero except at probe points) and accumulate into diag.
80 Axfab.mult(xfab, n, n, 1);
81 diagfab.plus(Axfab, n, n, 1);
// Weighted-Jacobi smoother: x <- (1-omega)*x + omega*(b - (A-D)x)/D,
// where D is the cached diagonal.  Requires Diagonal() to have run.
90 BL_PROFILE(
"Operator::Fsmooth()");
92 amrex::Box domain(m_geom[amrlev][mglev].Domain());
94 int ncomp = b.nComp();
// Ax = A*x, Dx = D*x, Rx = (A-D)*x (off-diagonal contribution).
98 amrex::MultiFab Ax(x.boxArray(), x.DistributionMap(), ncomp, nghost);
99 amrex::MultiFab Dx(x.boxArray(), x.DistributionMap(), ncomp, nghost);
100 amrex::MultiFab Rx(x.boxArray(), x.DistributionMap(), ncomp, nghost);
102 if (!m_diagonal_computed)
Util::Abort(
INFO,
"Operator::Diagonal() must be called before using Fsmooth");
// Two sweeps per smoothing call.
106 for (
int ctr = 0; ctr < 2; ctr++)
108 Fapply(amrlev, mglev, Ax, x);
110 amrex::MultiFab::Copy(Dx, x, 0, 0, ncomp, nghost);
111 amrex::MultiFab::Multiply(Dx, *m_diag[amrlev][mglev], 0, 0, ncomp, nghost);
113 amrex::MultiFab::Copy(Rx, Ax, 0, 0, ncomp, nghost);
114 amrex::MultiFab::Subtract(Rx, Dx, 0, 0, ncomp, nghost);
116 for (MFIter mfi(x,
false); mfi.isValid(); ++mfi)
118 const Box& bx = mfi.validbox();
119 amrex::FArrayBox& xfab = x[mfi];
120 const amrex::FArrayBox& bfab = b[mfi];
122 const amrex::FArrayBox& Rxfab = Rx[mfi];
123 const amrex::FArrayBox& diagfab = (*m_diag[amrlev][mglev])[mfi];
125 for (
int n = 0; n < ncomp; n++)
// Loop over the box grown by 2 so ghost nodes are relaxed too.
127 AMREX_D_TERM(
for (
int m1 = bx.loVect()[0] - 2; m1 <= bx.hiVect()[0] + 2; m1++),
128 for (
int m2 = bx.loVect()[1] - 2; m2 <= bx.hiVect()[1] + 2; m2++),
129 for (
int m3 = bx.loVect()[2] - 2; m3 <= bx.hiVect()[2] + 2; m3++))
// Skip nodes strictly outside the (node-indexed) problem domain.
135 if (AMREX_D_TERM(m[0] < domain.loVect()[0], ||
136 m[1] < domain.loVect()[1], ||
137 m[2] < domain.loVect()[2]))
continue;
138 if (AMREX_D_TERM(m[0] > domain.hiVect()[0] + 1, ||
139 m[1] > domain.hiVect()[1] + 1, ||
140 m[2] > domain.hiVect()[2] + 1))
continue;
// Outermost ghost ring is excluded from the update (handled by fill/sync).
142 if (AMREX_D_TERM(m[0] == bx.loVect()[0] - nghost || m[0] == bx.hiVect()[0] + nghost, ||
143 m[1] == bx.loVect()[1] - nghost || m[1] == bx.hiVect()[1] + nghost, ||
144 m[2] == bx.loVect()[2] - nghost || m[2] == bx.hiVect()[2] + nghost))
// Damped Jacobi update with relaxation factor m_omega.
151 xfab(m, n) = (1. - m_omega) * xfab(m, n) + m_omega * (bfab(m, n) - Rxfab(m, n)) / diagfab(m, n);
// Refresh ghost data and enforce consistency of shared nodes.
156 amrex::Geometry geom = m_geom[amrlev][mglev];
157 realFillBoundary(x, geom);
158 nodalSync(amrlev, mglev, x);
// Divides a_x pointwise by the cached operator diagonal (diagonal scaling).
// Requires Diagonal() to have been called first.
163 BL_PROFILE(
"Operator::normalize()");
164 amrex::Box domain(m_geom[amrlev][mglev].Domain());
165 domain.convert(amrex::IntVect::TheNodeVector());
167 int ncomp = getNComp();
170 if (!m_diagonal_computed)
171 Util::Abort(
INFO,
"Operator::Diagonal() must be called before using normalize");
// Tiled iteration is safe here: the update is purely local per node.
173 for (MFIter mfi(a_x, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
176 Box bx = mfi.tilebox();
180 amrex::Array4<amrex::Real>
const& x = a_x.array(mfi);
181 amrex::Array4<const amrex::Real>
const& diag = m_diag[amrlev][mglev]->array(mfi);
183 for (
int n = 0; n < ncomp; n++)
185 amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE(
int i,
int j,
int k) {
// NOTE(review): diag is indexed without the component n here — confirm
// whether x(i,j,k,n) /= diag(i,j,k,n) was intended for ncomp > 1.
187 x(i, j, k) /= diag(i, j, k);
// Constructor for the nodal operator: validates that the level-0 boxarray
// is node-centered, then forwards everything to define().
195 const Vector<BoxArray>& a_grids,
196 const Vector<DistributionMapping>& a_dmap,
197 const LPInfo& a_info,
198 const Vector<FabFactory<FArrayBox>
const*>& a_factory)
200 BL_PROFILE(
"Operator::Operator()");
// This nodal operator requires node-centered boxarrays; abort otherwise.
// (Fixed error text: it previously told the user to supply CELL CENTERED
// boxarrays, the opposite of what the check enforces.)
203 if (!(a_grids[0].ixType() == amrex::IndexType::TheNodeType()))
204 Util::Abort(
INFO,
"Operator must be defined using NODE CENTERED boxarrays.");
206 define(a_geom, a_grids, a_dmap, a_info, a_factory);
// define(): sets up the underlying MLNodeLinOp, allocates the per-level
// diagonal storage, and initializes boundary-condition placeholders.
213 const Vector<BoxArray>& a_grids,
214 const Vector<DistributionMapping>& a_dmap,
215 const LPInfo& a_info,
216 const Vector<FabFactory<FArrayBox>
const*>& a_factory)
// Fixed profile label: this is define(), not the destructor — the old
// label "Operator::~Operator()" mis-attributed this cost in profiles.
218 BL_PROFILE(
"Operator::define()");
// Warn when ranks outnumber boxes: some ranks would then sit idle.
221 if (amrex::ParallelDescriptor::NProcs() > a_grids[0].size())
223 Util::Warning(
INFO,
"There are more processors than there are boxes in the amrlev=0 boxarray!!\n",
224 "(NProcs = ", amrex::ParallelDescriptor::NProcs(),
", a_grids[0].size() = ", a_grids[0].size(),
")\n",
225 "You should decrease max_grid_size or you will not get proper scaling!");
229 Vector<BoxArray> cc_grids = a_grids;
230 for (
auto& ba : cc_grids) {
234 MLNodeLinOp::define(a_geom, a_grids, a_dmap, a_info, a_factory);
// Allocate diagonal storage (node-centered) on every AMR/MG level.
238 m_diag.resize(m_num_amr_levels);
239 for (
int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev)
241 m_diag[amrlev].resize(m_num_mg_levels[amrlev]);
243 for (
int mglev = 0; mglev < m_num_mg_levels[amrlev]; ++mglev)
245 m_diag[amrlev][mglev].reset(
new MultiFab(amrex::convert(m_grids[amrlev][mglev], amrex::IntVect::TheNodeVector()),
246 m_dmap[amrlev][mglev], getNComp(), nghost));
// BCs are managed elsewhere; register placeholder (bogus) values here.
253 m_lobc.resize(getNComp(), { {
AMREX_D_DECL(BCType::bogus,BCType::bogus,BCType::bogus)} });
254 m_hibc.resize(getNComp(), { {
AMREX_D_DECL(BCType::bogus,BCType::bogus,BCType::bogus)} });
// Marks coarse/fine interface nodes as valid (1) in the residual mask so
// they are included in residual computations.
259 BL_PROFILE(
"Operator::fixUpResidualMask()");
261 if (!m_masks_built) buildMasks();
263 const iMultiFab& cfmask = *m_nd_fine_mask[amrlev];
// OpenMP over tiles on host; no-op inside a GPU launch region.
266 #pragma omp parallel if (Gpu::notInLaunchRegion())
268 for (MFIter mfi(resmsk, TilingIfNotGPU()); mfi.isValid(); ++mfi)
270 const Box& bx = mfi.tilebox();
271 Array4<int>
const& rmsk = resmsk.array(mfi);
272 Array4<int const>
const& fmsk = cfmask.const_array(mfi);
273 AMREX_HOST_DEVICE_PARALLEL_FOR_3D(bx, i, j, k,
275 if (fmsk(i,j,k) == amrex::nodelap_detail::crse_fine_node) rmsk(i,j,k) = 1;
// Delegates solver preparation to the MLNodeLinOp base class.
282 BL_PROFILE(
"Operator::prepareForSolve()");
283 MLNodeLinOp::prepareForSolve();
// Restriction (fine -> coarse) with a full-weighting nodal stencil.
// Boundary nodes get progressively lower-dimensional stencils (injection at
// corners, 1-D [1 2 1]/4 on edges, 2-D on faces, full 27-point inside).
291 BL_PROFILE(
"Operator::restriction()");
293 applyBC(amrlev, cmglev - 1, fine, BCMode::Homogeneous, StateMode::Solution);
295 amrex::Box cdomain = m_geom[amrlev][cmglev].Domain();
296 cdomain.convert(amrex::IntVect::TheNodeVector());
// If crse/fine have incompatible layouts, restrict into a temporary and
// ParallelCopy the result at the end.
298 bool need_parallel_copy = !amrex::isMFIterSafe(crse, fine);
300 if (need_parallel_copy) {
301 const BoxArray& ba = amrex::coarsen(fine.boxArray(), 2);
302 cfine.define(ba, fine.DistributionMap(), fine.nComp(), fine.nGrow());
305 MultiFab* pcrse = (need_parallel_copy) ? &cfine : &crse;
307 for (MFIter mfi(*pcrse, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
309 const Box& bx = mfi.tilebox();
311 amrex::Array4<const amrex::Real>
const& fdata = fine.array(mfi);
312 amrex::Array4<amrex::Real>
const& cdata = pcrse->array(mfi);
314 const Dim3 lo = amrex::lbound(cdomain), hi = amrex::ubound(cdomain);
317 for (
int n = 0; n < crse.nComp(); n++)
// (I,J,K) coarse node maps to (2I,2J,2K) fine node.
321 amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE(
int I,
int J,
int K) {
322 int i = 2 * I, j = 2 * J, k = 2 * K;
// Corner of the domain: straight injection.
324 if ((I == lo.x || I == hi.x) &&
325 (J == lo.y || J == hi.y) &&
326 (K == lo.z || K == hi.z))
328 cdata(I, J, K, n) = fdata(i, j, k, n);
// Domain edge along x: 1-D weighting in x.
330 else if ((J == lo.y || J == hi.y) &&
331 (K == lo.z || K == hi.z))
333 cdata(I, J, K, n) = 0.25 * fdata(i - 1, j, k, n) + 0.5 * fdata(i, j, k, n) + 0.25 * fdata(i + 1, j, k, n);
// Domain edge along y.
335 else if ((K == lo.z || K == hi.z) &&
336 (I == lo.x || I == hi.x))
338 cdata(I, J, K, n) = 0.25 * fdata(i, j - 1, k, n) + 0.5 * fdata(i, j, k, n) + 0.25 * fdata(i, j + 1, k, n);
// Domain edge along z.
340 else if ((I == lo.x || I == hi.x) &&
341 (J == lo.y || J == hi.y))
343 cdata(I, J, K, n) = 0.25 * fdata(i, j, k - 1, n) + 0.5 * fdata(i, j, k, n) + 0.25 * fdata(i, j, k + 1, n);
// Domain face with normal x: 2-D [1 2 1]^2 / 16 weighting in (y,z).
345 else if (I == lo.x || I == hi.x)
348 (+fdata(i, j - 1, k - 1, n) + 2.0 * fdata(i, j, k - 1, n) + fdata(i, j + 1, k - 1, n)
349 + 2.0 * fdata(i, j - 1, k, n) + 4.0 * fdata(i, j, k, n) + 2.0 * fdata(i, j + 1, k, n)
350 + fdata(i, j - 1, k + 1, n) + 2.0 * fdata(i, j, k + 1, n) + fdata(i, j + 1, k + 1, n)) / 16.0;
// Domain face with normal y.
352 else if (J == lo.y || J == hi.y)
355 (+fdata(i - 1, j, k - 1, n) + 2.0 * fdata(i - 1, j, k, n) + fdata(i - 1, j, k + 1, n)
356 + 2.0 * fdata(i, j, k - 1, n) + 4.0 * fdata(i, j, k, n) + 2.0 * fdata(i, j, k + 1, n)
357 + fdata(i + 1, j, k - 1, n) + 2.0 * fdata(i + 1, j, k, n) + fdata(i + 1, j, k + 1, n)) / 16.0;
// Domain face with normal z.
359 else if (K == lo.z || K == hi.z)
362 (+fdata(i - 1, j - 1, k, n) + 2.0 * fdata(i, j - 1, k, n) + fdata(i + 1, j - 1, k, n)
363 + 2.0 * fdata(i - 1, j, k, n) + 4.0 * fdata(i, j, k, n) + 2.0 * fdata(i + 1, j, k, n)
364 + fdata(i - 1, j + 1, k, n) + 2.0 * fdata(i, j + 1, k, n) + fdata(i + 1, j + 1, k, n)) / 16.0;
// Interior: full 27-point weighting (corners/64 + edges/32 + faces/16 + center/8).
368 (fdata(i - 1, j - 1, k - 1, n) + fdata(i - 1, j - 1, k + 1, n) + fdata(i - 1, j + 1, k - 1, n) + fdata(i - 1, j + 1, k + 1, n) +
369 fdata(i + 1, j - 1, k - 1, n) + fdata(i + 1, j - 1, k + 1, n) + fdata(i + 1, j + 1, k - 1, n) + fdata(i + 1, j + 1, k + 1, n)) / 64.0
371 (fdata(i, j - 1, k - 1, n) + fdata(i, j - 1, k + 1, n) + fdata(i, j + 1, k - 1, n) + fdata(i, j + 1, k + 1, n) +
372 fdata(i - 1, j, k - 1, n) + fdata(i + 1, j, k - 1, n) + fdata(i - 1, j, k + 1, n) + fdata(i + 1, j, k + 1, n) +
373 fdata(i - 1, j - 1, k, n) + fdata(i - 1, j + 1, k, n) + fdata(i + 1, j - 1, k, n) + fdata(i + 1, j + 1, k, n)) / 32.0
375 (fdata(i - 1, j, k, n) + fdata(i, j - 1, k, n) + fdata(i, j, k - 1, n) +
376 fdata(i + 1, j, k, n) + fdata(i, j + 1, k, n) + fdata(i, j, k + 1, n)) / 16.0
378 fdata(i, j, k, n) / 8.0;
383 if (need_parallel_copy) {
384 crse.ParallelCopy(cfine);
// Refresh ghosts and synchronize shared nodes on the coarse level.
387 amrex::Geometry geom = m_geom[amrlev][cmglev];
388 realFillBoundary(crse, geom);
389 nodalSync(amrlev, cmglev, crse);
// Interpolation (coarse -> fine): trilinear prolongation on the nodal grid.
// Coincident nodes inject; edge/face/cell-centered fine nodes average the
// 2/4/8 surrounding coarse nodes.  Result is ADDED to `fine` (correction).
394 BL_PROFILE(
"Operator::interpolation()");
396 amrex::Box fdomain = m_geom[amrlev][fmglev].Domain(); fdomain.convert(amrex::IntVect::TheNodeVector());
398 bool need_parallel_copy = !amrex::isMFIterSafe(crse, fine);
400 const MultiFab* cmf = &crse;
// Copy coarse data into a layout compatible with `fine` if needed.
401 if (need_parallel_copy) {
402 const BoxArray& ba = amrex::coarsen(fine.boxArray(), 2);
403 cfine.define(ba, fine.DistributionMap(), crse.nComp(), crse.nGrow());
404 cfine.ParallelCopy(crse);
408 for (MFIter mfi(fine,
false); mfi.isValid(); ++mfi)
410 const Box& fine_bx = mfi.validbox() & fdomain;
411 const Box& course_bx = amrex::coarsen(fine_bx, 2);
412 const Box& tmpbx = amrex::refine(course_bx, 2);
// Interpolate into a temporary fab, then add to `fine` afterwards.
414 tmpfab.resize(tmpbx, fine.nComp());
416 const amrex::FArrayBox& crsefab = (*cmf)[mfi];
418 amrex::Array4<const amrex::Real>
const& cdata = crsefab.const_array();
419 amrex::Array4<amrex::Real>
const& fdata = tmpfab.array();
421 for (
int n = 0; n < crse.nComp(); n++)
425 amrex::ParallelFor(fine_bx, [=] AMREX_GPU_DEVICE(
int i,
int j,
int k) {
427 int I = i / 2, J = j / 2, K = k / 2;
// Node coincides with a coarse node: inject.
429 if (i % 2 == 0 && j % 2 == 0 && k % 2 == 0)
430 fdata(i, j, k, n) = cdata(I, J, K, n);
// On a coarse edge: average the two coarse endpoints.
431 else if (j % 2 == 0 && k % 2 == 0)
432 fdata(i, j, k, n) = 0.5 * (cdata(I, J, K, n) + cdata(I + 1, J, K, n));
433 else if (k % 2 == 0 && i % 2 == 0)
434 fdata(i, j, k, n) = 0.5 * (cdata(I, J, K, n) + cdata(I, J + 1, K, n));
435 else if (i % 2 == 0 && j % 2 == 0)
436 fdata(i, j, k, n) = 0.5 * (cdata(I, J, K, n) + cdata(I, J, K + 1, n));
// On a coarse face: average the four surrounding coarse nodes.
438 fdata(i, j, k, n) = 0.25 * (cdata(I, J, K, n) + cdata(I, J + 1, K, n) +
439 cdata(I, J, K + 1, n) + cdata(I, J + 1, K + 1, n));
441 fdata(i, j, k, n) = 0.25 * (cdata(I, J, K, n) + cdata(I, J, K + 1, n) +
442 cdata(I + 1, J, K, n) + cdata(I + 1, J, K + 1, n));
444 fdata(i, j, k, n) = 0.25 * (cdata(I, J, K, n) + cdata(I + 1, J, K, n) +
445 cdata(I, J + 1, K, n) + cdata(I + 1, J + 1, K, n));
// Cell-centered fine node: average all eight coarse corners.
447 fdata(i, j, k, n) = 0.125 * (cdata(I, J, K, n) +
448 cdata(I + 1, J, K, n) + cdata(I, J + 1, K, n) + cdata(I, J, K + 1, n) +
449 cdata(I, J + 1, K + 1, n) + cdata(I + 1, J, K + 1, n) + cdata(I + 1, J + 1, K, n) +
450 cdata(I + 1, J + 1, K + 1, n));
// Accumulate the interpolated correction into the fine-level solution.
454 fine[mfi].plus(tmpfab, fine_bx, fine_bx, 0, 0, fine.nComp());
457 amrex::Geometry geom = m_geom[amrlev][fmglev];
458 realFillBoundary(fine, geom);
459 nodalSync(amrlev, fmglev, fine);
// Averages the fine-level solution down to the coarse level; the RHS
// arguments are intentionally ignored (unnamed parameters).
463 const MultiFab& fine_sol,
const MultiFab& )
465 BL_PROFILE(
"Operator::averageDownSolutionRHS()");
466 const auto& amrrr = AMRRefRatio(camrlev);
467 amrex::average_down(fine_sol, crse_sol, 0, crse_sol.nComp(), amrrr);
// Boundary-condition application: only fills ghost cells across grid
// boundaries; BC/state mode parameters are ignored (unnamed).
482 amrex::MLLinOp::StateMode ,
bool skip_fillboundary)
const
484 BL_PROFILE(
"Operator::applyBC()");
486 const Geometry& geom = m_geom[amrlev][mglev];
488 if (!skip_fillboundary) {
490 realFillBoundary(phi, geom);
// Returns the registered coefficient fab `num` on (amrlev, mglev) for the
// box indexed by `mfi`.
494 const amrex::FArrayBox&
497 BL_PROFILE(
"Operator::GetFab()");
499 return m_a_coeffs[num][amrlev][mglev][mfi];
// Registers a new coefficient fab set (by reference): allocates per-level
// storage and copies the level-0 input into the finest MG level.
504 BL_PROFILE(
"Operator::RegisterNewFab()");
507 m_a_coeffs.resize(m_a_coeffs.size() + 1);
508 m_a_coeffs[m_num_a_fabs].resize(m_num_amr_levels);
509 for (
int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev)
511 m_a_coeffs[m_num_a_fabs][amrlev].resize(m_num_mg_levels[amrlev]);
512 for (
int mglev = 0; mglev < m_num_mg_levels[amrlev]; ++mglev)
513 m_a_coeffs[m_num_a_fabs][amrlev][mglev].define(m_grids[amrlev][mglev],
514 m_dmap[amrlev][mglev],
515 input[amrlev].nComp(),
516 input[amrlev].nGrow());
// Copy(dst, src, srccomp, dstcomp, numcomp, nghost) — the source and
// component arguments were missing, leaving a malformed call; restored to
// mirror the pointer-based RegisterNewFab overload below.
518 amrex::MultiFab::Copy(m_a_coeffs[m_num_a_fabs][amrlev][0],
input[amrlev], 0, 0,
520 input[amrlev].nComp(),
521 input[amrlev].nGrow());
// Registers a new coefficient fab set (by pointer): same as the reference
// overload but dereferences each input MultiFab.
529 BL_PROFILE(
"Operator::RegisterNewFab()");
532 m_a_coeffs.resize(m_a_coeffs.size() + 1);
533 m_a_coeffs[m_num_a_fabs].resize(m_num_amr_levels);
534 for (
int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev)
536 m_a_coeffs[m_num_a_fabs][amrlev].resize(m_num_mg_levels[amrlev]);
537 for (
int mglev = 0; mglev < m_num_mg_levels[amrlev]; ++mglev)
538 m_a_coeffs[m_num_a_fabs][amrlev][mglev].define(m_grids[amrlev][mglev],
539 m_dmap[amrlev][mglev],
540 input[amrlev]->nComp(),
541 input[amrlev]->nGrow());
// Copy the input into the finest MG level (mglev 0) of this AMR level.
543 amrex::MultiFab::Copy(m_a_coeffs[m_num_a_fabs][amrlev][0],
544 *input[amrlev], 0, 0,
545 input[amrlev]->nComp(),
546 input[amrlev]->nGrow());
// Reflux: restricts the fine-level residual onto coarse/fine interface
// nodes of the coarse residual, using the same full-weighting stencils as
// restriction(), gated by the node mask.
552 MultiFab& res,
const MultiFab& ,
const MultiFab& ,
553 MultiFab& fine_res, MultiFab& ,
const MultiFab& )
const
555 BL_PROFILE(
"Operator::Elastic::reflux()");
557 int ncomp = AMREX_SPACEDIM;
559 amrex::Box cdomain(m_geom[crse_amrlev][0].Domain());
560 cdomain.convert(amrex::IntVect::TheNodeVector());
562 const Geometry& cgeom = m_geom[crse_amrlev][0];
564 const BoxArray& fba = fine_res.boxArray();
565 const DistributionMapping& fdm = fine_res.DistributionMap();
// Coarsened copy of the coarse residual on the fine distribution.
567 MultiFab fine_res_for_coarse(amrex::coarsen(fba, 2), fdm, ncomp, 2);
568 fine_res_for_coarse.ParallelCopy(res, 0, 0, ncomp, 0, 0, cgeom.periodicity());
570 applyBC(crse_amrlev + 1, 0, fine_res, BCMode::Inhomogeneous, StateMode::Solution);
// Mask values matching the node-mask convention used below.
574 const int coarse_fine_node = 1;
575 const int fine_fine_node = 2;
577 amrex::iMultiFab nodemask(amrex::coarsen(fba, 2), fdm, 1, 2);
578 nodemask.ParallelCopy(*m_nd_fine_mask[crse_amrlev], 0, 0, 1, 0, 0, cgeom.periodicity());
580 amrex::iMultiFab cellmask(amrex::convert(amrex::coarsen(fba, 2), amrex::IntVect::TheCellVector()), fdm, 1, 2);
581 cellmask.ParallelCopy(*m_cc_fine_mask[crse_amrlev], 0, 0, 1, 1, 1, cgeom.periodicity());
583 for (MFIter mfi(fine_res_for_coarse,
false); mfi.isValid(); ++mfi)
585 const Box& bx = mfi.validbox();
587 amrex::Array4<const int>
const& nmask = nodemask.array(mfi);
590 amrex::Array4<amrex::Real>
const& cdata = fine_res_for_coarse.array(mfi);
591 amrex::Array4<const amrex::Real>
const& fdata = fine_res.array(mfi);
593 const Dim3 lo = amrex::lbound(cdomain), hi = amrex::ubound(cdomain);
595 for (
int n = 0; n < fine_res.nComp(); n++)
599 amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE(
int I,
int J,
int K) {
600 int i = I * 2, j = J * 2, k = K * 2;
// Only restrict onto covered or interface nodes.
602 if (nmask(I, J, K) == fine_fine_node || nmask(I, J, K) == coarse_fine_node)
// Corner / edge / face / interior stencils, as in restriction().
604 if ((I == lo.x || I == hi.x) &&
605 (J == lo.y || J == hi.y) &&
606 (K == lo.z || K == hi.z))
607 cdata(I, J, K, n) = fdata(i, j, k, n);
608 else if ((J == lo.y || J == hi.y) &&
609 (K == lo.z || K == hi.z))
610 cdata(I, J, K, n) = 0.25 * fdata(i - 1, j, k, n) + 0.5 * fdata(i, j, k, n) + 0.25 * fdata(i + 1, j, k, n);
611 else if ((K == lo.z || K == hi.z) &&
612 (I == lo.x || I == hi.x))
613 cdata(I, J, K, n) = 0.25 * fdata(i, j - 1, k, n) + 0.5 * fdata(i, j, k, n) + 0.25 * fdata(i, j + 1, k, n);
614 else if ((I == lo.x || I == hi.x) &&
615 (J == lo.y || J == hi.y))
616 cdata(I, J, K, n) = 0.25 * fdata(i, j, k - 1, n) + 0.5 * fdata(i, j, k, n) + 0.25 * fdata(i, j, k + 1, n);
617 else if (I == lo.x || I == hi.x)
619 (+fdata(i, j - 1, k - 1, n) + 2.0 * fdata(i, j, k - 1, n) + fdata(i, j + 1, k - 1, n)
620 + 2.0 * fdata(i, j - 1, k, n) + 4.0 * fdata(i, j, k, n) + 2.0 * fdata(i, j + 1, k, n)
621 + fdata(i, j - 1, k + 1, n) + 2.0 * fdata(i, j, k + 1, n) + fdata(i, j + 1, k + 1, n)) / 16.0;
622 else if (J == lo.y || J == hi.y)
624 (+fdata(i - 1, j, k - 1, n) + 2.0 * fdata(i - 1, j, k, n) + fdata(i - 1, j, k + 1, n)
625 + 2.0 * fdata(i, j, k - 1, n) + 4.0 * fdata(i, j, k, n) + 2.0 * fdata(i, j, k + 1, n)
626 + fdata(i + 1, j, k - 1, n) + 2.0 * fdata(i + 1, j, k, n) + fdata(i + 1, j, k + 1, n)) / 16.0;
627 else if (K == lo.z || K == hi.z)
629 (+fdata(i - 1, j - 1, k, n) + 2.0 * fdata(i, j - 1, k, n) + fdata(i + 1, j - 1, k, n)
630 + 2.0 * fdata(i - 1, j, k, n) + 4.0 * fdata(i, j, k, n) + 2.0 * fdata(i + 1, j, k, n)
631 + fdata(i - 1, j + 1, k, n) + 2.0 * fdata(i, j + 1, k, n) + fdata(i + 1, j + 1, k, n)) / 16.0;
634 (fdata(i - 1, j - 1, k - 1, n) + fdata(i - 1, j - 1, k + 1, n) + fdata(i - 1, j + 1, k - 1, n) + fdata(i - 1, j + 1, k + 1, n) +
635 fdata(i + 1, j - 1, k - 1, n) + fdata(i + 1, j - 1, k + 1, n) + fdata(i + 1, j + 1, k - 1, n) + fdata(i + 1, j + 1, k + 1, n)) / 64.0
637 (fdata(i, j - 1, k - 1, n) + fdata(i, j - 1, k + 1, n) + fdata(i, j + 1, k - 1, n) + fdata(i, j + 1, k + 1, n) +
638 fdata(i - 1, j, k - 1, n) + fdata(i + 1, j, k - 1, n) + fdata(i - 1, j, k + 1, n) + fdata(i + 1, j, k + 1, n) +
639 fdata(i - 1, j - 1, k, n) + fdata(i - 1, j + 1, k, n) + fdata(i + 1, j - 1, k, n) + fdata(i + 1, j + 1, k, n)) / 32.0
641 (fdata(i - 1, j, k, n) + fdata(i, j - 1, k, n) + fdata(i, j, k - 1, n) +
642 fdata(i + 1, j, k, n) + fdata(i, j + 1, k, n) + fdata(i, j, k + 1, n)) / 16.0
644 fdata(i, j, k, n) / 8.0;
// Copy the restricted interface values back into the coarse residual.
653 res.ParallelCopy(fine_res_for_coarse, 0, 0, ncomp, 0, 0, cgeom.periodicity());
658 amrex::Geometry geom = m_geom[crse_amrlev][mglev];
659 realFillBoundary(res, geom);
660 nodalSync(crse_amrlev, mglev, res);
// Computes resid = b - A*x (solution mode) and refreshes ghost data.
669 const int ncomp = b.nComp();
670 apply(amrlev, mglev, resid, x, BCMode::Inhomogeneous, StateMode::Solution);
// Xpay: resid = b + (-1)*resid, i.e. b - A*x, including 2 ghost cells.
671 MultiFab::Xpay(resid, -1.0, b, 0, 0, ncomp, 2);
672 amrex::Geometry geom = m_geom[amrlev][mglev];
673 realFillBoundary(resid, geom);
// Computes resid = b - A*x in correction (homogeneous-BC) mode; the BC mode
// and crse-bc arguments are ignored (unnamed parameters).
678 BCMode ,
const MultiFab* )
681 apply(amrlev, mglev, resid, x, BCMode::Homogeneous, StateMode::Correction);
682 int ncomp = b.nComp();
683 MultiFab::Xpay(resid, -1.0, b, 0, 0, ncomp, resid.nGrow());
684 amrex::Geometry geom = m_geom[amrlev][mglev];
685 realFillBoundary(resid, geom);
// Cell-centered operator setup: records the cell-centered index type.
693 m_ixtype = amrex::IntVect::TheCellVector();
// Cell-centered define(): forwards to MLCellLinOp::define, then registers
// Periodic/Dirichlet BCs per component based on the BC object's
// periodicity, and sets no coarse-fine BC data on any level.
698 const amrex::Vector<amrex::BoxArray>& a_grids,
699 const amrex::Vector<amrex::DistributionMapping>& a_dmap,
701 const amrex::LPInfo& a_info,
702 const amrex::Vector<amrex::FabFactory<amrex::FArrayBox>
const*>& a_factory)
706 std::array<int, AMREX_SPACEDIM> is_periodic = m_bc->
IsPeriodic();
708 MLCellLinOp::define(a_geom, a_grids, a_dmap, a_info, a_factory);
// One lo/hi BC triple per solution component.
711 for (
int n = 0; n < getNComp(); n++)
713 m_lobc.push_back({
AMREX_D_DECL(is_periodic[0] ? amrex::LinOpBCType::Periodic : amrex::LinOpBCType::Dirichlet,
714 is_periodic[1] ? amrex::LinOpBCType::Periodic : amrex::LinOpBCType::Dirichlet,
715 is_periodic[2] ? amrex::LinOpBCType::Periodic : amrex::LinOpBCType::Dirichlet) });
716 m_hibc.push_back({
AMREX_D_DECL(is_periodic[0] ? amrex::LinOpBCType::Periodic : amrex::LinOpBCType::Dirichlet,
717 is_periodic[1] ? amrex::LinOpBCType::Periodic : amrex::LinOpBCType::Dirichlet,
718 is_periodic[2] ? amrex::LinOpBCType::Periodic : amrex::LinOpBCType::Dirichlet) });
// nullptr level BC: MLCellLinOp treats boundary values as zero.
721 for (
int ilev = 0; ilev < a_geom.size(); ++ilev)
722 setLevelBC(ilev,
nullptr);
// Delegates solver preparation to the MLCellLinOp base class.
730 MLCellLinOp::prepareForSolve();
// Signature fragment of a BC-setter whose parameters are all intentionally
// ignored (unnamed) — presumably a deliberate no-op override; body not
// visible in this chunk.
741 const amrex::Array<BCType, AMREX_SPACEDIM>& ,
742 const amrex::Array<BCType, AMREX_SPACEDIM>& ,
743 int ,
const amrex::RealVect& )
// Averages all registered coefficient fabs down across MG levels, for each
// AMR level from finest to coarsest.
752 for (
int i = 0; i < m_num_a_fabs; i++)
754 for (
int amrlev = m_num_amr_levels - 1; amrlev > 0; --amrlev)
756 auto& fine_a_coeffs = m_a_coeffs[i][amrlev];
757 averageDownCoeffsSameAmrLevel(fine_a_coeffs);
// NOTE(review): only fab set i=0 is averaged on amrlev 0 here (loop-body
// extent unclear in this fragment) — confirm against the full source.
759 averageDownCoeffsSameAmrLevel(m_a_coeffs[i][0]);
// Fills each coarser MG level of `a` by averaging down the next finer one.
766 int nmglevs = a.size();
767 for (
int mglev = 1; mglev < nmglevs; ++mglev)
769 amrex::average_down(a[mglev - 1], a[mglev], 0, a[0].nComp(), mg_coarsen_ratio);
// Cell-centered counterpart of GetFab: returns coefficient fab `num` on
// (amrlev, mglev) for the box indexed by `mfi`.
775 const amrex::FArrayBox&
778 return m_a_coeffs[num][amrlev][mglev][mfi];
// Cell-centered RegisterNewFab (by reference): allocates per-level storage
// for the new coefficient set and copies the input into MG level 0.
786 m_a_coeffs.resize(m_a_coeffs.size() + 1);
787 m_a_coeffs[m_num_a_fabs].resize(m_num_amr_levels);
788 for (
int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev)
790 m_a_coeffs[m_num_a_fabs][amrlev].resize(m_num_mg_levels[amrlev]);
791 for (
int mglev = 0; mglev < m_num_mg_levels[amrlev]; ++mglev)
792 m_a_coeffs[m_num_a_fabs][amrlev][mglev].define(m_grids[amrlev][mglev],
793 m_dmap[amrlev][mglev],
794 input[amrlev].nComp(),
795 input[amrlev].nGrow());
// Copy(dst, src, srccomp, dstcomp, numcomp, nghost) — the source and
// component arguments were missing, leaving a malformed call; restored to
// mirror the pointer-based overload below.
797 amrex::MultiFab::Copy(m_a_coeffs[m_num_a_fabs][amrlev][0],
input[amrlev], 0, 0,
799 input[amrlev].nComp(),
800 input[amrlev].nGrow());
// Cell-centered RegisterNewFab (by pointer): same as the reference overload
// but dereferences each input MultiFab.
810 m_a_coeffs.resize(m_a_coeffs.size() + 1);
811 m_a_coeffs[m_num_a_fabs].resize(m_num_amr_levels);
812 for (
int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev)
814 m_a_coeffs[m_num_a_fabs][amrlev].resize(m_num_mg_levels[amrlev]);
815 for (
int mglev = 0; mglev < m_num_mg_levels[amrlev]; ++mglev)
816 m_a_coeffs[m_num_a_fabs][amrlev][mglev].define(m_grids[amrlev][mglev],
817 m_dmap[amrlev][mglev],
818 input[amrlev]->nComp(),
819 input[amrlev]->nGrow());
// Copy the input into the finest MG level (mglev 0) of this AMR level.
821 amrex::MultiFab::Copy(m_a_coeffs[m_num_a_fabs][amrlev][0],
822 *input[amrlev], 0, 0,
823 input[amrlev]->nComp(),
824 input[amrlev]->nGrow());