Alamo
Operator.cpp
Go to the documentation of this file.
1
2#include <AMReX_MLNodeLinOp.H>
3#include <AMReX_MLCellLinOp.H>
4#include <AMReX_MLNodeLap_K.H>
5#include <AMReX_MultiFabUtil.H>
6#include "Util/Color.H"
7#include "Set/Set.H"
8#include "Operator.H"
9
10using namespace amrex;
11namespace Operator {
12
13// constexpr amrex::IntVect AMREX_D_DECL(Operator<Grid::Node>::dx,Operator<Grid::Node>::dy,Operator<Grid::Node>::dz);
15
// Compute and cache the operator diagonal on every AMR / multigrid level.
// NOTE(review): the signature line was lost in extraction; from the body it
// takes a bool `recompute` — presumably Diagonal(bool recompute) — which
// forces a rebuild even when a cached diagonal exists. Confirm in Operator.H.
{
    BL_PROFILE(Color::FG::Yellow + "Operator::Diagonal()" + Color::Reset);
    // Reuse the cached diagonal unless the caller explicitly requests a rebuild.
    if (!recompute && m_diagonal_computed) return;
    m_diagonal_computed = true;

    // Delegate the per-level computation to the (amrlev, mglev) overload.
    for (int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev)
    {
        for (int mglev = 0; mglev < m_num_mg_levels[amrlev]; ++mglev)
        {
            Diagonal(amrlev, mglev, *m_diag[amrlev][mglev]);
        }
    }
}
30
/// \brief Extract the diagonal of the (matrix-free) operator on one level by
/// probing: apply the operator to a set of sparse indicator fields and read
/// back the entries at the probed nodes.
///
/// \param amrlev AMR level index
/// \param mglev  multigrid level index within \p amrlev
/// \param diag   output MultiFab receiving the operator diagonal
void Operator<Grid::Node>::Diagonal(int amrlev, int mglev, amrex::MultiFab& diag)
{
    BL_PROFILE("Operator::Diagonal()");
    //Util::Message(INFO);

    int ncomp = diag.nComp();
    int nghost = 0;

    // Probe stride: every sep-th node in each direction is set to 1 per pass,
    // so num = sep^DIM passes together cover every node exactly once.
    int sep = 2;
    int num = AMREX_D_TERM(sep, *sep, *sep);
    int cntr = 0;

    // Scratch fields: x is the indicator input, Ax the operator applied to it.
    amrex::MultiFab x(m_diag[amrlev][mglev]->boxArray(), m_diag[amrlev][mglev]->DistributionMap(), ncomp, nghost);
    amrex::MultiFab Ax(m_diag[amrlev][mglev]->boxArray(), m_diag[amrlev][mglev]->DistributionMap(), ncomp, nghost);

    for (MFIter mfi(x, false); mfi.isValid(); ++mfi)
    {
        const Box& bx = mfi.validbox();
        amrex::FArrayBox& diagfab = diag[mfi];
        amrex::FArrayBox& xfab = x[mfi];
        amrex::FArrayBox& Axfab = Ax[mfi];

        diagfab.setVal(0.0);

        for (int i = 0; i < num; i++)
        {
            for (int n = 0; n < ncomp; n++)
            {
                xfab.setVal(0.0);
                Axfab.setVal(0.0);

                //BL_PROFILE_VAR("Operator::Part1", part1);
                // Build the i-th indicator pattern for component n.
                // NOTE(review): the selection uses only m1 and m2; in 3D
                // (num = 8) m3 never enters the test, so the probe pattern
                // looks 2D-only — verify this is correct for AMREX_SPACEDIM=3.
                AMREX_D_TERM(for (int m1 = bx.loVect()[0]; m1 <= bx.hiVect()[0]; m1++),
                    for (int m2 = bx.loVect()[1]; m2 <= bx.hiVect()[1]; m2++),
                    for (int m3 = bx.loVect()[2]; m3 <= bx.hiVect()[2]; m3++))
                {
                    amrex::IntVect m(AMREX_D_DECL(m1, m2, m3));

                    if (m1 % sep == i / sep && m2 % sep == i % sep) xfab(m, n) = 1.0;
                    else xfab(m, n) = 0.0;
                }
                //BL_PROFILE_VAR_STOP(part1);

                BL_PROFILE_VAR("Operator::Part2", part2);
                // NOTE(review): this Message fires on every probe pass — very
                // chatty; consider demoting/removing once debugging is done.
                Util::Message(INFO, "Calling fapply...", cntr++);
                Fapply(amrlev, mglev, Ax, x);
                BL_PROFILE_VAR_STOP(part2);

                //BL_PROFILE_VAR("Operator::Part3", part3);
                // Mask Ax by the indicator so only the probed nodes survive,
                // then accumulate them into the diagonal.
                Axfab.mult(xfab, n, n, 1);
                diagfab.plus(Axfab, n, n, 1);
                //BL_PROFILE_VAR_STOP(part3);
            }
        }
    }
}
87
/// \brief One smoothing step (weighted Jacobi) for the multigrid V-cycle.
///
/// Updates \p x in place toward the solution of A x = b on level
/// (\p amrlev, \p mglev), using the precomputed operator diagonal.
///
/// \param amrlev AMR level
/// \param mglev  multigrid level
/// \param x      solution field, updated in place
/// \param b      right-hand side
void Operator<Grid::Node>::Fsmooth(int amrlev, int mglev, amrex::MultiFab& x, const amrex::MultiFab& b) const
{
    BL_PROFILE("Operator::Fsmooth()");

    amrex::Box domain(m_geom[amrlev][mglev].Domain());

    int ncomp = b.nComp();
    int nghost = 2; //b.nGrow();


    // Scratch fields: Ax = A*x, Dx = diag.*x, Rx = Ax - Dx (off-diagonal part).
    amrex::MultiFab Ax(x.boxArray(), x.DistributionMap(), ncomp, nghost);
    amrex::MultiFab Dx(x.boxArray(), x.DistributionMap(), ncomp, nghost);
    amrex::MultiFab Rx(x.boxArray(), x.DistributionMap(), ncomp, nghost);

    if (!m_diagonal_computed) Util::Abort(INFO, "Operator::Diagonal() must be called before using Fsmooth");

    // This is a JACOBI iteration, not Gauss-Seidel.
    // So we need to do twice the number of iterations to get the same behavior as GS.
    for (int ctr = 0; ctr < 2; ctr++)
    {
        Fapply(amrlev, mglev, Ax, x); // find Ax

        amrex::MultiFab::Copy(Dx, x, 0, 0, ncomp, nghost); // Dx = x
        amrex::MultiFab::Multiply(Dx, *m_diag[amrlev][mglev], 0, 0, ncomp, nghost); // Dx *= diag (Dx = x*diag)

        amrex::MultiFab::Copy(Rx, Ax, 0, 0, ncomp, nghost); // Rx = Ax
        amrex::MultiFab::Subtract(Rx, Dx, 0, 0, ncomp, nghost); // Rx -= Dx (Rx = Ax - Dx)

        for (MFIter mfi(x, false); mfi.isValid(); ++mfi)
        {
            const Box& bx = mfi.validbox();
            amrex::FArrayBox& xfab = x[mfi];
            const amrex::FArrayBox& bfab = b[mfi];
            //const amrex::FArrayBox &Axfab = Ax[mfi];
            const amrex::FArrayBox& Rxfab = Rx[mfi];
            const amrex::FArrayBox& diagfab = (*m_diag[amrlev][mglev])[mfi];

            for (int n = 0; n < ncomp; n++)
            {
                // Sweep the valid box plus two ghost layers (nghost = 2).
                AMREX_D_TERM(for (int m1 = bx.loVect()[0] - 2; m1 <= bx.hiVect()[0] + 2; m1++),
                    for (int m2 = bx.loVect()[1] - 2; m2 <= bx.hiVect()[1] + 2; m2++),
                    for (int m3 = bx.loVect()[2] - 2; m3 <= bx.hiVect()[2] + 2; m3++))
                {

                    amrex::IntVect m(AMREX_D_DECL(m1, m2, m3));

                    // Skip ghost cells outside problem domain
                    if (AMREX_D_TERM(m[0] < domain.loVect()[0], ||
                        m[1] < domain.loVect()[1], ||
                        m[2] < domain.loVect()[2])) continue;
                    if (AMREX_D_TERM(m[0] > domain.hiVect()[0] + 1, ||
                        m[1] > domain.hiVect()[1] + 1, ||
                        m[2] > domain.hiVect()[2] + 1)) continue;

                    // Zero the outermost ghost ring of this box; it is
                    // refreshed afterwards by realFillBoundary/nodalSync.
                    if (AMREX_D_TERM(m[0] == bx.loVect()[0] - nghost || m[0] == bx.hiVect()[0] + nghost, ||
                        m[1] == bx.loVect()[1] - nghost || m[1] == bx.hiVect()[1] + nghost, ||
                        m[2] == bx.loVect()[2] - nghost || m[2] == bx.hiVect()[2] + nghost))
                    {
                        xfab(m, n) = 0.0;
                        continue;
                    }

                    //xfab(m,n) = xfab(m,n) + omega*(bfab(m,n) - Axfab(m,n))/diagfab(m,n);
                    // Weighted Jacobi update: x <- (1-w)*x + w*(b - Rx)/diag.
                    xfab(m, n) = (1. - m_omega) * xfab(m, n) + m_omega * (bfab(m, n) - Rxfab(m, n)) / diagfab(m, n);
                }
            }
        }
    }
    // Restore consistency of ghost nodes and shared nodes between boxes.
    amrex::Geometry geom = m_geom[amrlev][mglev];
    realFillBoundary(x, geom);
    nodalSync(amrlev, mglev, x);
}
160
161void Operator<Grid::Node>::normalize(int amrlev, int mglev, MultiFab& a_x) const
162{
163 BL_PROFILE("Operator::normalize()");
164 amrex::Box domain(m_geom[amrlev][mglev].Domain());
165 domain.convert(amrex::IntVect::TheNodeVector());
166
167 int ncomp = getNComp();
168 int nghost = 1; //x.nGrow();
169
170 if (!m_diagonal_computed)
171 Util::Abort(INFO, "Operator::Diagonal() must be called before using normalize");
172
173 for (MFIter mfi(a_x, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
174 {
175
176 Box bx = mfi.tilebox();
177 bx.grow(nghost);
178 bx = bx & domain;
179
180 amrex::Array4<amrex::Real> const& x = a_x.array(mfi);
181 amrex::Array4<const amrex::Real> const& diag = m_diag[amrlev][mglev]->array(mfi);
182
183 for (int n = 0; n < ncomp; n++)
184 {
185 amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) {
186
187 x(i, j, k) /= diag(i, j, k);
188
189 });
190 }
191 }
192}
193
194Operator<Grid::Node>::Operator(const Vector<Geometry>& a_geom,
195 const Vector<BoxArray>& a_grids,
196 const Vector<DistributionMapping>& a_dmap,
197 const LPInfo& a_info,
198 const Vector<FabFactory<FArrayBox> const*>& a_factory)
199{
200 BL_PROFILE("Operator::Operator()");
202
203 if (!(a_grids[0].ixType() == amrex::IndexType::TheNodeType()))
204 Util::Abort(INFO, "Operator must be defined using CELL CENTERED boxarrays.");
205
206 define(a_geom, a_grids, a_dmap, a_info, a_factory);
207}
208
211
212void Operator<Grid::Node>::define(const Vector<Geometry>& a_geom,
213 const Vector<BoxArray>& a_grids,
214 const Vector<DistributionMapping>& a_dmap,
215 const LPInfo& a_info,
216 const Vector<FabFactory<FArrayBox> const*>& a_factory)
217{
218 BL_PROFILE("Operator::~Operator()");
219
220 // Make sure we're not trying to parallelize in vain.
221 if (amrex::ParallelDescriptor::NProcs() > a_grids[0].size())
222 {
223 Util::Warning(INFO, "There are more processors than there are boxes in the amrlev=0 boxarray!!\n",
224 "(NProcs = ", amrex::ParallelDescriptor::NProcs(), ", a_grids[0].size() = ", a_grids[0].size(), ")\n",
225 "You should decrease max_grid_size or you will not get proper scaling!");
226 }
227
228 // This makes sure grids are node-centered;
229 Vector<BoxArray> cc_grids = a_grids;
230 for (auto& ba : cc_grids) {
231 ba.enclosedCells();
232 }
233
234 MLNodeLinOp::define(a_geom, a_grids, a_dmap, a_info, a_factory);
235
236 int nghost = 2;
237 // Resize the multifab containing the operator diagonal
238 m_diag.resize(m_num_amr_levels);
239 for (int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev)
240 {
241 m_diag[amrlev].resize(m_num_mg_levels[amrlev]);
242
243 for (int mglev = 0; mglev < m_num_mg_levels[amrlev]; ++mglev)
244 {
245 m_diag[amrlev][mglev].reset(new MultiFab(amrex::convert(m_grids[amrlev][mglev], amrex::IntVect::TheNodeVector()),
246 m_dmap[amrlev][mglev], getNComp(), nghost));
247 }
248 }
249
250 // We need to instantiate the m_lobc objects.
251 // WE DO NOT USE THEM - our BCs are implemented differently.
252 // But they need to be the right size or the code will segfault.
253 m_lobc.resize(getNComp(), { {AMREX_D_DECL(BCType::bogus,BCType::bogus,BCType::bogus)} });
254 m_hibc.resize(getNComp(), { {AMREX_D_DECL(BCType::bogus,BCType::bogus,BCType::bogus)} });
255}
256
/// \brief Mark coarse/fine interface nodes as valid in the residual mask so
/// their residual is not discarded during the multilevel solve.
///
/// \param amrlev AMR level whose mask is adjusted
/// \param resmsk residual mask (1 = include node), updated in place
void Operator<Grid::Node>::fixUpResidualMask(int amrlev, iMultiFab& resmsk)
{
    BL_PROFILE("Operator::fixUpResidualMask()");

    // Masks are built lazily; make sure they exist before reading them.
    if (!m_masks_built) buildMasks();

    const iMultiFab& cfmask = *m_nd_fine_mask[amrlev];

#ifdef _OPENMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    for (MFIter mfi(resmsk, TilingIfNotGPU()); mfi.isValid(); ++mfi)
    {
        const Box& bx = mfi.tilebox();
        Array4<int> const& rmsk = resmsk.array(mfi);
        Array4<int const> const& fmsk = cfmask.const_array(mfi);
        AMREX_HOST_DEVICE_PARALLEL_FOR_3D(bx, i, j, k,
        {
            // Nodes flagged as coarse/fine boundary are forced into the mask.
            if (fmsk(i,j,k) == amrex::nodelap_detail::crse_fine_node) rmsk(i,j,k) = 1;
        });
    }
}
279
// Prepare the operator for a solve: base-class setup, coarse/fine masks,
// coarsened coefficients, and a forced recomputation of the diagonal.
// NOTE(review): the signature line was lost in extraction — presumably
// `void Operator<Grid::Node>::prepareForSolve()`; confirm in Operator.H.
{
    BL_PROFILE("Operator::prepareForSolve()");
    MLNodeLinOp::prepareForSolve();
    buildMasks();
    averageDownCoeffs();
    // true => recompute even if a diagonal was cached from a previous solve.
    Diagonal(true);
}
288
/// \brief Restrict (coarsen) a fine-level nodal field onto the next coarser
/// multigrid level using a full-weighting stencil, with reduced stencils on
/// domain faces/edges/corners where neighbors are unavailable.
///
/// \param amrlev AMR level
/// \param cmglev coarse multigrid level (fine data lives on cmglev-1)
/// \param crse   output coarse field
/// \param fine   input fine field (ghost nodes filled here via applyBC)
void Operator<Grid::Node>::restriction(int amrlev, int cmglev, MultiFab& crse, MultiFab& fine) const
{
    BL_PROFILE("Operator::restriction()");

    applyBC(amrlev, cmglev - 1, fine, BCMode::Homogeneous, StateMode::Solution);

    amrex::Box cdomain = m_geom[amrlev][cmglev].Domain();
    cdomain.convert(amrex::IntVect::TheNodeVector());

    // If coarse and fine distributions don't match, restrict into a scratch
    // MultiFab on the fine distribution and ParallelCopy at the end.
    bool need_parallel_copy = !amrex::isMFIterSafe(crse, fine);
    MultiFab cfine;
    if (need_parallel_copy) {
        const BoxArray& ba = amrex::coarsen(fine.boxArray(), 2);
        cfine.define(ba, fine.DistributionMap(), fine.nComp(), fine.nGrow());
    }

    MultiFab* pcrse = (need_parallel_copy) ? &cfine : &crse;

    for (MFIter mfi(*pcrse, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
    {
        const Box& bx = mfi.tilebox();

        amrex::Array4<const amrex::Real> const& fdata = fine.array(mfi);
        amrex::Array4<amrex::Real> const& cdata = pcrse->array(mfi);

        const Dim3 lo = amrex::lbound(cdomain), hi = amrex::ubound(cdomain);


        for (int n = 0; n < crse.nComp(); n++)
        {
            // I,J,K == coarse coordinates
            // i,j,k == fine coordinates
            // Domain-boundary nodes use 1D/2D reduced stencils (no data
            // outside the domain); interior nodes use the 27-point
            // full-weighting stencil with weights 1/8, 1/16, 1/32, 1/64.
            amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE(int I, int J, int K) {
                int i = 2 * I, j = 2 * J, k = 2 * K;

                if ((I == lo.x || I == hi.x) &&
                    (J == lo.y || J == hi.y) &&
                    (K == lo.z || K == hi.z)) // Corner
                {
                    cdata(I, J, K, n) = fdata(i, j, k, n);
                }
                else if ((J == lo.y || J == hi.y) &&
                    (K == lo.z || K == hi.z)) // X edge
                {
                    cdata(I, J, K, n) = 0.25 * fdata(i - 1, j, k, n) + 0.5 * fdata(i, j, k, n) + 0.25 * fdata(i + 1, j, k, n);
                }
                else if ((K == lo.z || K == hi.z) &&
                    (I == lo.x || I == hi.x)) // Y edge
                {
                    cdata(I, J, K, n) = 0.25 * fdata(i, j - 1, k, n) + 0.5 * fdata(i, j, k, n) + 0.25 * fdata(i, j + 1, k, n);
                }
                else if ((I == lo.x || I == hi.x) &&
                    (J == lo.y || J == hi.y)) // Z edge
                {
                    cdata(I, J, K, n) = 0.25 * fdata(i, j, k - 1, n) + 0.5 * fdata(i, j, k, n) + 0.25 * fdata(i, j, k + 1, n);
                }
                else if (I == lo.x || I == hi.x) // X face
                {
                    cdata(I, J, K, n) =
                        (+fdata(i, j - 1, k - 1, n) + 2.0 * fdata(i, j, k - 1, n) + fdata(i, j + 1, k - 1, n)
                            + 2.0 * fdata(i, j - 1, k, n) + 4.0 * fdata(i, j, k, n) + 2.0 * fdata(i, j + 1, k, n)
                            + fdata(i, j - 1, k + 1, n) + 2.0 * fdata(i, j, k + 1, n) + fdata(i, j + 1, k + 1, n)) / 16.0;
                }
                else if (J == lo.y || J == hi.y) // Y face
                {
                    cdata(I, J, K, n) =
                        (+fdata(i - 1, j, k - 1, n) + 2.0 * fdata(i - 1, j, k, n) + fdata(i - 1, j, k + 1, n)
                            + 2.0 * fdata(i, j, k - 1, n) + 4.0 * fdata(i, j, k, n) + 2.0 * fdata(i, j, k + 1, n)
                            + fdata(i + 1, j, k - 1, n) + 2.0 * fdata(i + 1, j, k, n) + fdata(i + 1, j, k + 1, n)) / 16.0;
                }
                else if (K == lo.z || K == hi.z) // Z face
                {
                    cdata(I, J, K, n) =
                        (+fdata(i - 1, j - 1, k, n) + 2.0 * fdata(i, j - 1, k, n) + fdata(i + 1, j - 1, k, n)
                            + 2.0 * fdata(i - 1, j, k, n) + 4.0 * fdata(i, j, k, n) + 2.0 * fdata(i + 1, j, k, n)
                            + fdata(i - 1, j + 1, k, n) + 2.0 * fdata(i, j + 1, k, n) + fdata(i + 1, j + 1, k, n)) / 16.0;
                }
                else // Interior
                    cdata(I, J, K, n) =
                    (fdata(i - 1, j - 1, k - 1, n) + fdata(i - 1, j - 1, k + 1, n) + fdata(i - 1, j + 1, k - 1, n) + fdata(i - 1, j + 1, k + 1, n) +
                        fdata(i + 1, j - 1, k - 1, n) + fdata(i + 1, j - 1, k + 1, n) + fdata(i + 1, j + 1, k - 1, n) + fdata(i + 1, j + 1, k + 1, n)) / 64.0
                    +
                    (fdata(i, j - 1, k - 1, n) + fdata(i, j - 1, k + 1, n) + fdata(i, j + 1, k - 1, n) + fdata(i, j + 1, k + 1, n) +
                        fdata(i - 1, j, k - 1, n) + fdata(i + 1, j, k - 1, n) + fdata(i - 1, j, k + 1, n) + fdata(i + 1, j, k + 1, n) +
                        fdata(i - 1, j - 1, k, n) + fdata(i - 1, j + 1, k, n) + fdata(i + 1, j - 1, k, n) + fdata(i + 1, j + 1, k, n)) / 32.0
                    +
                    (fdata(i - 1, j, k, n) + fdata(i, j - 1, k, n) + fdata(i, j, k - 1, n) +
                        fdata(i + 1, j, k, n) + fdata(i, j + 1, k, n) + fdata(i, j, k + 1, n)) / 16.0
                    +
                    fdata(i, j, k, n) / 8.0;
            });
        }
    }

    if (need_parallel_copy) {
        crse.ParallelCopy(cfine);
    }

    // Re-synchronize ghost nodes and box-shared nodes on the coarse level.
    amrex::Geometry geom = m_geom[amrlev][cmglev];
    realFillBoundary(crse, geom);
    nodalSync(amrlev, cmglev, crse);
}
391
/// \brief Prolong (interpolate) a coarse-level nodal correction onto the next
/// finer multigrid level and ADD it to \p fine (multigrid correction step).
///
/// \param amrlev AMR level
/// \param fmglev fine multigrid level (coarse data lives on fmglev+1)
/// \param fine   fine field, incremented in place by the interpolant
/// \param crse   coarse correction to interpolate
void Operator<Grid::Node>::interpolation(int amrlev, int fmglev, MultiFab& fine, const MultiFab& crse) const
{
    BL_PROFILE("Operator::interpolation()");
    //int nghost = getNGrow();
    amrex::Box fdomain = m_geom[amrlev][fmglev].Domain(); fdomain.convert(amrex::IntVect::TheNodeVector());

    // If distributions differ, first ParallelCopy the coarse data onto the
    // coarsened fine BoxArray so MFIter can walk both safely.
    bool need_parallel_copy = !amrex::isMFIterSafe(crse, fine);
    MultiFab cfine;
    const MultiFab* cmf = &crse;
    if (need_parallel_copy) {
        const BoxArray& ba = amrex::coarsen(fine.boxArray(), 2);
        cfine.define(ba, fine.DistributionMap(), crse.nComp(), crse.nGrow());
        cfine.ParallelCopy(crse);
        cmf = &cfine;
    }

    for (MFIter mfi(fine, false); mfi.isValid(); ++mfi)
    {
        const Box& fine_bx = mfi.validbox() & fdomain;
        const Box& course_bx = amrex::coarsen(fine_bx, 2);
        const Box& tmpbx = amrex::refine(course_bx, 2);
        // Build the interpolant in a temporary so the final update is a
        // single additive pass over fine_bx.
        FArrayBox tmpfab;
        tmpfab.resize(tmpbx, fine.nComp());
        tmpfab.setVal(0.0);
        const amrex::FArrayBox& crsefab = (*cmf)[mfi];

        amrex::Array4<const amrex::Real> const& cdata = crsefab.const_array();
        amrex::Array4<amrex::Real> const& fdata = tmpfab.array();

        for (int n = 0; n < crse.nComp(); n++)
        {
            // I,J,K == coarse coordinates
            // i,j,k == fine coordinates
            // Trilinear interpolation: the parity of (i,j,k) determines
            // whether the fine node coincides with a coarse node or sits on
            // an edge / face / cell center of the coarse lattice.
            amrex::ParallelFor(fine_bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) {

                int I = i / 2, J = j / 2, K = k / 2;

                if (i % 2 == 0 && j % 2 == 0 && k % 2 == 0) // Coincident
                    fdata(i, j, k, n) = cdata(I, J, K, n);
                else if (j % 2 == 0 && k % 2 == 0) // X Edge
                    fdata(i, j, k, n) = 0.5 * (cdata(I, J, K, n) + cdata(I + 1, J, K, n));
                else if (k % 2 == 0 && i % 2 == 0) // Y Edge
                    fdata(i, j, k, n) = 0.5 * (cdata(I, J, K, n) + cdata(I, J + 1, K, n));
                else if (i % 2 == 0 && j % 2 == 0) // Z Edge
                    fdata(i, j, k, n) = 0.5 * (cdata(I, J, K, n) + cdata(I, J, K + 1, n));
                else if (i % 2 == 0) // X Face
                    fdata(i, j, k, n) = 0.25 * (cdata(I, J, K, n) + cdata(I, J + 1, K, n) +
                        cdata(I, J, K + 1, n) + cdata(I, J + 1, K + 1, n));
                else if (j % 2 == 0) // Y Face
                    fdata(i, j, k, n) = 0.25 * (cdata(I, J, K, n) + cdata(I, J, K + 1, n) +
                        cdata(I + 1, J, K, n) + cdata(I + 1, J, K + 1, n));
                else if (k % 2 == 0) // Z Face
                    fdata(i, j, k, n) = 0.25 * (cdata(I, J, K, n) + cdata(I + 1, J, K, n) +
                        cdata(I, J + 1, K, n) + cdata(I + 1, J + 1, K, n));
                else // Center
                    fdata(i, j, k, n) = 0.125 * (cdata(I, J, K, n) +
                        cdata(I + 1, J, K, n) + cdata(I, J + 1, K, n) + cdata(I, J, K + 1, n) +
                        cdata(I, J + 1, K + 1, n) + cdata(I + 1, J, K + 1, n) + cdata(I + 1, J + 1, K, n) +
                        cdata(I + 1, J + 1, K + 1, n));

            });
        }
        // Accumulate (not overwrite): interpolation adds the MG correction.
        fine[mfi].plus(tmpfab, fine_bx, fine_bx, 0, 0, fine.nComp());
    }

    // Re-synchronize ghost nodes and box-shared nodes on the fine level.
    amrex::Geometry geom = m_geom[amrlev][fmglev];
    realFillBoundary(fine, geom);
    nodalSync(amrlev, fmglev, fine);
}
461
462void Operator<Grid::Node>::averageDownSolutionRHS(int camrlev, MultiFab& crse_sol, MultiFab& /*crse_rhs*/,
463 const MultiFab& fine_sol, const MultiFab& /*fine_rhs*/)
464{
465 BL_PROFILE("Operator::averageDownSolutionRHS()");
466 const auto& amrrr = AMRRefRatio(camrlev);
467 amrex::average_down(fine_sol, crse_sol, 0, crse_sol.nComp(), amrrr);
468
469 if (isSingular(0))
470 {
471 Util::Abort(INFO, "Singular operators not supported!");
472 }
473
474}
475
476void Operator<Grid::Node>::realFillBoundary(MultiFab& phi, const Geometry& geom)
477{
478 Util::RealFillBoundary(phi, geom);
479}
480
481void Operator<Grid::Node>::applyBC(int amrlev, int mglev, MultiFab& phi, BCMode/* bc_mode*/,
482 amrex::MLLinOp::StateMode /**/, bool skip_fillboundary) const
483{
484 BL_PROFILE("Operator::applyBC()");
485
486 const Geometry& geom = m_geom[amrlev][mglev];
487
488 if (!skip_fillboundary) {
489
490 realFillBoundary(phi, geom);
491 }
492}
493
/// \brief Access one registered coefficient FAB.
///
/// \param num    index of the registered fab set (registration order)
/// \param amrlev AMR level
/// \param mglev  multigrid level
/// \param mfi    iterator identifying the box
/// \return const reference to the coefficient FArrayBox
const amrex::FArrayBox&
Operator<Grid::Node>::GetFab(const int num, const int amrlev, const int mglev, const amrex::MFIter& mfi) const
{
    BL_PROFILE("Operator::GetFab()");
    return m_a_coeffs[num][amrlev][mglev][mfi];
}
501
/// \brief Register a per-AMR-level coefficient field with the operator.
/// Storage is allocated on every MG level; the data itself is copied onto
/// the finest MG level (mglev 0) only — coarser levels are filled later by
/// averageDownCoeffs().
///
/// \param input one MultiFab per AMR level
void Operator<Grid::Node>::RegisterNewFab(amrex::Vector<amrex::MultiFab>& input)
{
    BL_PROFILE("Operator::RegisterNewFab()");
    /// \todo assertions here
    m_a_coeffs.resize(m_a_coeffs.size() + 1);
    m_a_coeffs[m_num_a_fabs].resize(m_num_amr_levels);
    for (int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev)
    {
        m_a_coeffs[m_num_a_fabs][amrlev].resize(m_num_mg_levels[amrlev]);
        // Allocate storage on every multigrid level of this AMR level.
        for (int mglev = 0; mglev < m_num_mg_levels[amrlev]; ++mglev)
            m_a_coeffs[m_num_a_fabs][amrlev][mglev].define(m_grids[amrlev][mglev],
                m_dmap[amrlev][mglev],
                input[amrlev].nComp(),
                input[amrlev].nGrow());

        // Copy the caller's data onto the finest MG level only.
        amrex::MultiFab::Copy(m_a_coeffs[m_num_a_fabs][amrlev][0],
            input[amrlev], 0, 0,
            input[amrlev].nComp(),
            input[amrlev].nGrow());
    }
    m_num_a_fabs++;
}
525
526
/// \brief Overload of RegisterNewFab for unique_ptr-owned MultiFabs.
/// Same semantics as the by-value overload: allocate on every MG level,
/// copy data onto MG level 0 only.
///
/// \param input one owned MultiFab per AMR level
void Operator<Grid::Node>::RegisterNewFab(amrex::Vector<std::unique_ptr<amrex::MultiFab> >& input)
{
    BL_PROFILE("Operator::RegisterNewFab()");
    /// \todo assertions here
    m_a_coeffs.resize(m_a_coeffs.size() + 1);
    m_a_coeffs[m_num_a_fabs].resize(m_num_amr_levels);
    for (int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev)
    {
        m_a_coeffs[m_num_a_fabs][amrlev].resize(m_num_mg_levels[amrlev]);
        // Allocate storage on every multigrid level of this AMR level.
        for (int mglev = 0; mglev < m_num_mg_levels[amrlev]; ++mglev)
            m_a_coeffs[m_num_a_fabs][amrlev][mglev].define(m_grids[amrlev][mglev],
                m_dmap[amrlev][mglev],
                input[amrlev]->nComp(),
                input[amrlev]->nGrow());

        // Copy the caller's data onto the finest MG level only.
        amrex::MultiFab::Copy(m_a_coeffs[m_num_a_fabs][amrlev][0],
            *input[amrlev], 0, 0,
            input[amrlev]->nComp(),
            input[amrlev]->nGrow());
    }
    m_num_a_fabs++;
}
550
/// \brief Correct the coarse-level residual along the coarse/fine interface
/// by restricting the fine-level residual (same full-weighting stencil as
/// restriction()) at fine-fine and coarse-fine nodes.
///
/// \param crse_amrlev coarse AMR level being corrected
/// \param res         coarse residual, corrected in place
/// \param fine_res    fine residual (ghost nodes filled here)
void Operator<Grid::Node>::reflux(int crse_amrlev,
    MultiFab& res, const MultiFab& /*crse_sol*/, const MultiFab& /*crse_rhs*/,
    MultiFab& fine_res, MultiFab& /*fine_sol*/, const MultiFab& /*fine_rhs*/) const
{
    BL_PROFILE("Operator::Elastic::reflux()");

    // NOTE(review): ncomp is fixed to AMREX_SPACEDIM here (displacement-like
    // fields), unlike other methods which use nComp() — confirm intended.
    int ncomp = AMREX_SPACEDIM;

    amrex::Box cdomain(m_geom[crse_amrlev][0].Domain());
    cdomain.convert(amrex::IntVect::TheNodeVector());

    const Geometry& cgeom = m_geom[crse_amrlev][0];

    const BoxArray& fba = fine_res.boxArray();
    const DistributionMapping& fdm = fine_res.DistributionMap();

    // Scratch field on the coarsened fine BoxArray, seeded with the current
    // coarse residual so untouched nodes pass through unchanged.
    MultiFab fine_res_for_coarse(amrex::coarsen(fba, 2), fdm, ncomp, 2);
    fine_res_for_coarse.ParallelCopy(res, 0, 0, ncomp, 0, 0, cgeom.periodicity());

    applyBC(crse_amrlev + 1, 0, fine_res, BCMode::Inhomogeneous, StateMode::Solution);

    /// \todo Replace with Enum
    // const int coarse_coarse_node = 0;
    const int coarse_fine_node = 1;
    const int fine_fine_node = 2;

    // Node/cell masks distinguishing coarse-only, interface, and fine-covered
    // regions, copied onto the coarsened fine distribution.
    amrex::iMultiFab nodemask(amrex::coarsen(fba, 2), fdm, 1, 2);
    nodemask.ParallelCopy(*m_nd_fine_mask[crse_amrlev], 0, 0, 1, 0, 0, cgeom.periodicity());

    amrex::iMultiFab cellmask(amrex::convert(amrex::coarsen(fba, 2), amrex::IntVect::TheCellVector()), fdm, 1, 2);
    cellmask.ParallelCopy(*m_cc_fine_mask[crse_amrlev], 0, 0, 1, 1, 1, cgeom.periodicity());

    for (MFIter mfi(fine_res_for_coarse, false); mfi.isValid(); ++mfi)
    {
        const Box& bx = mfi.validbox();

        amrex::Array4<const int> const& nmask = nodemask.array(mfi);
        //amrex::Array4<const int> const& cmask = cellmask.array(mfi);

        amrex::Array4<amrex::Real> const& cdata = fine_res_for_coarse.array(mfi);
        amrex::Array4<const amrex::Real> const& fdata = fine_res.array(mfi);

        const Dim3 lo = amrex::lbound(cdomain), hi = amrex::ubound(cdomain);

        for (int n = 0; n < fine_res.nComp(); n++)
        {
            // I,J,K == coarse coordinates
            // i,j,k == fine coordinates
            // Same boundary-aware full-weighting stencil as restriction(),
            // applied only where the mask marks fine-covered or interface.
            amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE(int I, int J, int K) {
                int i = I * 2, j = J * 2, k = K * 2;

                if (nmask(I, J, K) == fine_fine_node || nmask(I, J, K) == coarse_fine_node)
                {
                    if ((I == lo.x || I == hi.x) &&
                        (J == lo.y || J == hi.y) &&
                        (K == lo.z || K == hi.z)) // Corner
                        cdata(I, J, K, n) = fdata(i, j, k, n);
                    else if ((J == lo.y || J == hi.y) &&
                        (K == lo.z || K == hi.z)) // X edge
                        cdata(I, J, K, n) = 0.25 * fdata(i - 1, j, k, n) + 0.5 * fdata(i, j, k, n) + 0.25 * fdata(i + 1, j, k, n);
                    else if ((K == lo.z || K == hi.z) &&
                        (I == lo.x || I == hi.x)) // Y edge
                        cdata(I, J, K, n) = 0.25 * fdata(i, j - 1, k, n) + 0.5 * fdata(i, j, k, n) + 0.25 * fdata(i, j + 1, k, n);
                    else if ((I == lo.x || I == hi.x) &&
                        (J == lo.y || J == hi.y)) // Z edge
                        cdata(I, J, K, n) = 0.25 * fdata(i, j, k - 1, n) + 0.5 * fdata(i, j, k, n) + 0.25 * fdata(i, j, k + 1, n);
                    else if (I == lo.x || I == hi.x) // X face
                        cdata(I, J, K, n) =
                        (+fdata(i, j - 1, k - 1, n) + 2.0 * fdata(i, j, k - 1, n) + fdata(i, j + 1, k - 1, n)
                            + 2.0 * fdata(i, j - 1, k, n) + 4.0 * fdata(i, j, k, n) + 2.0 * fdata(i, j + 1, k, n)
                            + fdata(i, j - 1, k + 1, n) + 2.0 * fdata(i, j, k + 1, n) + fdata(i, j + 1, k + 1, n)) / 16.0;
                    else if (J == lo.y || J == hi.y) // Y face
                        cdata(I, J, K, n) =
                        (+fdata(i - 1, j, k - 1, n) + 2.0 * fdata(i - 1, j, k, n) + fdata(i - 1, j, k + 1, n)
                            + 2.0 * fdata(i, j, k - 1, n) + 4.0 * fdata(i, j, k, n) + 2.0 * fdata(i, j, k + 1, n)
                            + fdata(i + 1, j, k - 1, n) + 2.0 * fdata(i + 1, j, k, n) + fdata(i + 1, j, k + 1, n)) / 16.0;
                    else if (K == lo.z || K == hi.z) // Z face
                        cdata(I, J, K, n) =
                        (+fdata(i - 1, j - 1, k, n) + 2.0 * fdata(i, j - 1, k, n) + fdata(i + 1, j - 1, k, n)
                            + 2.0 * fdata(i - 1, j, k, n) + 4.0 * fdata(i, j, k, n) + 2.0 * fdata(i + 1, j, k, n)
                            + fdata(i - 1, j + 1, k, n) + 2.0 * fdata(i, j + 1, k, n) + fdata(i + 1, j + 1, k, n)) / 16.0;
                    else // Interior
                        cdata(I, J, K, n) =
                        (fdata(i - 1, j - 1, k - 1, n) + fdata(i - 1, j - 1, k + 1, n) + fdata(i - 1, j + 1, k - 1, n) + fdata(i - 1, j + 1, k + 1, n) +
                            fdata(i + 1, j - 1, k - 1, n) + fdata(i + 1, j - 1, k + 1, n) + fdata(i + 1, j + 1, k - 1, n) + fdata(i + 1, j + 1, k + 1, n)) / 64.0
                        +
                        (fdata(i, j - 1, k - 1, n) + fdata(i, j - 1, k + 1, n) + fdata(i, j + 1, k - 1, n) + fdata(i, j + 1, k + 1, n) +
                            fdata(i - 1, j, k - 1, n) + fdata(i + 1, j, k - 1, n) + fdata(i - 1, j, k + 1, n) + fdata(i + 1, j, k + 1, n) +
                            fdata(i - 1, j - 1, k, n) + fdata(i - 1, j + 1, k, n) + fdata(i + 1, j - 1, k, n) + fdata(i + 1, j + 1, k, n)) / 32.0
                        +
                        (fdata(i - 1, j, k, n) + fdata(i, j - 1, k, n) + fdata(i, j, k - 1, n) +
                            fdata(i + 1, j, k, n) + fdata(i, j + 1, k, n) + fdata(i, j, k + 1, n)) / 16.0
                        +
                        fdata(i, j, k, n) / 8.0;
                }

            });
        }
    }

    // Copy the fine residual restricted onto the coarse grid
    // into the final residual.
    res.ParallelCopy(fine_res_for_coarse, 0, 0, ncomp, 0, 0, cgeom.periodicity());

    const int mglev = 0;

    // Sync up ghost nodes
    amrex::Geometry geom = m_geom[crse_amrlev][mglev];
    realFillBoundary(res, geom);
    nodalSync(crse_amrlev, mglev, res);
    return;
}
663
664void
665Operator<Grid::Node>::solutionResidual(int amrlev, MultiFab& resid, MultiFab& x, const MultiFab& b,
666 const MultiFab* /*crse_bcdata*/)
667{
668 const int mglev = 0;
669 const int ncomp = b.nComp();
670 apply(amrlev, mglev, resid, x, BCMode::Inhomogeneous, StateMode::Solution);
671 MultiFab::Xpay(resid, -1.0, b, 0, 0, ncomp, 2);
672 amrex::Geometry geom = m_geom[amrlev][mglev];
673 realFillBoundary(resid, geom);
674}
675
676void
677Operator<Grid::Node>::correctionResidual(int amrlev, int mglev, MultiFab& resid, MultiFab& x, const MultiFab& b,
678 BCMode /*bc_mode*/, const MultiFab* /*crse_bcdata*/)
679{
680 resid.setVal(0.0);
681 apply(amrlev, mglev, resid, x, BCMode::Homogeneous, StateMode::Correction);
682 int ncomp = b.nComp();
683 MultiFab::Xpay(resid, -1.0, b, 0, 0, ncomp, resid.nGrow());
684 amrex::Geometry geom = m_geom[amrlev][mglev];
685 realFillBoundary(resid, geom);
686}
687
688
689
690
// Cell-centered operator constructor body.
// NOTE(review): the signature line was lost in extraction — presumably
// `Operator<Grid::Cell>::Operator()`; confirm in Operator.H. The only work
// here is recording the cell-centered index type.
{
    m_ixtype = amrex::IntVect::TheCellVector();
}
695
/// \brief Set up the cell-centered operator: base-class define plus
/// Dirichlet/Periodic BC flags derived from the BC object.
///
/// NOTE(review): one parameter line (the BC reference bound to `a_bc`
/// below, presumably `BC::BC& a_bc`) was lost in extraction — confirm the
/// full signature in Operator.H.
void
Operator<Grid::Cell>::define(amrex::Vector<amrex::Geometry> a_geom,
    const amrex::Vector<amrex::BoxArray>& a_grids,
    const amrex::Vector<amrex::DistributionMapping>& a_dmap,
    const amrex::LPInfo& a_info,
    const amrex::Vector<amrex::FabFactory<amrex::FArrayBox> const*>& a_factory)
{
    // Keep a non-owning pointer to the caller's BC object.
    m_bc = &a_bc;

    std::array<int, AMREX_SPACEDIM> is_periodic = m_bc->IsPeriodic();

    MLCellLinOp::define(a_geom, a_grids, a_dmap, a_info, a_factory);

    Util::Warning(INFO, "This section of code has not been tested.");
    // Per component: Periodic in periodic directions, Dirichlet otherwise.
    for (int n = 0; n < getNComp(); n++)
    {
        m_lobc.push_back({ AMREX_D_DECL(is_periodic[0] ? amrex::LinOpBCType::Periodic : amrex::LinOpBCType::Dirichlet,
            is_periodic[1] ? amrex::LinOpBCType::Periodic : amrex::LinOpBCType::Dirichlet,
            is_periodic[2] ? amrex::LinOpBCType::Periodic : amrex::LinOpBCType::Dirichlet) });
        m_hibc.push_back({ AMREX_D_DECL(is_periodic[0] ? amrex::LinOpBCType::Periodic : amrex::LinOpBCType::Dirichlet,
            is_periodic[1] ? amrex::LinOpBCType::Periodic : amrex::LinOpBCType::Dirichlet,
            is_periodic[2] ? amrex::LinOpBCType::Periodic : amrex::LinOpBCType::Dirichlet) });
    }

    // No coarse-level boundary data is supplied on any level.
    for (int ilev = 0; ilev < a_geom.size(); ++ilev)
        setLevelBC(ilev, nullptr);

}
725
726
// Prepare the cell-centered operator for a solve — pure delegation to the
// AMReX base class.
// NOTE(review): the signature line was lost in extraction — presumably
// `Operator<Grid::Cell>::prepareForSolve()`; confirm in Operator.H.
void
{
    MLCellLinOp::prepareForSolve();
}
732
/// \brief Allocate per-box boundary-condition type and location storage over
/// the given BoxArray/DistributionMapping.
///
/// \param ba boxes the BC data is distributed over
/// \param dm processor assignment for those boxes
Operator<Grid::Cell>::BndryCondLoc::BndryCondLoc(const amrex::BoxArray& ba, const amrex::DistributionMapping& dm)
    : bcond(ba, dm),
    bcloc(ba, dm)
{
}
738
739void
740Operator<Grid::Cell>::BndryCondLoc::setLOBndryConds(const amrex::Geometry& /*geom*/, const amrex::Real* /*dx*/,
741 const amrex::Array<BCType, AMREX_SPACEDIM>& /*lobc*/,
742 const amrex::Array<BCType, AMREX_SPACEDIM>& /*hibc*/,
743 int /*ratio*/, const amrex::RealVect& /*a_loc*/)
744{
745 Util::Warning(INFO, "This code has not been properlyt tested");
746}
747
748
// Coarsen every registered coefficient fab down its multigrid hierarchy.
// NOTE(review): the signature line was lost in extraction — presumably
// `Operator<Grid::Cell>::averageDownCoeffs()`; confirm in Operator.H.
// NOTE(review): the loop coarsens within each AMR level's MG stack but no
// inter-AMR-level averaging is visible here — verify that is intentional.
{
    for (int i = 0; i < m_num_a_fabs; i++)
    {
        for (int amrlev = m_num_amr_levels - 1; amrlev > 0; --amrlev)
        {
            auto& fine_a_coeffs = m_a_coeffs[i][amrlev];
            averageDownCoeffsSameAmrLevel(fine_a_coeffs);
        }
        averageDownCoeffsSameAmrLevel(m_a_coeffs[i][0]);
    }
}
762
// Average a coefficient fab from each MG level down to the next-coarser MG
// level within one AMR level, using the MG coarsening ratio.
// NOTE(review): the signature line was lost in extraction — the body reads a
// parameter `a`, presumably `Vector<MultiFab>& a`; confirm in Operator.H.
{
    int nmglevs = a.size();
    for (int mglev = 1; mglev < nmglevs; ++mglev)
    {
        // a[mglev] receives the restriction of the finer a[mglev-1].
        amrex::average_down(a[mglev - 1], a[mglev], 0, a[0].nComp(), mg_coarsen_ratio);
    }
}
772
773
774
/// \brief Access one registered coefficient FAB (cell-centered operator).
///
/// \param num    index of the registered fab set (registration order)
/// \param amrlev AMR level
/// \param mglev  multigrid level
/// \param mfi    iterator identifying the box
/// \return const reference to the coefficient FArrayBox
const amrex::FArrayBox&
Operator<Grid::Cell>::GetFab(const int num, const int amrlev, const int mglev, const amrex::MFIter& mfi) const
{
    return m_a_coeffs[num][amrlev][mglev][mfi];
}
780
781
782void
783Operator<Grid::Cell>::RegisterNewFab(amrex::Vector<amrex::MultiFab>& input)
784{
785 /// \todo assertions here
786 m_a_coeffs.resize(m_a_coeffs.size() + 1);
787 m_a_coeffs[m_num_a_fabs].resize(m_num_amr_levels);
788 for (int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev)
789 {
790 m_a_coeffs[m_num_a_fabs][amrlev].resize(m_num_mg_levels[amrlev]);
791 for (int mglev = 0; mglev < m_num_mg_levels[amrlev]; ++mglev)
792 m_a_coeffs[m_num_a_fabs][amrlev][mglev].define(m_grids[amrlev][mglev],
793 m_dmap[amrlev][mglev],
794 input[amrlev].nComp(),
795 input[amrlev].nGrow());
796
797 amrex::MultiFab::Copy(m_a_coeffs[m_num_a_fabs][amrlev][0],
798 input[amrlev], 0, 0,
799 input[amrlev].nComp(),
800 input[amrlev].nGrow());
801 }
802 m_num_a_fabs++;
803}
804
805
806void
807Operator<Grid::Cell>::RegisterNewFab(amrex::Vector<std::unique_ptr<amrex::MultiFab> >& input)
808{
809 /// \todo assertions here
810 m_a_coeffs.resize(m_a_coeffs.size() + 1);
811 m_a_coeffs[m_num_a_fabs].resize(m_num_amr_levels);
812 for (int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev)
813 {
814 m_a_coeffs[m_num_a_fabs][amrlev].resize(m_num_mg_levels[amrlev]);
815 for (int mglev = 0; mglev < m_num_mg_levels[amrlev]; ++mglev)
816 m_a_coeffs[m_num_a_fabs][amrlev][mglev].define(m_grids[amrlev][mglev],
817 m_dmap[amrlev][mglev],
818 input[amrlev]->nComp(),
819 input[amrlev]->nGrow());
820
821 amrex::MultiFab::Copy(m_a_coeffs[m_num_a_fabs][amrlev][0],
822 *input[amrlev], 0, 0,
823 input[amrlev]->nComp(),
824 input[amrlev]->nGrow());
825 }
826 m_num_a_fabs++;
827}
828
829
830}
#define INFO
Definition Util.H:20
Definition BC.H:42
virtual amrex::Array< int, AMREX_SPACEDIM > IsPeriodic()
Definition BC.H:112
static std::string Yellow
Definition Color.H:22
static std::string Reset
Definition Color.H:8
Documentation for operator namespace.
Definition Diagonal.cpp:14
constexpr amrex::IntVect AMREX_D_DECL(Operator< Grid::Cell >::dx, Operator< Grid::Cell >::dy, Operator< Grid::Cell >::dz)
AMREX_FORCE_INLINE void RealFillBoundary(amrex::FabArray< amrex::BaseFab< T > > &a_mf, const amrex::Geometry &)
Definition Util.H:322
void Abort(const char *msg)
Definition Util.cpp:170
void Warning(std::string file, std::string func, int line, Args const &... args)
Definition Util.H:181
void Message(std::string file, std::string func, int line, Args const &... args)
Definition Util.H:141