Alamo
Elastic.cpp
1 // TODO: Remove these
2 
3 #include "Elastic.H"
4 #include "Set/Set.H"
5 
6 #include "Numeric/Stencil.H"
7 namespace Operator
8 {
9 template<int SYM>
10 Elastic<SYM>::Elastic(const Vector<Geometry>& a_geom,
11  const Vector<BoxArray>& a_grids,
12  const Vector<DistributionMapping>& a_dmap,
13  const LPInfo& a_info)
14 {
15  BL_PROFILE("Operator::Elastic::Elastic()");
16 
17  define(a_geom, a_grids, a_dmap, a_info);
18 }
19 
20 template<int SYM>
21 Elastic<SYM>::~Elastic()
22 {}
23 
24 template<int SYM>
25 void
26 Elastic<SYM>::define(const Vector<Geometry>& a_geom,
27  const Vector<BoxArray>& a_grids,
28  const Vector<DistributionMapping>& a_dmap,
29  const LPInfo& a_info,
30  const Vector<FabFactory<FArrayBox> const*>& a_factory)
31 {
32  BL_PROFILE("Operator::Elastic::define()");
33 
34  Operator::define(a_geom, a_grids, a_dmap, a_info, a_factory);
35 
36  int model_nghost = 2;
37 
38  m_ddw_mf.resize(m_num_amr_levels);
39  m_psi_mf.resize(m_num_amr_levels);
40  for (int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev)
41  {
42  m_ddw_mf[amrlev].resize(m_num_mg_levels[amrlev]);
43  m_psi_mf[amrlev].resize(m_num_mg_levels[amrlev]);
44  for (int mglev = 0; mglev < m_num_mg_levels[amrlev]; ++mglev)
45  {
46  m_ddw_mf[amrlev][mglev].reset(new MultiTab(amrex::convert(m_grids[amrlev][mglev],
47  amrex::IntVect::TheNodeVector()),
48  m_dmap[amrlev][mglev], 1, model_nghost));
49  m_psi_mf[amrlev][mglev].reset(new MultiFab(m_grids[amrlev][mglev],
50  m_dmap[amrlev][mglev], 1, model_nghost));
51 
52  if (!m_psi_set) m_psi_mf[amrlev][mglev]->setVal(1.0);
53  }
54  }
55 }
56 
57 template <int SYM>
58 void
59 Elastic<SYM>::SetModel(Set::Matrix4<AMREX_SPACEDIM, SYM>& a_model)
60 {
61  for (int amrlev = 0; amrlev < m_num_amr_levels; amrlev++)
62  {
63  amrex::Box domain(m_geom[amrlev][0].Domain());
64  domain.convert(amrex::IntVect::TheNodeVector());
65 
66  int nghost = m_ddw_mf[amrlev][0]->nGrow();
67 
68  for (MFIter mfi(*m_ddw_mf[amrlev][0], amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
69  {
70  Box bx = mfi.tilebox();
71  bx.grow(nghost); // Expand to cover first layer of ghost nodes
72  bx = bx & domain; // Take intersection of box and the problem domain
73 
74  amrex::Array4<MATRIX4> const& ddw = (*(m_ddw_mf[amrlev][0])).array(mfi);
75 
76  amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) {
77  ddw(i, j, k) = a_model;
78 
79 #ifdef AMREX_DEBUG
80  if (ddw(i, j, k).contains_nan()) Util::Abort(INFO, "model is nan at (", i, ",", j, ",", k, "), amrlev=", amrlev);
81 #endif
82  });
83  }
84  }
85  m_model_set = true;
86 }
87 
88 template <int SYM>
89 void
90 Elastic<SYM>::SetModel(int amrlev, const amrex::FabArray<amrex::BaseFab<MATRIX4> >& a_model)
91 {
92  BL_PROFILE("Operator::Elastic::SetModel()");
93 
94  amrex::Box domain(m_geom[amrlev][0].Domain());
95  domain.convert(amrex::IntVect::TheNodeVector());
96 
97  if (a_model.boxArray() != m_ddw_mf[amrlev][0]->boxArray()) Util::Abort(INFO, "Inconsistent box arrays\n", "a_model.boxArray()=\n", a_model.boxArray(), "\n but the current box array is \n", m_ddw_mf[amrlev][0]->boxArray());
98  if (a_model.DistributionMap() != m_ddw_mf[amrlev][0]->DistributionMap()) Util::Abort(INFO, "Inconsistent distribution maps");
99  if (a_model.nComp() != m_ddw_mf[amrlev][0]->nComp()) Util::Abort(INFO, "Inconsistent # of components - should be ", m_ddw_mf[amrlev][0]->nComp());
100  if (a_model.nGrow() != m_ddw_mf[amrlev][0]->nGrow()) Util::Abort(INFO, "Inconsistent # of ghost nodes, should be ", m_ddw_mf[amrlev][0]->nGrow());
101 
102 
103  int nghost = m_ddw_mf[amrlev][0]->nGrow();
104 
105  for (MFIter mfi(a_model, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
106  {
107  Box bx = mfi.tilebox();
108  bx.grow(nghost); // Expand to cover first layer of ghost nodes
109  bx = bx & domain; // Take intersection of box and the problem domain
110 
111  amrex::Array4<MATRIX4> const& C = (*(m_ddw_mf[amrlev][0])).array(mfi);
112  amrex::Array4<const MATRIX4> const& a_C = a_model.array(mfi);
113 
114  amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) {
115  C(i, j, k) = a_C(i, j, k);
116  });
117  }
118  //FillBoundaryCoeff(*model[amrlev][0], m_geom[amrlev][0]);
119 
120 
121  m_model_set = true;
122 }
123 
124 template <int SYM>
125 void
126 Elastic<SYM>::SetPsi(int amrlev, const amrex::MultiFab& a_psi_mf)
127 {
128  BL_PROFILE("Operator::Elastic::SetPsi()");
129  amrex::Box domain(m_geom[amrlev][0].Domain());
130 
131  for (MFIter mfi(a_psi_mf, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
132  {
133  Box bx = mfi.growntilebox() & domain;
134 
135  amrex::Array4<Set::Scalar> const& m_psi = (*(m_psi_mf[amrlev][0])).array(mfi);
136  amrex::Array4<const Set::Scalar> const& a_psi = a_psi_mf.array(mfi);
137 
138  amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) {
139  m_psi(i, j, k) = a_psi(i, j, k);
140  });
141  }
142  m_psi_set = true;
143 }
144 
145 template<int SYM>
146 void
147 Elastic<SYM>::Fapply(int amrlev, int mglev, MultiFab& a_f, const MultiFab& a_u) const
148 {
149  BL_PROFILE("Operator::Elastic::Fapply()");
150 
151  amrex::Box domain(m_geom[amrlev][mglev].Domain());
152  domain.convert(amrex::IntVect::TheNodeVector());
153 
154  const Real* DX = m_geom[amrlev][mglev].CellSize();
155 
156  for (MFIter mfi(a_f, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
157  {
158  Box bx = mfi.validbox().grow(1) & domain;
159  amrex::Box tilebox = mfi.grownnodaltilebox() & bx;
160 
161  amrex::Array4<MATRIX4> const& DDW = (*(m_ddw_mf[amrlev][mglev])).array(mfi);
162  amrex::Array4<const amrex::Real> const& U = a_u.array(mfi);
163  amrex::Array4<amrex::Real> const& F = a_f.array(mfi);
164  amrex::Array4<Set::Scalar> const& psi = m_psi_mf[amrlev][mglev]->array(mfi);
165 
166  const Dim3 lo = amrex::lbound(domain), hi = amrex::ubound(domain);
167 
168  amrex::ParallelFor(tilebox, [=] AMREX_GPU_DEVICE(int i, int j, int k) {
169 
170  Set::Vector f = Set::Vector::Zero();
171 
172  Set::Vector u;
173  for (int p = 0; p < AMREX_SPACEDIM; p++) u(p) = U(i, j, k, p);
174 
175 
176  bool AMREX_D_DECL(xmin = (i == lo.x), ymin = (j == lo.y), zmin = (k == lo.z)),
177  AMREX_D_DECL(xmax = (i == hi.x), ymax = (j == hi.y), zmax = (k == hi.z));
178 
179  // Determine if a special stencil will be necessary for first derivatives
180  std::array<Numeric::StencilType, AMREX_SPACEDIM>
181  sten = Numeric::GetStencil(i, j, k, domain);
182 
183  // The displacement gradient tensor
184  Set::Matrix gradu; // gradu(i,j) = u_{i,j}
185 
186  // Fill gradu
187  for (int p = 0; p < AMREX_SPACEDIM; p++)
188  {
189  AMREX_D_TERM(gradu(p, 0) = (Numeric::Stencil<Set::Scalar, 1, 0, 0>::D(U, i, j, k, p, DX, sten));,
190  gradu(p, 1) = (Numeric::Stencil<Set::Scalar, 0, 1, 0>::D(U, i, j, k, p, DX, sten));,
191  gradu(p, 2) = (Numeric::Stencil<Set::Scalar, 0, 0, 1>::D(U, i, j, k, p, DX, sten)););
192  }
193 
194  Set::Scalar psi_avg = 1.0;
195  if (m_psi_set) psi_avg = (1.0 - m_psi_small) * Numeric::Interpolate::CellToNodeAverage(psi, i, j, k, 0) + m_psi_small;
196 
197  // Stress tensor computed using the model fab
198  Set::Matrix sig = (DDW(i, j, k) * gradu) * psi_avg;
199 
200  // Boundary conditions
201  /// \todo Important: we need a way to handle corners and edges.
202  amrex::IntVect m(AMREX_D_DECL(i, j, k));
203  if (AMREX_D_TERM(xmax || xmin, || ymax || ymin, || zmax || zmin))
204  {
205  f = (*m_bc)(u, gradu, sig, i, j, k, domain);
206  }
207  else
208  {
209 
210 
211  // The gradient of the displacement gradient tensor
212  // TODO - replace with this call. But not for this PR
213  //Set::Matrix3 gradgradu = Numeric::Hessian(U,i,j,k,DX,sten); // gradgradu[k](l,j) = u_{k,lj}
214  Set::Matrix3 gradgradu; // gradgradu[k](l,j) = u_{k,lj}
215 
216  // Fill gradu and gradgradu
217  for (int p = 0; p < AMREX_SPACEDIM; p++)
218  {
219  // Diagonal terms:
220  AMREX_D_TERM(gradgradu(p, 0, 0) = (Numeric::Stencil<Set::Scalar, 2, 0, 0>::D(U, i, j, k, p, DX));,
221  gradgradu(p, 1, 1) = (Numeric::Stencil<Set::Scalar, 0, 2, 0>::D(U, i, j, k, p, DX));,
222  gradgradu(p, 2, 2) = (Numeric::Stencil<Set::Scalar, 0, 0, 2>::D(U, i, j, k, p, DX)););
223 
224  // Off-diagonal terms:
225  AMREX_D_TERM(,// 2D
226  gradgradu(p, 0, 1) = (Numeric::Stencil<Set::Scalar, 1, 1, 0>::D(U, i, j, k, p, DX));
227  gradgradu(p, 1, 0) = gradgradu(p, 0, 1);
228  ,// 3D
229  gradgradu(p, 0, 2) = (Numeric::Stencil<Set::Scalar, 1, 0, 1>::D(U, i, j, k, p, DX));
230  gradgradu(p, 1, 2) = (Numeric::Stencil<Set::Scalar, 0, 1, 1>::D(U, i, j, k, p, DX));
231  gradgradu(p, 2, 0) = gradgradu(p, 0, 2);
232  gradgradu(p, 2, 1) = gradgradu(p, 1, 2););
233  }
234 
235  //
236  // Operator
237  //
238  // The return value is
239  // f = C(grad grad u) + grad(C)*grad(u)
240  // In index notation
241  // f_i = C_{ijkl,j} u_{k,l} + C_{ijkl}u_{k,lj}
242  //
243 
244  f = (DDW(i, j, k) * gradgradu) * psi_avg;
245 
246  if (!m_uniform)
247  {
248  MATRIX4
249  AMREX_D_DECL(Cgrad1 = (Numeric::Stencil<MATRIX4, 1, 0, 0>::D(DDW, i, j, k, 0, DX, sten)),
250  Cgrad2 = (Numeric::Stencil<MATRIX4, 0, 1, 0>::D(DDW, i, j, k, 0, DX, sten)),
251  Cgrad3 = (Numeric::Stencil<MATRIX4, 0, 0, 1>::D(DDW, i, j, k, 0, DX, sten)));
252  f += (AMREX_D_TERM((Cgrad1 * gradu).col(0),
253  +(Cgrad2 * gradu).col(1),
254  +(Cgrad3 * gradu).col(2))) * (psi_avg);
255  }
256  if (m_psi_set)
257  {
258  Set::Vector gradpsi = Numeric::CellGradientOnNode(psi, i, j, k, 0, DX);
259  gradpsi *= (1.0 - m_psi_small);
260  f += (DDW(i, j, k) * gradu) * gradpsi;
261  }
262  }
263  AMREX_D_TERM(F(i, j, k, 0) = f[0];, F(i, j, k, 1) = f[1];, F(i, j, k, 2) = f[2];);
264  });
265  }
266 }
267 
268 
269 
270 template<int SYM>
271 void
272 Elastic<SYM>::Diagonal(int amrlev, int mglev, MultiFab& a_diag)
273 {
274  BL_PROFILE("Operator::Elastic::Diagonal()");
275 
276  amrex::Box domain(m_geom[amrlev][mglev].Domain());
277  domain.convert(amrex::IntVect::TheNodeVector());
278  const Real* DX = m_geom[amrlev][mglev].CellSize();
279 
280  for (MFIter mfi(a_diag, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
281  {
282  Box bx = mfi.validbox().grow(1) & domain;
283  amrex::Box tilebox = mfi.grownnodaltilebox() & bx;
284 
285  amrex::Array4<MATRIX4> const& DDW = (*(m_ddw_mf[amrlev][mglev])).array(mfi);
286  amrex::Array4<Set::Scalar> const& diag = a_diag.array(mfi);
287  amrex::Array4<Set::Scalar> const& psi = m_psi_mf[amrlev][mglev]->array(mfi);
288 
289  const Dim3 lo = amrex::lbound(domain), hi = amrex::ubound(domain);
290 
291  amrex::ParallelFor(tilebox, [=] AMREX_GPU_DEVICE(int i, int j, int k) {
292 
293  Set::Vector f = Set::Vector::Zero();
294 
295  bool AMREX_D_DECL(xmin = (i == lo.x), ymin = (j == lo.y), zmin = (k == lo.z)),
296  AMREX_D_DECL(xmax = (i == hi.x), ymax = (j == hi.y), zmax = (k == hi.z));
297 
298  Set::Scalar psi_avg = 1.0;
299  if (m_psi_set) psi_avg = (1.0 - m_psi_small) * Numeric::Interpolate::CellToNodeAverage(psi, i, j, k, 0) + m_psi_small;
300 
301  Set::Matrix gradu; // gradu(i,j) = u_{i,j}
302  Set::Matrix3 gradgradu; // gradgradu[k](l,j) = u_{k,lj}
303 
304  for (int p = 0; p < AMREX_SPACEDIM; p++)
305  {
306 
307  diag(i, j, k, p) = 0.0;
308  for (int q = 0; q < AMREX_SPACEDIM; q++)
309  {
310  AMREX_D_TERM(
311  gradu(q, 0) = ((!xmax ? 0.0 : (p == q ? 1.0 : 0.0)) - (!xmin ? 0.0 : (p == q ? 1.0 : 0.0))) / ((xmin || xmax ? 1.0 : 2.0) * DX[0]);,
312  gradu(q, 1) = ((!ymax ? 0.0 : (p == q ? 1.0 : 0.0)) - (!ymin ? 0.0 : (p == q ? 1.0 : 0.0))) / ((ymin || ymax ? 1.0 : 2.0) * DX[1]);,
313  gradu(q, 2) = ((!zmax ? 0.0 : (p == q ? 1.0 : 0.0)) - (!zmin ? 0.0 : (p == q ? 1.0 : 0.0))) / ((zmin || zmax ? 1.0 : 2.0) * DX[2]););
314 
315  AMREX_D_TERM(
316  gradgradu(q, 0, 0) = (p == q ? -2.0 : 0.0) / DX[0] / DX[0];
317  ,// 2D
318  gradgradu(q, 0, 1) = 0.0;
319  gradgradu(q, 1, 0) = 0.0;
320  gradgradu(q, 1, 1) = (p == q ? -2.0 : 0.0) / DX[1] / DX[1];
321  ,// 3D
322  gradgradu(q, 0, 2) = 0.0;
323  gradgradu(q, 1, 2) = 0.0;
324  gradgradu(q, 2, 0) = 0.0;
325  gradgradu(q, 2, 1) = 0.0;
326  gradgradu(q, 2, 2) = (p == q ? -2.0 : 0.0) / DX[2] / DX[2]);
327  }
328 
329 
330  amrex::IntVect m(AMREX_D_DECL(i, j, k));
331  if (AMREX_D_TERM(xmax || xmin, || ymax || ymin, || zmax || zmin))
332  {
333  Set::Matrix sig = DDW(i, j, k) * gradu * psi_avg;
334  Set::Vector u = Set::Vector::Zero();
335  u(p) = 1.0;
336  f = (*m_bc)(u, gradu, sig, i, j, k, domain);
337  diag(i, j, k, p) = f(p);
338  }
339  else
340  {
341  Set::Vector f = (DDW(i, j, k) * gradgradu) * psi_avg;
342  diag(i, j, k, p) += f(p);
343  }
344 
345 #ifdef AMREX_DEBUG
346  if (std::isnan(diag(i, j, k, p))) Util::Abort(INFO, "diagonal is nan at (", i, ",", j, ",", k, "), amrlev=", amrlev, ", mglev=", mglev);
347  if (std::isinf(diag(i, j, k, p))) Util::Abort(INFO, "diagonal is inf at (", i, ",", j, ",", k, "), amrlev=", amrlev, ", mglev=", mglev);
348  if (diag(i, j, k, p) == 0) Util::Abort(INFO, "diagonal is zero at (", i, ",", j, ",", k, "), amrlev=", amrlev, ", mglev=", mglev);
349 #endif
350 
351  }
352  });
353  }
354 }
355 
356 
357 template<int SYM>
358 void
359 Elastic<SYM>::Error0x(int amrlev, int mglev, MultiFab& R0x, const MultiFab& x) const
360 {
361  BL_PROFILE("Operator::Elastic::Error0x()");
363 
364  int ncomp = x.nComp();//getNComp();
365  int nghost = x.nGrow();
366 
367  if (!m_diagonal_computed)
368  Util::Abort(INFO, "Operator::Diagonal() must be called before using normalize");
369 
370  amrex::MultiFab D0x(x.boxArray(), x.DistributionMap(), ncomp, nghost);
371  amrex::MultiFab AD0x(x.boxArray(), x.DistributionMap(), ncomp, nghost);
372 
373  amrex::MultiFab::Copy(D0x, x, 0, 0, ncomp, nghost); // D0x = x
374  amrex::MultiFab::Divide(D0x, *m_diag[amrlev][mglev], 0, 0, ncomp, 0); // D0x = x/diag
375  amrex::MultiFab::Copy(AD0x, D0x, 0, 0, ncomp, nghost); // AD0x = D0x
376 
377  Fapply(amrlev, mglev, AD0x, D0x); // AD0x = A * D0 * x
378 
379  amrex::MultiFab::Copy(R0x, x, 0, 0, ncomp, nghost); // R0x = x
380  amrex::MultiFab::Subtract(R0x, AD0x, 0, 0, ncomp, nghost); // R0x = x - AD0x
381 }
382 
383 
384 template<int SYM>
385 void
386 Elastic<SYM>::FFlux(int /*amrlev*/, const MFIter& /*mfi*/,
387  const std::array<FArrayBox*, AMREX_SPACEDIM>& sigmafab,
388  const FArrayBox& /*ufab*/, const int /*face_only*/) const
389 {
390  BL_PROFILE("Operator::Elastic::FFlux()");
392  amrex::BaseFab<amrex::Real> AMREX_D_DECL(&fxfab = *sigmafab[0],
393  &fyfab = *sigmafab[1],
394  &fzfab = *sigmafab[2]);
395  AMREX_D_TERM(fxfab.setVal(0.0);,
396  fyfab.setVal(0.0);,
397  fzfab.setVal(0.0););
398 
399 }
400 
401 template<int SYM>
402 void
403 Elastic<SYM>::Strain(int amrlev,
404  amrex::MultiFab& a_eps,
405  const amrex::MultiFab& a_u,
406  bool voigt) const
407 {
408  BL_PROFILE("Operator::Elastic::Strain()");
409 
410  const amrex::Real* DX = m_geom[amrlev][0].CellSize();
411  amrex::Box domain(m_geom[amrlev][0].Domain());
412  domain.convert(amrex::IntVect::TheNodeVector());
413 
414 
415  for (MFIter mfi(a_u, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
416  {
417  const Box& bx = mfi.tilebox();
418  amrex::Array4<amrex::Real> const& epsilon = a_eps.array(mfi);
419  amrex::Array4<const amrex::Real> const& u = a_u.array(mfi);
420  amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k)
421  {
422  Set::Matrix gradu;
423 
424  std::array<Numeric::StencilType, AMREX_SPACEDIM> sten
425  = Numeric::GetStencil(i, j, k, domain);
426 
427  // Fill gradu
428  for (int p = 0; p < AMREX_SPACEDIM; p++)
429  {
430  AMREX_D_TERM(gradu(p, 0) = (Numeric::Stencil<Set::Scalar, 1, 0, 0>::D(u, i, j, k, p, DX, sten));,
431  gradu(p, 1) = (Numeric::Stencil<Set::Scalar, 0, 1, 0>::D(u, i, j, k, p, DX, sten));,
432  gradu(p, 2) = (Numeric::Stencil<Set::Scalar, 0, 0, 1>::D(u, i, j, k, p, DX, sten)););
433  }
434 
435  Set::Matrix eps = 0.5 * (gradu + gradu.transpose());
436 
437  if (voigt)
438  {
439  AMREX_D_PICK(epsilon(i, j, k, 0) = eps(0, 0);
440  ,
441  epsilon(i, j, k, 0) = eps(0, 0); epsilon(i, j, k, 1) = eps(1, 1); epsilon(i, j, k, 2) = eps(0, 1);
442  ,
443  epsilon(i, j, k, 0) = eps(0, 0); epsilon(i, j, k, 1) = eps(1, 1); epsilon(i, j, k, 2) = eps(2, 2);
444  epsilon(i, j, k, 3) = eps(1, 2); epsilon(i, j, k, 4) = eps(2, 0); epsilon(i, j, k, 5) = eps(0, 1););
445  }
446  else
447  {
448  AMREX_D_PICK(epsilon(i, j, k, 0) = eps(0, 0);
449  ,
450  epsilon(i, j, k, 0) = eps(0, 0); epsilon(i, j, k, 1) = eps(0, 1);
451  epsilon(i, j, k, 2) = eps(1, 0); epsilon(i, j, k, 3) = eps(1, 1);
452  ,
453  epsilon(i, j, k, 0) = eps(0, 0); epsilon(i, j, k, 1) = eps(0, 1); epsilon(i, j, k, 2) = eps(0, 2);
454  epsilon(i, j, k, 3) = eps(1, 0); epsilon(i, j, k, 4) = eps(1, 1); epsilon(i, j, k, 5) = eps(1, 2);
455  epsilon(i, j, k, 6) = eps(2, 0); epsilon(i, j, k, 7) = eps(2, 1); epsilon(i, j, k, 8) = eps(2, 2););
456  }
457  });
458  }
459 }
460 
461 
462 template<int SYM>
463 void
464 Elastic<SYM>::Stress(int amrlev,
465  amrex::MultiFab& a_sigma,
466  const amrex::MultiFab& a_u,
467  bool voigt, bool a_homogeneous)
468 {
469  BL_PROFILE("Operator::Elastic::Stress()");
470  SetHomogeneous(a_homogeneous);
471 
472  const amrex::Real* DX = m_geom[amrlev][0].CellSize();
473  amrex::Box domain(m_geom[amrlev][0].Domain());
474  domain.convert(amrex::IntVect::TheNodeVector());
475 
476  for (MFIter mfi(a_u, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
477  {
478  const Box& bx = mfi.tilebox();
479  amrex::Array4<Set::Matrix4<AMREX_SPACEDIM, SYM>> const& DDW = (*(m_ddw_mf[amrlev][0])).array(mfi);
480  amrex::Array4<amrex::Real> const& sigma = a_sigma.array(mfi);
481  amrex::Array4<Set::Scalar> const& psi = m_psi_mf[amrlev][0]->array(mfi);
482  amrex::Array4<const amrex::Real> const& u = a_u.array(mfi);
483  amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k)
484  {
485  Set::Matrix gradu;
486 
487  std::array<Numeric::StencilType, AMREX_SPACEDIM> sten
488  = Numeric::GetStencil(i, j, k, domain);
489 
490  // Fill gradu
491  for (int p = 0; p < AMREX_SPACEDIM; p++)
492  {
493  AMREX_D_TERM(gradu(p, 0) = (Numeric::Stencil<Set::Scalar, 1, 0, 0>::D(u, i, j, k, p, DX, sten));,
494  gradu(p, 1) = (Numeric::Stencil<Set::Scalar, 0, 1, 0>::D(u, i, j, k, p, DX, sten));,
495  gradu(p, 2) = (Numeric::Stencil<Set::Scalar, 0, 0, 1>::D(u, i, j, k, p, DX, sten)););
496  }
497 
498  Set::Scalar psi_avg = 1.0;
499  if (m_psi_set) psi_avg = (1.0 - m_psi_small) * Numeric::Interpolate::CellToNodeAverage(psi, i, j, k, 0) + m_psi_small;
500  Set::Matrix sig = (DDW(i, j, k) * gradu) * psi_avg;
501 
502  if (voigt)
503  {
504  AMREX_D_PICK(sigma(i, j, k, 0) = sig(0, 0);
505  ,
506  sigma(i, j, k, 0) = sig(0, 0); sigma(i, j, k, 1) = sig(1, 1); sigma(i, j, k, 2) = sig(0, 1);
507  ,
508  sigma(i, j, k, 0) = sig(0, 0); sigma(i, j, k, 1) = sig(1, 1); sigma(i, j, k, 2) = sig(2, 2);
509  sigma(i, j, k, 3) = sig(1, 2); sigma(i, j, k, 4) = sig(2, 0); sigma(i, j, k, 5) = sig(0, 1););
510  }
511  else
512  {
513  AMREX_D_PICK(sigma(i, j, k, 0) = sig(0, 0);
514  ,
515  sigma(i, j, k, 0) = sig(0, 0); sigma(i, j, k, 1) = sig(0, 1);
516  sigma(i, j, k, 2) = sig(1, 0); sigma(i, j, k, 3) = sig(1, 1);
517  ,
518  sigma(i, j, k, 0) = sig(0, 0); sigma(i, j, k, 1) = sig(0, 1); sigma(i, j, k, 2) = sig(0, 2);
519  sigma(i, j, k, 3) = sig(1, 0); sigma(i, j, k, 4) = sig(1, 1); sigma(i, j, k, 5) = sig(1, 2);
520  sigma(i, j, k, 6) = sig(2, 0); sigma(i, j, k, 7) = sig(2, 1); sigma(i, j, k, 8) = sig(2, 2););
521  }
522  });
523  }
524 }
525 
526 
527 template<int SYM>
528 void
529 Elastic<SYM>::Energy(int amrlev,
530  amrex::MultiFab& a_energy,
531  const amrex::MultiFab& a_u, bool a_homogeneous)
532 {
533  BL_PROFILE("Operator::Elastic::Energy()");
534  SetHomogeneous(a_homogeneous);
535 
536  amrex::Box domain(m_geom[amrlev][0].Domain());
537  domain.convert(amrex::IntVect::TheNodeVector());
538 
539  const amrex::Real* DX = m_geom[amrlev][0].CellSize();
540 
541  for (MFIter mfi(a_u, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
542  {
543  const Box& bx = mfi.tilebox();
544  amrex::Array4<Set::Matrix4<AMREX_SPACEDIM, SYM>> const& DDW = (*(m_ddw_mf[amrlev][0])).array(mfi);
545  amrex::Array4<amrex::Real> const& energy = a_energy.array(mfi);
546  amrex::Array4<const amrex::Real> const& u = a_u.array(mfi);
547  amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k)
548  {
549  Set::Matrix gradu;
550 
551  std::array<Numeric::StencilType, AMREX_SPACEDIM> sten
552  = Numeric::GetStencil(i, j, k, domain);
553 
554  // Fill gradu
555  for (int p = 0; p < AMREX_SPACEDIM; p++)
556  {
557  AMREX_D_TERM(gradu(p, 0) = (Numeric::Stencil<Set::Scalar, 1, 0, 0>::D(u, i, j, k, p, DX, sten));,
558  gradu(p, 1) = (Numeric::Stencil<Set::Scalar, 0, 1, 0>::D(u, i, j, k, p, DX, sten));,
559  gradu(p, 2) = (Numeric::Stencil<Set::Scalar, 0, 0, 1>::D(u, i, j, k, p, DX, sten)););
560  }
561 
562  Set::Matrix eps = .5 * (gradu + gradu.transpose());
563  Set::Matrix sig = DDW(i, j, k) * gradu;
564 
565  // energy(i,j,k) = (gradu.transpose() * sig).trace();
566 
567  //Util::Abort(INFO,"Fix this"); //
568  //energy(i,j,k) = C(i,j,k).W(gradu);
569  for (int m = 0; m < AMREX_SPACEDIM; m++)
570  {
571  for (int n = 0; n < AMREX_SPACEDIM; n++)
572  {
573  energy(i, j, k) += .5 * sig(m, n) * eps(m, n);
574  }
575  }
576  });
577  }
578 }
579 
580 template<int SYM>
581 void
582 Elastic<SYM>::averageDownCoeffs()
583 {
584  BL_PROFILE("Elastic::averageDownCoeffs()");
585 
586  if (m_average_down_coeffs)
587  for (int amrlev = m_num_amr_levels - 1; amrlev > 0; --amrlev)
588  averageDownCoeffsDifferentAmrLevels(amrlev);
589 
590  averageDownCoeffsSameAmrLevel(0);
591  for (int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev)
592  {
593  for (int mglev = 0; mglev < m_num_mg_levels[amrlev]; ++mglev)
594  {
595  if (m_ddw_mf[amrlev][mglev]) {
596  FillBoundaryCoeff(*m_ddw_mf[amrlev][mglev], m_geom[amrlev][mglev]);
597  FillBoundaryCoeff(*m_psi_mf[amrlev][mglev], m_geom[amrlev][mglev]);
598  }
599  }
600  }
601 }
602 
603 template<int SYM>
604 void
605 Elastic<SYM>::averageDownCoeffsDifferentAmrLevels(int fine_amrlev)
606 {
607  BL_PROFILE("Operator::Elastic::averageDownCoeffsDifferentAmrLevels()");
608  Util::Assert(INFO, TEST(fine_amrlev > 0));
609 
610  const int crse_amrlev = fine_amrlev - 1;
611  const int ncomp = 1;
612 
613  MultiTab& crse_ddw = *m_ddw_mf[crse_amrlev][0];
614  MultiTab& fine_ddw = *m_ddw_mf[fine_amrlev][0];
615 
616  amrex::Box cdomain(m_geom[crse_amrlev][0].Domain());
617  cdomain.convert(amrex::IntVect::TheNodeVector());
618 
619  const Geometry& cgeom = m_geom[crse_amrlev][0];
620 
621  const BoxArray& fba = fine_ddw.boxArray();
622  const DistributionMapping& fdm = fine_ddw.DistributionMap();
623 
624  MultiTab fine_ddw_for_coarse(amrex::coarsen(fba, 2), fdm, ncomp, 2);
625  fine_ddw_for_coarse.ParallelCopy(crse_ddw, 0, 0, ncomp, 0, 0, cgeom.periodicity());
626 
627  const int coarse_fine_node = 1;
628  const int fine_fine_node = 2;
629 
630  amrex::iMultiFab nodemask(amrex::coarsen(fba, 2), fdm, 1, 2);
631  nodemask.ParallelCopy(*m_nd_fine_mask[crse_amrlev], 0, 0, 1, 0, 0, cgeom.periodicity());
632 
633  amrex::iMultiFab cellmask(amrex::convert(amrex::coarsen(fba, 2), amrex::IntVect::TheCellVector()), fdm, 1, 2);
634  cellmask.ParallelCopy(*m_cc_fine_mask[crse_amrlev], 0, 0, 1, 1, 1, cgeom.periodicity());
635 
636  for (MFIter mfi(fine_ddw_for_coarse, false); mfi.isValid(); ++mfi)
637  {
638  const Box& bx = mfi.validbox();
639 
640  amrex::Array4<const int> const& nmask = nodemask.array(mfi);
641  //amrex::Array4<const int> const& cmask = cellmask.array(mfi);
642 
643  amrex::Array4<MATRIX4> const& cdata = fine_ddw_for_coarse.array(mfi);
644  amrex::Array4<const MATRIX4> const& fdata = fine_ddw.array(mfi);
645 
646  const Dim3 lo = amrex::lbound(cdomain), hi = amrex::ubound(cdomain);
647 
648  for (int n = 0; n < fine_ddw.nComp(); n++)
649  {
650  // I,J,K == coarse coordinates
651  // i,j,k == fine coordinates
652  amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE(int I, int J, int K) {
653  int i = I * 2, j = J * 2, k = K * 2;
654 
655  if (nmask(I, J, K) == fine_fine_node || nmask(I, J, K) == coarse_fine_node)
656  {
657  if ((I == lo.x || I == hi.x) &&
658  (J == lo.y || J == hi.y) &&
659  (K == lo.z || K == hi.z)) // Corner
660  cdata(I, J, K, n) = fdata(i, j, k, n);
661  else if ((J == lo.y || J == hi.y) &&
662  (K == lo.z || K == hi.z)) // X edge
663  cdata(I, J, K, n) = fdata(i - 1, j, k, n) * 0.25 + fdata(i, j, k, n) * 0.5 + fdata(i + 1, j, k, n) * 0.25;
664  else if ((K == lo.z || K == hi.z) &&
665  (I == lo.x || I == hi.x)) // Y edge
666  cdata(I, J, K, n) = fdata(i, j - 1, k, n) * 0.25 + fdata(i, j, k, n) * 0.5 + fdata(i, j + 1, k, n) * 0.25;
667  else if ((I == lo.x || I == hi.x) &&
668  (J == lo.y || J == hi.y)) // Z edge
669  cdata(I, J, K, n) = fdata(i, j, k - 1, n) * 0.25 + fdata(i, j, k, n) * 0.5 + fdata(i, j, k + 1, n) * 0.25;
670  else if (I == lo.x || I == hi.x) // X face
671  cdata(I, J, K, n) =
672  (fdata(i, j - 1, k - 1, n) + fdata(i, j, k - 1, n) * 2.0 + fdata(i, j + 1, k - 1, n)
673  + fdata(i, j - 1, k, n) * 2.0 + fdata(i, j, k, n) * 4.0 + fdata(i, j + 1, k, n) * 2.0
674  + fdata(i, j - 1, k + 1, n) + fdata(i, j, k + 1, n) * 2.0 + fdata(i, j + 1, k + 1, n)) / 16.0;
675  else if (J == lo.y || J == hi.y) // Y face
676  cdata(I, J, K, n) =
677  (fdata(i - 1, j, k - 1, n) + fdata(i - 1, j, k, n) * 2.0 + fdata(i - 1, j, k + 1, n)
678  + fdata(i, j, k - 1, n) * 2.0 + fdata(i, j, k, n) * 4.0 + fdata(i, j, k + 1, n) * 2.0
679  + fdata(i + 1, j, k - 1, n) + fdata(i + 1, j, k, n) * 2.0 + fdata(i + 1, j, k + 1, n)) / 16.0;
680  else if (K == lo.z || K == hi.z) // Z face
681  cdata(I, J, K, n) =
682  (fdata(i - 1, j - 1, k, n) + fdata(i, j - 1, k, n) * 2.0 + fdata(i + 1, j - 1, k, n)
683  + fdata(i - 1, j, k, n) * 2.0 + fdata(i, j, k, n) * 4.0 + fdata(i + 1, j, k, n) * 2.0
684  + fdata(i - 1, j + 1, k, n) + fdata(i, j + 1, k, n) * 2.0 + fdata(i + 1, j + 1, k, n)) / 16.0;
685  else // Interior
686  cdata(I, J, K, n) =
687  (fdata(i - 1, j - 1, k - 1, n) + fdata(i - 1, j - 1, k + 1, n) + fdata(i - 1, j + 1, k - 1, n) + fdata(i - 1, j + 1, k + 1, n) +
688  fdata(i + 1, j - 1, k - 1, n) + fdata(i + 1, j - 1, k + 1, n) + fdata(i + 1, j + 1, k - 1, n) + fdata(i + 1, j + 1, k + 1, n)) / 64.0
689  +
690  (fdata(i, j - 1, k - 1, n) + fdata(i, j - 1, k + 1, n) + fdata(i, j + 1, k - 1, n) + fdata(i, j + 1, k + 1, n) +
691  fdata(i - 1, j, k - 1, n) + fdata(i + 1, j, k - 1, n) + fdata(i - 1, j, k + 1, n) + fdata(i + 1, j, k + 1, n) +
692  fdata(i - 1, j - 1, k, n) + fdata(i - 1, j + 1, k, n) + fdata(i + 1, j - 1, k, n) + fdata(i + 1, j + 1, k, n)) / 32.0
693  +
694  (fdata(i - 1, j, k, n) + fdata(i, j - 1, k, n) + fdata(i, j, k - 1, n) +
695  fdata(i + 1, j, k, n) + fdata(i, j + 1, k, n) + fdata(i, j, k + 1, n)) / 16.0
696  +
697  fdata(i, j, k, n) / 8.0;
698 
699 #ifdef AMREX_DEBUG
700  if (cdata(I, J, K, n).contains_nan()) Util::Abort(INFO, "restricted model is nan at (", I, ",", J, ",", K, "), fine_amrlev=", fine_amrlev);
701 #endif
702  }
703 
704  });
705  }
706  }
707 
708  // Copy the restricted fine-level coefficients
709  // onto the coarse grid.
710  crse_ddw.ParallelCopy(fine_ddw_for_coarse, 0, 0, ncomp, 0, 0, cgeom.periodicity());
711  const int mglev = 0;
712  Util::RealFillBoundary(crse_ddw, m_geom[crse_amrlev][mglev]);
713  return;
714 }
715 
716 
717 
718 template<int SYM>
719 void
720 Elastic<SYM>::averageDownCoeffsSameAmrLevel(int amrlev)
721 {
722  BL_PROFILE("Elastic::averageDownCoeffsSameAmrLevel()");
723 
724  for (int mglev = 1; mglev < m_num_mg_levels[amrlev]; ++mglev)
725  {
726  amrex::Box cdomain(m_geom[amrlev][mglev].Domain());
727  cdomain.convert(amrex::IntVect::TheNodeVector());
728  amrex::Box fdomain(m_geom[amrlev][mglev - 1].Domain());
729  fdomain.convert(amrex::IntVect::TheNodeVector());
730 
731  MultiTab& crse = *m_ddw_mf[amrlev][mglev];
732  MultiTab& fine = *m_ddw_mf[amrlev][mglev - 1];
733 
734  amrex::BoxArray crseba = crse.boxArray();
735  amrex::BoxArray fineba = fine.boxArray();
736 
737  BoxArray newba = crseba;
738  newba.refine(2);
739  MultiTab fine_on_crseba;
740  fine_on_crseba.define(newba, crse.DistributionMap(), 1, 4);
741  fine_on_crseba.ParallelCopy(fine, 0, 0, 1, 2, 4, m_geom[amrlev][mglev].periodicity());
742 
743  for (MFIter mfi(crse, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
744  {
745  Box bx = mfi.tilebox();
746  bx = bx & cdomain;
747 
748  amrex::Array4<const Set::Matrix4<AMREX_SPACEDIM, SYM>> const& fdata = fine_on_crseba.array(mfi);
749  amrex::Array4<Set::Matrix4<AMREX_SPACEDIM, SYM>> const& cdata = crse.array(mfi);
750 
751  const Dim3 lo = amrex::lbound(cdomain), hi = amrex::ubound(cdomain);
752 
753  // I,J,K == coarse coordinates
754  // i,j,k == fine coordinates
755  amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE(int I, int J, int K) {
756  int i = 2 * I, j = 2 * J, k = 2 * K;
757 
758  if ((I == lo.x || I == hi.x) &&
759  (J == lo.y || J == hi.y) &&
760  (K == lo.z || K == hi.z)) // Corner
761  cdata(I, J, K) = fdata(i, j, k);
762  else if ((J == lo.y || J == hi.y) &&
763  (K == lo.z || K == hi.z)) // X edge
764  cdata(I, J, K) = fdata(i - 1, j, k) * 0.25 + fdata(i, j, k) * 0.5 + fdata(i + 1, j, k) * 0.25;
765  else if ((K == lo.z || K == hi.z) &&
766  (I == lo.x || I == hi.x)) // Y edge
767  cdata(I, J, K) = fdata(i, j - 1, k) * 0.25 + fdata(i, j, k) * 0.5 + fdata(i, j + 1, k) * 0.25;
768  else if ((I == lo.x || I == hi.x) &&
769  (J == lo.y || J == hi.y)) // Z edge
770  cdata(I, J, K) = fdata(i, j, k - 1) * 0.25 + fdata(i, j, k) * 0.5 + fdata(i, j, k + 1) * 0.25;
771  else if (I == lo.x || I == hi.x) // X face
772  cdata(I, J, K) =
773  (fdata(i, j - 1, k - 1) + fdata(i, j, k - 1) * 2.0 + fdata(i, j + 1, k - 1)
774  + fdata(i, j - 1, k) * 2.0 + fdata(i, j, k) * 4.0 + fdata(i, j + 1, k) * 2.0
775  + fdata(i, j - 1, k + 1) + fdata(i, j, k + 1) * 2.0 + fdata(i, j + 1, k + 1)) / 16.0;
776  else if (J == lo.y || J == hi.y) // Y face
777  cdata(I, J, K) =
778  (fdata(i - 1, j, k - 1) + fdata(i - 1, j, k) * 2.0 + fdata(i - 1, j, k + 1)
779  + fdata(i, j, k - 1) * 2.0 + fdata(i, j, k) * 4.0 + fdata(i, j, k + 1) * 2.0
780  + fdata(i + 1, j, k - 1) + fdata(i + 1, j, k) * 2.0 + fdata(i + 1, j, k + 1)) / 16.0;
781  else if (K == lo.z || K == hi.z) // Z face
782  cdata(I, J, K) =
783  (fdata(i - 1, j - 1, k) + fdata(i, j - 1, k) * 2.0 + fdata(i + 1, j - 1, k)
784  + fdata(i - 1, j, k) * 2.0 + fdata(i, j, k) * 4.0 + fdata(i + 1, j, k) * 2.0
785  + fdata(i - 1, j + 1, k) + fdata(i, j + 1, k) * 2.0 + fdata(i + 1, j + 1, k)) / 16.0;
786  else // Interior
787  cdata(I, J, K) =
788  (fdata(i - 1, j - 1, k - 1) + fdata(i - 1, j - 1, k + 1) + fdata(i - 1, j + 1, k - 1) + fdata(i - 1, j + 1, k + 1) +
789  fdata(i + 1, j - 1, k - 1) + fdata(i + 1, j - 1, k + 1) + fdata(i + 1, j + 1, k - 1) + fdata(i + 1, j + 1, k + 1)) / 64.0
790  +
791  (fdata(i, j - 1, k - 1) + fdata(i, j - 1, k + 1) + fdata(i, j + 1, k - 1) + fdata(i, j + 1, k + 1) +
792  fdata(i - 1, j, k - 1) + fdata(i + 1, j, k - 1) + fdata(i - 1, j, k + 1) + fdata(i + 1, j, k + 1) +
793  fdata(i - 1, j - 1, k) + fdata(i - 1, j + 1, k) + fdata(i + 1, j - 1, k) + fdata(i + 1, j + 1, k)) / 32.0
794  +
795  (fdata(i - 1, j, k) + fdata(i, j - 1, k) + fdata(i, j, k - 1) +
796  fdata(i + 1, j, k) + fdata(i, j + 1, k) + fdata(i, j, k + 1)) / 16.0
797  +
798  fdata(i, j, k) / 8.0;
799 
800 #ifdef AMREX_DEBUG
801  if (cdata(I, J, K).contains_nan()) Util::Abort(INFO, "restricted model is nan at crse coordinates (I=", I, ",J=", J, ",K=", K, "), amrlev=", amrlev, " restricting from mglev ", mglev - 1, " to ", mglev);
802 #endif
803  });
804  }
805  FillBoundaryCoeff(crse, m_geom[amrlev][mglev]);
806 
807 
808  if (!m_psi_set) continue;
809 
810  amrex::Box cdomain_cell(m_geom[amrlev][mglev].Domain());
811  amrex::Box fdomain_cell(m_geom[amrlev][mglev - 1].Domain());
812  MultiFab& crse_psi = *m_psi_mf[amrlev][mglev];
813  MultiFab& fine_psi = *m_psi_mf[amrlev][mglev - 1];
814  MultiFab fine_psi_on_crseba;
815  fine_psi_on_crseba.define(newba.convert(amrex::IntVect::TheCellVector()), crse_psi.DistributionMap(), 1, 1);
816  fine_psi_on_crseba.ParallelCopy(fine_psi, 0, 0, 1, 1, 1, m_geom[amrlev][mglev].periodicity());
817 
818  for (MFIter mfi(crse_psi, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
819  {
820  Box bx = mfi.tilebox();
821  bx = bx & cdomain_cell;
822 
823  amrex::Array4<const Set::Scalar> const& fdata = fine_psi_on_crseba.array(mfi);
824  amrex::Array4<Set::Scalar> const& cdata = crse_psi.array(mfi);
825 
826  const Dim3 lo = amrex::lbound(cdomain), hi = amrex::ubound(cdomain);
827 
828  // I,J,K == coarse coordinates
829  // i,j,k == fine coordinates
830  amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE(int I, int J, int K) {
831  int i = 2 * I, j = 2 * J, k = 2 * K;
832 
833  if ((I == lo.x || I == hi.x) &&
834  (J == lo.y || J == hi.y) &&
835  (K == lo.z || K == hi.z)) // Corner
836  cdata(I, J, K) = fdata(i, j, k);
837  else if ((J == lo.y || J == hi.y) &&
838  (K == lo.z || K == hi.z)) // X edge
839  cdata(I, J, K) = fdata(i - 1, j, k) * 0.25 + fdata(i, j, k) * 0.5 + fdata(i + 1, j, k) * 0.25;
840  else if ((K == lo.z || K == hi.z) &&
841  (I == lo.x || I == hi.x)) // Y edge
842  cdata(I, J, K) = fdata(i, j - 1, k) * 0.25 + fdata(i, j, k) * 0.5 + fdata(i, j + 1, k) * 0.25;
843  else if ((I == lo.x || I == hi.x) &&
844  (J == lo.y || J == hi.y)) // Z edge
845  cdata(I, J, K) = fdata(i, j, k - 1) * 0.25 + fdata(i, j, k) * 0.5 + fdata(i, j, k + 1) * 0.25;
846  else if (I == lo.x || I == hi.x) // X face
847  cdata(I, J, K) =
848  (fdata(i, j - 1, k - 1) + fdata(i, j, k - 1) * 2.0 + fdata(i, j + 1, k - 1)
849  + fdata(i, j - 1, k) * 2.0 + fdata(i, j, k) * 4.0 + fdata(i, j + 1, k) * 2.0
850  + fdata(i, j - 1, k + 1) + fdata(i, j, k + 1) * 2.0 + fdata(i, j + 1, k + 1)) / 16.0;
851  else if (J == lo.y || J == hi.y) // Y face
852  cdata(I, J, K) =
853  (fdata(i - 1, j, k - 1) + fdata(i - 1, j, k) * 2.0 + fdata(i - 1, j, k + 1)
854  + fdata(i, j, k - 1) * 2.0 + fdata(i, j, k) * 4.0 + fdata(i, j, k + 1) * 2.0
855  + fdata(i + 1, j, k - 1) + fdata(i + 1, j, k) * 2.0 + fdata(i + 1, j, k + 1)) / 16.0;
856  else if (K == lo.z || K == hi.z) // Z face
857  cdata(I, J, K) =
858  (fdata(i - 1, j - 1, k) + fdata(i, j - 1, k) * 2.0 + fdata(i + 1, j - 1, k)
859  + fdata(i - 1, j, k) * 2.0 + fdata(i, j, k) * 4.0 + fdata(i + 1, j, k) * 2.0
860  + fdata(i - 1, j + 1, k) + fdata(i, j + 1, k) * 2.0 + fdata(i + 1, j + 1, k)) / 16.0;
861  else // Interior
862  cdata(I, J, K) =
863  (fdata(i - 1, j - 1, k - 1) + fdata(i - 1, j - 1, k + 1) + fdata(i - 1, j + 1, k - 1) + fdata(i - 1, j + 1, k + 1) +
864  fdata(i + 1, j - 1, k - 1) + fdata(i + 1, j - 1, k + 1) + fdata(i + 1, j + 1, k - 1) + fdata(i + 1, j + 1, k + 1)) / 64.0
865  +
866  (fdata(i, j - 1, k - 1) + fdata(i, j - 1, k + 1) + fdata(i, j + 1, k - 1) + fdata(i, j + 1, k + 1) +
867  fdata(i - 1, j, k - 1) + fdata(i + 1, j, k - 1) + fdata(i - 1, j, k + 1) + fdata(i + 1, j, k + 1) +
868  fdata(i - 1, j - 1, k) + fdata(i - 1, j + 1, k) + fdata(i + 1, j - 1, k) + fdata(i + 1, j + 1, k)) / 32.0
869  +
870  (fdata(i - 1, j, k) + fdata(i, j - 1, k) + fdata(i, j, k - 1) +
871  fdata(i + 1, j, k) + fdata(i, j + 1, k) + fdata(i, j, k + 1)) / 16.0
872  +
873  fdata(i, j, k) / 8.0;
874  });
875  }
876  FillBoundaryCoeff(crse_psi, m_geom[amrlev][mglev]);
877 
878  }
879 }
880 
881 template<int SYM>
882 void
883 Elastic<SYM>::FillBoundaryCoeff(MultiTab& sigma, const Geometry& geom)
884 {
885  BL_PROFILE("Elastic::FillBoundaryCoeff()");
886  for (int i = 0; i < 2; i++)
887  {
888  MultiTab& mf = sigma;
889  mf.FillBoundary(geom.periodicity());
890  const int ncomp = mf.nComp();
891  const int ng1 = 1;
892  const int ng2 = 2;
893  MultiTab tmpmf(mf.boxArray(), mf.DistributionMap(), ncomp, ng1);
894  tmpmf.ParallelCopy(mf, 0, 0, ncomp, ng2, ng1, geom.periodicity());
895  mf.ParallelCopy(tmpmf, 0, 0, ncomp, ng1, ng2, geom.periodicity());
896  }
897 }
898 
899 template<int SYM>
900 void
901 Elastic<SYM>::FillBoundaryCoeff(MultiFab& psi, const Geometry& geom)
902 {
903  BL_PROFILE("Elastic::FillBoundaryCoeff()");
904  for (int i = 0; i < 2; i++)
905  {
906  MultiFab& mf = psi;
907  mf.FillBoundary(geom.periodicity());
908  const int ncomp = mf.nComp();
909  const int ng1 = 1;
910  const int ng2 = 2;
911  MultiFab tmpmf(mf.boxArray(), mf.DistributionMap(), ncomp, ng1);
912  tmpmf.ParallelCopy(mf, 0, 0, ncomp, ng2, ng1, geom.periodicity());
913  mf.ParallelCopy(tmpmf, 0, 0, ncomp, ng1, ng2, geom.periodicity());
914  }
915 }
916 
917 template class Elastic<Set::Sym::Major>;
918 template class Elastic<Set::Sym::Isotropic>;
919 template class Elastic<Set::Sym::MajorMinor>;
920 template class Elastic<Set::Sym::Diagonal>;
921 
922 }
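
For orientation, the sketch below shows one way the operator defined in this file might be driven from application code; the mglev machinery above indicates it is meant to serve as a multigrid linear operator, while Strain, Stress, and Energy provide post-processing. This is a minimal illustration and is not part of Elastic.cpp: the function name SolveAndPostprocess, all of its arguments, and the include paths are placeholders, and filling the modulus tensor ddw from a material model is omitted.

#include "Elastic.H"   // include paths assumed to match this file
#include "Set/Set.H"

// Placeholder driver: geometry/grids/dmap describe an existing AMReX hierarchy,
// u_mf is a nodal displacement field, and the output MultiFabs are assumed to be
// allocated with the appropriate nodal box arrays and component counts.
void SolveAndPostprocess(const amrex::Vector<amrex::Geometry>& geom,
    const amrex::Vector<amrex::BoxArray>& grids,
    const amrex::Vector<amrex::DistributionMapping>& dmap,
    const amrex::MultiFab& u_mf,
    amrex::MultiFab& eps_mf,
    amrex::MultiFab& sig_mf,
    amrex::MultiFab& w_mf)
{
    amrex::LPInfo info;
    Operator::Elastic<Set::Sym::Isotropic> elastic;
    elastic.define(geom, grids, dmap, info);

    // Uniform modulus on every level; how ddw is filled (e.g. from an
    // isotropic material model) is omitted here.
    Set::Matrix4<AMREX_SPACEDIM, Set::Sym::Isotropic> ddw;
    elastic.SetModel(ddw);

    // Optionally weight the modulus by a cell-centered phase field:
    // elastic.SetPsi(0, psi_mf);

    // Given a displacement u_mf (obtained elsewhere, e.g. from a nodal
    // multigrid solve using this operator), evaluate derived fields on level 0.
    elastic.Strain(0, eps_mf, u_mf, /*voigt=*/true);
    elastic.Stress(0, sig_mf, u_mf, /*voigt=*/true);
    elastic.Energy(0, w_mf, u_mf);
}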