1 #ifndef MODEL_SOLID_AFFINE_ISOTROPIC_H_
2 #define MODEL_SOLID_AFFINE_ISOTROPIC_H_
// Forward the user-supplied moduli and eigenstrain to the common initializer.
// NOTE(review): arguments are passed (a_mu, a_lambda, a_F0) — confirm this
// matches Define's declared parameter order; a silent mu/lambda swap here
// would be hard to detect downstream.
Define(a_mu, a_lambda, a_F0);
// Affine strain energy density:
//   W(F) = 0.5 * (F - F0) : ddw : (F - F0)
// evaluated as 0.5 * tr[ (F - F0)^T * (ddw * (F - F0)) ], where ddw is the
// isotropic modulus tensor and F0 the affine (eigenstrain) offset.
return 0.5 * ((F - F0).transpose() * (ddw * ((F - F0)))).trace();
// Pretty-print the model parameters (Lame lambda, shear modulus mu, and the
// eigenstrain matrix F0) for diagnostics. Surrounding braces are outside
// this view.
virtual void Print(std::ostream& out) const override
out << "lambda=" << ddw.Lambda() << ", mu=" << ddw.Mu() << ", F0=\n" << F0;
// Sanity check: report true as soon as any stored parameter is NaN —
// first the eigenstrain matrix, then the modulus tensor.
if (F0.hasNaN())
    return true;
if (ddw.contains_nan())
    return true;
// Convert whichever pair of elastic constants the user supplied into the
// Lame parameters (lambda, mu). Each assignment below belongs to a different
// branch of the input parser (the branch conditions are elided in this view);
// all formulas are the standard isotropic linear-elasticity relations.
lambda = E * nu / (1.0 + nu) / (1.0 - 2.0 * nu); // lambda from (E, nu)
mu = E / 2.0 / (1.0 + nu); // mu from (E, nu)
lambda = mu * (E - 2.0 * mu) / (3.0 * mu - E); // lambda from (E, mu)
// mu from (E, lambda): positive root of the quadratic relating E, lambda, mu.
mu = (E - 3.0 * lambda + sqrt(E * E + 9.0 * lambda * lambda + 2.0 * E * lambda)) / 4.0;
lambda = 2.0 * mu * nu / (1.0 - 2.0 * nu); // lambda from (mu, nu)
mu = lambda * (1.0 - 2.0 * nu) / 2.0 / nu; // mu from (lambda, nu); requires nu != 0
lambda = K - (2.0 * mu / 3.0); // lambda from bulk modulus K and mu
// Fallback branch: no recognized pair of constants was given.
Util::Exception(INFO, "Model parameters not specified with either (lame, shear), or (E, nu)");
// X-macro plumbing consumed by boilerplate elsewhere in the project:
// the operator class name and the member variables it serializes.
#define OP_CLASS Isotropic
#define OP_VARS X(ddw) X(F0)
// Number of scalar plot components: lambda, mu, plus the
// AMREX_SPACEDIM x AMREX_SPACEDIM entries of F0.
return AMREX_SPACEDIM * AMREX_SPACEDIM + 2;
158 if (i == 0)
return name +
"_lambda";
159 if (i == 1)
return name +
"_mu";
160 #if AMREX_SPACEDIM==2
161 if (i == 2)
return name +
"_F0_xx";
162 if (i == 3)
return name +
"_F0_xy";
163 if (i == 4)
return name +
"_F0_yx";
164 if (i == 5)
return name +
"_F0_yy";
165 #elif AMREX_SPACEDIM==3
166 if (i == 2)
return name +
"_F0_xx";
167 if (i == 3)
return name +
"_F0_xy";
168 if (i == 4)
return name +
"_F0_xy";
169 if (i == 5)
return name +
"_F0_yx";
170 if (i == 6)
return name +
"_F0_yy";
171 if (i == 7)
return name +
"_F0_yz";
172 if (i == 8)
return name +
"_F0_zy";
173 if (i == 9)
return name +
"_F0_zx";
174 if (i == 10)
return name +
"_F0_zz";
// Flatten this model field into a scalar MultiFab for output: for every
// (tiled, ghost-grown) box at level a_lev, write lambda, mu, and the entries
// of F0 in row-major order into components a_dstcomp + 0 .. a_dstcomp + N-1.
// NOTE(review): block is truncated in this view — the closing braces and the
// matching #endif fall below the last visible line.
for (amrex::MFIter mfi(a_dst, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
// Grow the tile box by a_nghost so ghost cells are filled too.
const amrex::Box& bx = mfi.growntilebox(amrex::IntVect(a_nghost));
amrex::Array4<const Model::Solid::Affine::Isotropic> const& src = ((*this)[a_lev])->array(mfi);
amrex::Array4<Set::Scalar> const& dst = a_dst.array(mfi);
// NOTE(review): loop variable n is not used in the visible body below —
// presumably intervening lines were elided from this excerpt; verify
// against the full file.
for (int n = 0; n < AMREX_SPACEDIM * AMREX_SPACEDIM; n++)
amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) {
// Component layout: 0 = lambda, 1 = mu, 2.. = F0 entries (row-major).
dst(i, j, k, a_dstcomp + 0) = src(i, j, k).ddw.Lambda();
dst(i, j, k, a_dstcomp + 1) = src(i, j, k).ddw.Mu();
#if AMREX_SPACEDIM==2
dst(i, j, k, a_dstcomp + 2) = src(i, j, k).F0(0, 0);
dst(i, j, k, a_dstcomp + 3) = src(i, j, k).F0(0, 1);
dst(i, j, k, a_dstcomp + 4) = src(i, j, k).F0(1, 0);
dst(i, j, k, a_dstcomp + 5) = src(i, j, k).F0(1, 1);
#elif AMREX_SPACEDIM==3
dst(i, j, k, a_dstcomp + 2) = src(i, j, k).F0(0, 0);
dst(i, j, k, a_dstcomp + 3) = src(i, j, k).F0(0, 1);
dst(i, j, k, a_dstcomp + 4) = src(i, j, k).F0(0, 2);
dst(i, j, k, a_dstcomp + 5) = src(i, j, k).F0(1, 0);
dst(i, j, k, a_dstcomp + 6) = src(i, j, k).F0(1, 1);
dst(i, j, k, a_dstcomp + 7) = src(i, j, k).F0(1, 2);
dst(i, j, k, a_dstcomp + 8) = src(i, j, k).F0(2, 0);
dst(i, j, k, a_dstcomp + 9) = src(i, j, k).F0(2, 1);
dst(i, j, k, a_dstcomp + 10) = src(i, j, k).F0(2, 2);