Alamo
Matrix4_Isotropic.H
Go to the documentation of this file.
1//
2// The isotropic tensor is defined to be
3//
4// .. math::
5//
6// C_{ijkl} = \mu(d_{il}d_{jk} + d_{ik}d_{jl}) + \lambda d_{ij} d_{kl}
7//
8// The inverse ("compliance") tensor is
9//
10// .. math::
11//
12// S_{ijkl} = ((1+\nu)/(2E))(d_{il}d_{jk} + d_{ik}d_{jl}) - (\nu/E)d_{ij}d_{kl}
13//
14// Replacing E, nu with Lame constants gives:
15//
16// .. math::
17//
18// S_{ijkl} = (1/(4\mu))(d_{il}d_{jk} + d_{ik}d_{jl}) - (\lambda/(2\mu(3\lambda+2\mu))) d_{ij} d_{kl}
19//
20// For reference: http://solidmechanics.org/text/Chapter3_2/Chapter3_2.htm
21// https://en.wikipedia.org/wiki/Lam%C3%A9_parameters
22//
23
24#ifndef SET_MATRIX4_ISOTROPIC_H
25#define SET_MATRIX4_ISOTROPIC_H
26
27#include "Util/Util.H"
28#include "Base.H"
29
30namespace Set
31{
32template<>
34{
35 Set::Scalar lambda=NAN, mu=NAN;
36public:
39
40 /// Note: for the Isotropic Matrix4 this routine works for **retrieval only**!
41 /// If you try to assign a value using this with, say
42 ///
43 /// isotropicmatrix4(i,j,k,l) = 8.0
44 ///
45 /// you will get a `lvalue required as left operand of assignment` compile error.
46 /// You should probably consider using a lower symmetry operator.
48 Scalar operator () (const int i, const int j, const int k, const int l) const
49 {
50 Set::Scalar ret = 0.0;
51 if (i==k && j==l) ret += mu;
52 if (i==l && j==k) ret += mu;
53 if (i==j && k==l) ret += lambda;
54 return ret;
55 }
56 void Randomize()
57 {
58 lambda = Util::Random();
59 mu = Util::Random();
60 }
61 void Print (std::ostream& os )
62 {
63 os << "lambda = " << lambda << " mu = " << mu;
64 }
66 {
67 return lambda;
68 }
69 Set::Scalar Mu () const
70 {
71 return mu;
72 }
74 {
75 return mu * ( 3.0*lambda + 2.0*mu ) / (lambda + mu);
76 }
78 {
79 return 0.5 * lambda / (lambda + mu);
80 }
88
89
91 {
93 inv.mu = 1./4./mu;
94 inv.lambda = lambda / (2*mu*(3*lambda + 2*mu));
95 return inv;
96 }
106
107 //AMREX_GPU_HOST_DEVICE void operator = (Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a) {lambda = a.lambda; mu = a.mu;}
108 AMREX_GPU_HOST_DEVICE void operator += (const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a) {lambda += a.lambda; mu += a.mu;}
109 AMREX_GPU_HOST_DEVICE void operator -= (const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a) {lambda -= a.lambda; mu -= a.mu;}
110 AMREX_GPU_HOST_DEVICE void operator *= (const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a) {lambda *= a.lambda; mu *= a.mu;}
111 AMREX_GPU_HOST_DEVICE void operator /= (const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a) {lambda /= a.lambda; mu /= a.mu;}
112 AMREX_GPU_HOST_DEVICE void operator *= (const Set::Scalar &alpha) {lambda *= alpha; mu *= alpha;}
113 AMREX_GPU_HOST_DEVICE void operator /= (const Set::Scalar &alpha) {lambda /= alpha; mu /= alpha;}
114
116 {
117 return std::sqrt(lambda*lambda + mu*mu);
118 }
119
120 bool contains_nan() const
121 {
122 if (std::isnan(lambda)) return true;
123 if (std::isnan(mu)) return true;
124 return false;
125 }
126};
129{
131
132 #if AMREX_SPACEDIM == 2
133 ret(0,0) = (a.lambda + 2.*a.mu) * b(0,0) + a.lambda *b(1,1);
134 ret(1,1) = a.lambda * b(0,0) + (a.lambda + 2.*a.mu)*b(1,1);
135 ret(0,1) = a.mu*(b(0,1) + b(1,0)); ret(1,0) = ret(0,1);
136
137 #elif AMREX_SPACEDIM == 3
138 ret(0,0) = (a.lambda + 2.*a.mu) * b(0,0) + a.lambda *b(1,1) + a.lambda *b(2,2);
139 ret(1,1) = a.lambda * b(0,0) + (a.lambda + 2.*a.mu)*b(1,1) + a.lambda *b(2,2);
140 ret(2,2) = a.lambda * b(0,0) + a.lambda *b(1,1) + (a.lambda + 2.*a.mu)*b(2,2);
141 ret(1,2) = a.mu*(b(1,2) + b(2,1)); ret(2,1) = ret(1,2);
142 ret(2,0) = a.mu*(b(2,0) + b(0,2)); ret(0,2) = ret(2,0);
143 ret(0,1) = a.mu*(b(0,1) + b(1,0)); ret(1,0) = ret(0,1);
144
145 #endif
146 return ret;
147}
151
152
155{
156 Set::Vector ret = Set::Vector::Zero();
157
158 for (int i = 0; i < AMREX_SPACEDIM; i++)
159 for (int j=0; j < AMREX_SPACEDIM; j++)
160 ret(i) += a.mu*(b(i,j,j) + b(j,i,j)) + a.lambda*b(j,j,i);
161
162 return ret;
163}
164
167{
168 Matrix4<AMREX_SPACEDIM,Sym::Isotropic> ret;// = Set::Vector::Zero();
169 ret.mu = a.mu * b;
170 ret.lambda = a.lambda * b;
171 return ret;
172}
175{
176 Matrix4<AMREX_SPACEDIM,Sym::Isotropic> ret;// = Set::Vector::Zero();
177 ret.mu = a.mu / b;
178 ret.lambda = a.lambda / b;
179 return ret;
180}
186
189{
190 if (a.mu != b.mu) return false;
191 if (a.lambda != b.lambda) return false;
192 return true;
193}
196{
197 Matrix4<AMREX_SPACEDIM,Sym::Isotropic> ret;// = Set::Vector::Zero();
198 ret.mu = a.mu + b.mu;
199 ret.lambda = a.lambda + b.lambda;
200 return ret;
201}
202
212
213
214}
215#endif
Matrix< _Scalar, _Rows, _Cols > & operator*=(const amrex::Vector< amrex::Real > &x)
Definition Eigen_Amrex.H:18
AMREX_FORCE_INLINE void operator+=(const OP_CLASS &rhs)
AMREX_GPU_HOST_DEVICE Matrix4(Set::Scalar a_lambda, Set::Scalar a_mu)
Matrix4< AMREX_SPACEDIM, Sym::Isotropic > Inverse() const
static Matrix4< AMREX_SPACEDIM, Sym::Isotropic > Zero()
A collection of data types and symmetry-reduced data structures.
Definition Base.H:18
AMREX_FORCE_INLINE Quaternion operator-(const Quaternion a, const Quaternion b)
Definition Base.H:100
Sym
Definition Base.H:197
@ Isotropic
Definition Base.H:197
amrex::Real Scalar
Definition Base.H:19
AMREX_FORCE_INLINE AMREX_GPU_HOST_DEVICE Matrix4< AMREX_SPACEDIM, Sym::Diagonal > operator/(const Matrix4< AMREX_SPACEDIM, Sym::Diagonal > &a, const Set::Scalar &b)
Eigen::Matrix< amrex::Real, AMREX_SPACEDIM, 1 > Vector
Definition Base.H:20
AMREX_FORCE_INLINE bool operator==(const Quaternion a, const Quaternion b)
Definition Base.H:108
AMREX_FORCE_INLINE Quaternion operator+(const Quaternion a, const Quaternion b)
Definition Base.H:92
Eigen::Matrix< amrex::Real, AMREX_SPACEDIM, AMREX_SPACEDIM > Matrix
Definition Base.H:23
AMREX_FORCE_INLINE Quaternion operator*(const Set::Scalar alpha, const Quaternion b)
Definition Base.H:78
Set::Scalar Random()
Definition Set.cpp:9