Alamo
Matrix4_Isotropic.H
//
// The isotropic tensor is defined to be
//
// .. math::
//
//     C_{ijkl} = \mu(d_{il}d_{jk} + d_{ik}d_{jl}) + \lambda d_{ij} d_{kl}
//
// The inverse ("compliance") tensor is
//
// .. math::
//
//     S_{ijkl} = \frac{1+\nu}{2E}(d_{il}d_{jk} + d_{ik}d_{jl}) - \frac{\nu}{E} d_{ij} d_{kl}
//
// Replacing E, nu with the Lamé constants gives:
//
// .. math::
//
//     S_{ijkl} = \frac{1}{4\mu}(d_{il}d_{jk} + d_{ik}d_{jl}) - \frac{\lambda}{2\mu(3\lambda+2\mu)} d_{ij} d_{kl}
//
// For reference: http://solidmechanics.org/text/Chapter3_2/Chapter3_2.htm
//                https://en.wikipedia.org/wiki/Lam%C3%A9_parameters
//
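// As a quick sanity check on the signs above (assuming the definitions as written),
// the 1111 components are
//
// .. math::
//
//     C_{1111} = 2\mu + \lambda, \qquad
//     S_{1111} = \frac{1}{2\mu} - \frac{\lambda}{2\mu(3\lambda+2\mu)}
//              = \frac{\lambda+\mu}{\mu(3\lambda+2\mu)} = \frac{1}{E},
//
// consistent with E = \mu(3\lambda+2\mu)/(\lambda+\mu).
//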

#ifndef SET_MATRIX4_ISOTROPIC_H
#define SET_MATRIX4_ISOTROPIC_H

#include "Util/Util.H"
#include "Base.H"

namespace Set
{
template<>
class Matrix4<AMREX_SPACEDIM,Sym::Isotropic>
{
    Set::Scalar lambda=NAN, mu=NAN;
public:
    AMREX_GPU_HOST_DEVICE Matrix4() {};
    AMREX_GPU_HOST_DEVICE Matrix4(Set::Scalar a_lambda, Set::Scalar a_mu) : lambda(a_lambda), mu(a_mu) {};

    /// Note: for the Isotropic Matrix4 this routine works for **retrieval only**!
    /// If you try to assign a value through it, say,
    ///
    ///     isotropicmatrix4(i,j,k,l) = 8.0
    ///
    /// you will get an `lvalue required as left operand of assignment` compile error.
    /// You should probably consider using a lower-symmetry operator instead.
    Scalar operator () (const int i, const int j, const int k, const int l) const
    {
        Set::Scalar ret = 0.0;
        if (i==k && j==l) ret += mu;
        if (i==l && j==k) ret += mu;
        if (i==j && k==l) ret += lambda;
        return ret;
    }
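
    // A minimal retrieval sketch (the names below are illustrative, not part of this header):
    //
    //     Set::Matrix4<AMREX_SPACEDIM,Sym::Isotropic> C(lambda, mu);
    //     Set::Scalar c1111 = C(0,0,0,0);   // = 2*mu + lambda
    //     Set::Scalar c1212 = C(0,1,0,1);   // = mu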
    void Randomize()
    {
        lambda = Util::Random();
        mu = Util::Random();
    }
    void Print (std::ostream& os)
    {
        os << "lambda = " << lambda << " mu = " << mu;
    }
    Set::Scalar Lambda () const
    {
        return lambda;
    }
    Set::Scalar Mu () const
    {
        return mu;
    }

    Matrix4<AMREX_SPACEDIM,Sym::Isotropic> Inverse () const
    {
        Matrix4<AMREX_SPACEDIM,Sym::Isotropic> inv;
        inv.mu = 1./4./mu;
        // The negative sign is needed for this to match the compliance tensor
        // S_{ijkl} given in the header comment (so that Inverse() actually inverts C).
        inv.lambda = -lambda / (2*mu*(3*lambda + 2*mu));
        return inv;
    }
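
    // A quick consistency sketch (illustrative only; it uses the operator* for
    // Set::Matrix arguments defined later in this file):
    //
    //     Set::Matrix eps;                     // some symmetric strain
    //     Set::Matrix sig  = C*eps;            // sigma = 2*mu*eps + lambda*tr(eps)*I
    //     Set::Matrix eps2 = C.Inverse()*sig;  // recovers eps (up to roundoff)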

    // The free-function operators defined below need access to lambda and mu.
    friend Set::Matrix operator * (const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a, const Set::Matrix &b);
    friend Set::Vector operator * (const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a, const Set::Matrix3 &b);
    friend Matrix4<AMREX_SPACEDIM,Sym::Isotropic> operator * (const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a, const Set::Scalar &b);
    friend Matrix4<AMREX_SPACEDIM,Sym::Isotropic> operator / (const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a, const Set::Scalar &b);
    friend bool operator == (const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a, const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &b);
    friend Matrix4<AMREX_SPACEDIM,Sym::Isotropic> operator + (const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a, const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &b);

    //AMREX_GPU_HOST_DEVICE void operator = (Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a) {lambda = a.lambda; mu = a.mu;}
    AMREX_GPU_HOST_DEVICE void operator += (const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a) {lambda += a.lambda; mu += a.mu;}
    AMREX_GPU_HOST_DEVICE void operator -= (const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a) {lambda -= a.lambda; mu -= a.mu;}
    AMREX_GPU_HOST_DEVICE void operator *= (const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a) {lambda *= a.lambda; mu *= a.mu;}
    AMREX_GPU_HOST_DEVICE void operator /= (const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a) {lambda /= a.lambda; mu /= a.mu;}
    AMREX_GPU_HOST_DEVICE void operator *= (const Set::Scalar &alpha) {lambda *= alpha; mu *= alpha;}
    AMREX_GPU_HOST_DEVICE void operator /= (const Set::Scalar &alpha) {lambda /= alpha; mu /= alpha;}

    Set::Scalar Norm () const
    {
        return std::sqrt(lambda*lambda + mu*mu);
    }

    bool contains_nan() const
    {
        if (std::isnan(lambda)) return true;
        if (std::isnan(mu)) return true;
        return false;
    }
};
// Stress-like contraction with a matrix argument:
//     ret_{ij} = C_{ijkl} b_{kl} = 2 mu b_{(ij)} + lambda tr(b) delta_{ij}
// (b_{(ij)} denotes the symmetric part of b).
AMREX_FORCE_INLINE
Set::Matrix operator * (const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a, const Set::Matrix &b)
{
    Set::Matrix ret = Set::Matrix::Zero();

#if AMREX_SPACEDIM == 2
    ret(0,0) = (a.lambda + 2.*a.mu) * b(0,0) + a.lambda * b(1,1);
    ret(1,1) = a.lambda * b(0,0) + (a.lambda + 2.*a.mu) * b(1,1);
    ret(0,1) = a.mu*(b(0,1) + b(1,0)); ret(1,0) = ret(0,1);

#elif AMREX_SPACEDIM == 3
    ret(0,0) = (a.lambda + 2.*a.mu) * b(0,0) + a.lambda * b(1,1) + a.lambda * b(2,2);
    ret(1,1) = a.lambda * b(0,0) + (a.lambda + 2.*a.mu) * b(1,1) + a.lambda * b(2,2);
    ret(2,2) = a.lambda * b(0,0) + a.lambda * b(1,1) + (a.lambda + 2.*a.mu) * b(2,2);
    ret(1,2) = a.mu*(b(1,2) + b(2,1)); ret(2,1) = ret(1,2);
    ret(2,0) = a.mu*(b(2,0) + b(0,2)); ret(0,2) = ret(2,0);
    ret(0,1) = a.mu*(b(0,1) + b(1,0)); ret(1,0) = ret(0,1);

#endif
    return ret;
}

// Contraction with a third-rank tensor b (e.g. the gradient of a displacement gradient):
//     ret(i) = sum_j [ mu*(b(i,j,j) + b(j,i,j)) + lambda*b(j,j,i) ]
AMREX_FORCE_INLINE
Set::Vector operator * (const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a, const Set::Matrix3 &b)
{
    Set::Vector ret = Set::Vector::Zero();

    for (int i = 0; i < AMREX_SPACEDIM; i++)
        for (int j = 0; j < AMREX_SPACEDIM; j++)
            ret(i) += a.mu*(b(i,j,j) + b(j,i,j)) + a.lambda*b(j,j,i);

    return ret;
}

AMREX_FORCE_INLINE AMREX_GPU_HOST_DEVICE
Matrix4<AMREX_SPACEDIM,Sym::Isotropic> operator * (const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a, const Set::Scalar &b)
{
    Matrix4<AMREX_SPACEDIM,Sym::Isotropic> ret;
    ret.mu = a.mu * b;
    ret.lambda = a.lambda * b;
    return ret;
}
AMREX_FORCE_INLINE AMREX_GPU_HOST_DEVICE
Matrix4<AMREX_SPACEDIM,Sym::Isotropic> operator / (const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a, const Set::Scalar &b)
{
    Matrix4<AMREX_SPACEDIM,Sym::Isotropic> ret;
    ret.mu = a.mu / b;
    ret.lambda = a.lambda / b;
    return ret;
}

AMREX_FORCE_INLINE
bool operator == (const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a, const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &b)
{
    if (a.mu != b.mu) return false;
    if (a.lambda != b.lambda) return false;
    return true;
}
AMREX_FORCE_INLINE AMREX_GPU_HOST_DEVICE
Matrix4<AMREX_SPACEDIM,Sym::Isotropic> operator + (const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &a, const Matrix4<AMREX_SPACEDIM,Sym::Isotropic> &b)
{
    Matrix4<AMREX_SPACEDIM,Sym::Isotropic> ret;
    ret.mu = a.mu + b.mu;
    ret.lambda = a.lambda + b.lambda;
    return ret;
}

}
#endif