};
using DomainEle = VolumeElementForcesAndSourcesCore;
};
constexpr double rho = 1;
constexpr double omega = 2.4;
private:
};
}
}
  auto project_ho_geometry = [&]() {
    Projection10NodeCoordsOnField ent_method(mField, "GEOMETRY");
    return mField.loop_dofs("GEOMETRY", ent_method);
  };
  CHKERR project_ho_geometry();
}
      simple->getProblemName(), "U");
}
t_source(0) = 0.1;
t_source(1) = 1.;
return t_source;
};
  auto rho_ptr = boost::make_shared<double>(rho);

  auto add_rho_block = [this, rho_ptr](auto &pip, auto block_name, Sev sev) {
      OpMatRhoBlocks(boost::shared_ptr<double> rho_ptr,
                     MoFEM::Interface &m_field, Sev sev,
                     std::vector<const CubitMeshSets *> meshset_vec_ptr)
          : rhoPtr(rho_ptr) {
        CHK_THROW_MESSAGE(extractRhoData(m_field, meshset_vec_ptr, sev),
                          "Can not get data from block");
      }
      MoFEMErrorCode doWork(int side, EntityType type,
                            EntitiesFieldData::EntData &data) {
        MoFEMFunctionBegin;
        for (auto &b : blockData) {
          if (b.blockEnts.find(getFEEntityHandle()) != b.blockEnts.end()) {
            *rhoPtr = b.rho;
            MoFEMFunctionReturnHot(0);
          }
        }
        *rhoPtr = rho;
        MoFEMFunctionReturn(0);
      }
    private:
      struct BlockData {
        double rho;
        Range blockEnts;
      };
std::vector<BlockData> blockData;
      MoFEMErrorCode
      extractRhoData(MoFEM::Interface &m_field,
                     std::vector<const CubitMeshSets *> meshset_vec_ptr,
                     Sev sev) {
        MoFEMFunctionBegin;
        for (auto m : meshset_vec_ptr) {
std::vector<double> block_data;
CHKERR m->getAttributes(block_data);
          if (block_data.size() < 1) {
            SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
                    "Expected that block has at least one attribute");
          }
          auto get_block_ents = [&]() {
            Range ents;
            CHKERR m_field.get_moab().get_entities_by_handle(m->meshset, ents,
                                                             true);
            return ents;
          };
          MOFEM_TAG_AND_LOG("WORLD", sev, "Mat Rho Block")
              << m->getName() << ": ro = " << block_data[0];
blockData.push_back({block_data[0], get_block_ents()});
}
        MoFEMFunctionReturn(0);
      }
boost::shared_ptr<double> rhoPtr;
};
    pip.push_back(new OpMatRhoBlocks(
        rho_ptr, mField, sev,
        mField.getInterface<MeshsetsManager>()->getCubitMeshsetPtr(std::regex(
            (boost::format("%s(.*)") % block_name).str()))));
};
  auto get_rho = [rho_ptr](const double, const double, const double) {
    return *rho_ptr;
  };
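  // rho_ptr is updated element by element by OpMatRhoBlocks (pushed ahead of
  // the integration operators), so get_rho evaluates to the density of the
  // block containing the current element when OpMass and OpInertiaForce are
  // integrated.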
  auto get_time_scale = [this](const double time) {
    return sin(time * omega * M_PI);
  };
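  // Harmonic load factor: at time t the loading is scaled by
  // sin(omega * M_PI * t), with omega = 2.4 defined at the top of the file,
  // i.e. a forcing period of 2 / omega time units.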
  auto apply_lhs = [&](auto &pip) {
    CHKERR AddHOOps<SPACE_DIM, SPACE_DIM, SPACE_DIM>::add(pip, {H1},
                                                          "GEOMETRY");
    CHKERR HenckyOps::opFactoryDomainLhs<SPACE_DIM, PETSC, GAUSS, DomainEleOp>(
        mField, pip, "U", "MAT_ELASTIC", Sev::verbose);
    CHKERR add_rho_block(pip, "MAT_RHO", Sev::verbose);
    pip.push_back(new OpMass("U", "U", get_rho));
    static_cast<OpMass &>(pip.back()).feScalingFun =
        [](const FEMethod *fe_ptr) { return fe_ptr->ts_aa; };
  };
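  // Note on feScalingFun: with createTSIM2 the problem is second order in
  // time, and PETSc assembles the Jacobian as dF/dU + v * dF/dU_t +
  // a * dF/dU_tt, where a is the shift reported in ts_aa. Scaling OpMass by
  // ts_aa therefore contributes the a * M inertia term to the tangent matrix.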
  auto apply_rhs = [&](auto &pip) {
    CHKERR AddHOOps<SPACE_DIM, SPACE_DIM, SPACE_DIM>::add(
        pipeline_mng->getOpDomainRhsPipeline(), {H1}, "GEOMETRY");
    CHKERR HenckyOps::opFactoryDomainRhs<SPACE_DIM, PETSC, GAUSS, DomainEleOp>(
        mField, pip, "U", "MAT_ELASTIC", Sev::inform);
    CHKERR add_rho_block(pip, "MAT_RHO", Sev::inform);
    auto mat_acceleration_ptr = boost::make_shared<MatrixDouble>();
    pip.push_back(new OpCalculateVectorFieldValuesDotDot<SPACE_DIM>(
        "U", mat_acceleration_ptr));
    pip.push_back(new OpInertiaForce("U", mat_acceleration_ptr, get_rho));
    // body force on the "BODY_FORCE" blockset, scaled in time by a vector
    // series read from -time_vector_file
        {boost::make_shared<TimeScaleVector<SPACE_DIM>>("-time_vector_file",
                                                        true)},
        "BODY_FORCE", Sev::inform);
  };
CHKERR apply_lhs(pipeline_mng->getOpDomainLhsPipeline());
CHKERR apply_rhs(pipeline_mng->getOpDomainRhsPipeline());
};
  auto get_bc_hook = [&]() {
    EssentialPreProc<DisplacementCubitBcData> hook(
        mField, pipeline_mng->getDomainRhsFE(),
        {boost::make_shared<TimeScale>()});
    return hook;
  };
pipeline_mng->getDomainRhsFE()->preProcessHook = get_bc_hook();
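  // The hook set above runs before each right-hand-side assembly and
  // re-imposes the essential (displacement) boundary conditions, scaled in
  // time by TimeScale.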
}
"out_step_" + boost::lexical_cast<std::string>(
ts_step) +
".h5m");
}
}
private:
  boost::shared_ptr<PostProcEle> postProc;
};
  ts = pipeline_mng->createTSIM2();

  auto post_proc_fe = boost::make_shared<PostProcEle>(mField);
  CHKERR AddHOOps<SPACE_DIM, SPACE_DIM, SPACE_DIM>::add(
      post_proc_fe->getOpPtrVector(), {H1});
  auto common_ptr = commonDataFactory<SPACE_DIM, GAUSS, DomainEleOp>(
      mField, post_proc_fe->getOpPtrVector(), "U", "MAT_ELASTIC", Sev::inform);
  auto u_ptr = boost::make_shared<MatrixDouble>();
  post_proc_fe->getOpPtrVector().push_back(
      new OpCalculateVectorFieldValues<SPACE_DIM>("U", u_ptr));
  auto X_ptr = boost::make_shared<MatrixDouble>();
  post_proc_fe->getOpPtrVector().push_back(
      new OpCalculateVectorFieldValues<SPACE_DIM>("GEOMETRY", X_ptr));
  post_proc_fe->getOpPtrVector().push_back(
      new OpPPMap(
          post_proc_fe->getPostProcMesh(), post_proc_fe->getMapGaussPts(),
          {},
          {{"U", u_ptr}, {"GEOMETRY", X_ptr}},
          {{"GRAD", common_ptr->matGradPtr},
           {"FIRST_PIOLA", common_ptr->getMatFirstPiolaStress()}},
          {}));
boost::shared_ptr<FEMethod> null_fe;
auto monitor_ptr = boost::make_shared<Monitor>(dm, post_proc_fe);
  CHKERR DMMoFEMTSSetMonitor(dm, ts, simple->getDomainFEName(), null_fe,
                             monitor_ptr);
double ftime = 1;
CHKERR TSSetExactFinalTime(ts, TS_EXACTFINALTIME_MATCHSTEP);
  auto T = createDMVector(simple->getDM());
  auto TT = vectorDuplicate(T);
  CHKERR DMoFEMMeshToLocalVector(dm, T, INSERT_VALUES, SCATTER_FORWARD);
  CHKERR TS2SetSolution(ts, T, TT);
PetscInt steps, snesfails, rejects, nonlinits, linits;
#if PETSC_VERSION_GE(3, 8, 0)
CHKERR TSGetStepNumber(ts, &steps);
#else
CHKERR TSGetTimeStepNumber(ts, &steps);
#endif
CHKERR TSGetSNESFailures(ts, &snesfails);
CHKERR TSGetStepRejections(ts, &rejects);
CHKERR TSGetSNESIterations(ts, &nonlinits);
CHKERR TSGetKSPIterations(ts, &linits);
"steps %d (%d rejected, %d SNES fails), ftime %g, nonlinits "
"%d, linits %d\n",
steps, rejects, snesfails, ftime, nonlinits, linits);
}
PetscBool test_flg = PETSC_FALSE;
if (test_flg) {
    auto T = createDMVector(simple->getDM());
    CHKERR DMoFEMMeshToLocalVector(simple->getDM(), T, INSERT_VALUES,
                                   SCATTER_FORWARD);
double nrm2;
CHKERR VecNorm(T, NORM_2, &nrm2);
    MOFEM_LOG("EXAMPLE", Sev::inform) << "Regression norm " << nrm2;
constexpr double regression_value = 0.0194561;
    if (fabs(nrm2 - regression_value) > 1e-2)
      SETERRQ(PETSC_COMM_WORLD, MOFEM_ATOM_TEST_INVALID,
              "Regression test failed; wrong norm value.");
}
}
}
static char help[] = "...\n\n";
int main(int argc, char *argv[]) {
const char param_file[] = "param_file.petsc";
auto core_log = logging::core::get();
core_log->add_sink(
LogManager::createSink(LogManager::getStrmWorld(), "EXAMPLE"));
LogManager::setLog("EXAMPLE");
try {
DMType dm_name = "DMMOFEM";
moab::Core mb_instance;
moab::Interface &moab = mb_instance;
}
}
#ifndef __HENCKY_OPS_HPP__
#define __HENCKY_OPS_HPP__
constexpr double eps = std::numeric_limits<float>::epsilon();
auto f = [](double v) { return 0.5 * std::log(v); };
auto d_f = [](double v) { return 0.5 / v; };
auto dd_f = [](double v) { return -0.5 / (v * v); };
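// Spectral functions for the logarithmic strain: with the right Cauchy-Green
// tensor C = F^T F = sum_a lambda_a v_a (x) v_a, the Hencky strain is
//   logC = sum_a f(lambda_a) v_a (x) v_a,   f(v) = 0.5 * log(v),
// and d_f, dd_f are f', f'', needed to build d(logC)/dC and d^2(logC)/dC^2
// through EigenMatrix::getDiffMat / getDiffDiffMat below.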
struct isEq {
  static inline auto check(const double &a, const double &b) {
    return std::abs(a - b) / absMax < eps;
  }
  static double absMax; // scale used for the relative comparison
};

inline auto is_eq(const double &a, const double &b) {
  return isEq::check(a, b);
};
template <int DIM> inline auto get_uniq_nb(double *ptr) {
  std::array<double, DIM> tmp;
  std::copy(ptr, &ptr[DIM], tmp.begin());
  std::sort(tmp.begin(), tmp.end());
  isEq::absMax = std::max(std::abs(tmp[0]), std::abs(tmp[DIM - 1]));
  return std::distance(tmp.begin(),
                       std::unique(tmp.begin(), tmp.end(), is_eq));
};
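// get_uniq_nb counts the distinct eigenvalues up to the relative tolerance
// used by isEq; e.g. eigenvalues {2, 2, 5} give nb_uniq == 2. The count picks
// the correct branch of the closed-form derivative formulas, which degenerate
// when eigenvalues coincide.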
template <int DIM>
auto sort_eigen_vals(FTensor::Tensor1<double, DIM> &eig,
                     FTensor::Tensor2<double, DIM, DIM> &eigen_vec) {
  isEq::absMax =
      std::max(std::max(std::abs(eig(0)), std::abs(eig(1))), std::abs(eig(2)));
  int i = 0, j = 1, k = 2;
  if (is_eq(eig(0), eig(1))) {
    i = 0; j = 1; k = 2;
  } else if (is_eq(eig(0), eig(2))) {
    i = 0; j = 2; k = 1;
  } else if (is_eq(eig(1), eig(2))) {
    i = 1; j = 2; k = 0;
  }
  FTensor::Tensor2<double, 3, 3> eigen_vec_c{
      eigen_vec(i, 0), eigen_vec(i, 1), eigen_vec(i, 2),
      eigen_vec(j, 0), eigen_vec(j, 1), eigen_vec(j, 2),
      eigen_vec(k, 0), eigen_vec(k, 1), eigen_vec(k, 2)};
  FTensor::Tensor1<double, 3> eig_c{eig(i), eig(j), eig(k)};
  {
    FTensor::Index<'i', DIM> i;
    FTensor::Index<'j', DIM> j;
    eig(i) = eig_c(i);
    eigen_vec(i, j) = eigen_vec_c(i, j);
  }
};
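// Reordering matters because the 3D formulas for repeated eigenvalues assume
// the repeated pair sits in a fixed position; sorting the eigenpairs before
// calling getDiffMat/getDiffDiffMat keeps those branches well defined.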
struct CommonData : public boost::enable_shared_from_this<CommonData> {
  boost::shared_ptr<MatrixDouble> matGradPtr;
  boost::shared_ptr<MatrixDouble> matDPtr;
  boost::shared_ptr<MatrixDouble> matLogCPlastic;

  MatrixDouble matEigVal;
  MatrixDouble matEigVec;
  MatrixDouble matLogC;
  MatrixDouble matLogCdC;
  MatrixDouble matHenckyStress;
  MatrixDouble matFirstPiolaStress;
  MatrixDouble matSecondPiolaStress;
  MatrixDouble matTangent;

  inline auto getMatHenckyStress() {
    return boost::shared_ptr<MatrixDouble>(shared_from_this(),
                                           &matHenckyStress);
  }
  inline auto getMatFirstPiolaStress() {
    return boost::shared_ptr<MatrixDouble>(shared_from_this(),
                                           &matFirstPiolaStress);
  }
  inline auto getMatLogC() {
    return boost::shared_ptr<MatrixDouble>(shared_from_this(), &matLogC);
  }
  inline auto getMatTangent() {
    return boost::shared_ptr<MatrixDouble>(shared_from_this(), &matTangent);
  }
};
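// The getters above rely on the aliasing constructor of boost::shared_ptr:
// the returned pointer addresses a single member (e.g. &matLogC) while
// sharing ownership of the whole CommonData object, so the matrix stays
// alive as long as any downstream operator holds the pointer.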
template <int DIM, IntegrationType I, typename DomainEleOp>
struct OpCalculateEigenValsImpl;
template <int DIM, IntegrationType I, typename DomainEleOp>
struct OpCalculateLogCImpl;
template <int DIM, IntegrationType I, typename DomainEleOp>
struct OpCalculateLogC_dCImpl;
template <int DIM, IntegrationType I, typename DomainEleOp, int S>
struct OpCalculateHenckyStressImpl;
template <int DIM, IntegrationType I, typename DomainEleOp, int S>
struct OpCalculateHenckyPlasticStressImpl;
template <int DIM, IntegrationType I, typename DomainEleOp, int S>
struct OpCalculatePiolaStressImpl;
template <int DIM, IntegrationType I, typename DomainEleOp, int S>
struct OpHenckyTangentImpl;
template <int DIM, typename DomainEleOp>
struct OpCalculateEigenValsImpl<DIM, GAUSS, DomainEleOp> : public DomainEleOp {
  OpCalculateEigenValsImpl(const std::string field_name,
                           boost::shared_ptr<CommonData> common_data)
      : DomainEleOp(field_name, DomainEleOp::OPROW),
        commonDataPtr(common_data) {
    std::fill(&DomainEleOp::doEntities[MBEDGE],
              &DomainEleOp::doEntities[MBMAXTYPE], false);
  }
  MoFEMErrorCode doWork(int side, EntityType type,
                        EntitiesFieldData::EntData &data) {
    MoFEMFunctionBegin;
    const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
auto t_grad = getFTensor2FromMat<DIM, DIM>(*(commonDataPtr->matGradPtr));
commonDataPtr->matEigVal.resize(DIM, nb_gauss_pts, false);
commonDataPtr->matEigVec.resize(DIM * DIM, nb_gauss_pts, false);
auto t_eig_val = getFTensor1FromMat<DIM>(commonDataPtr->matEigVal);
auto t_eig_vec = getFTensor2FromMat<DIM, DIM>(commonDataPtr->matEigVec);
    for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
      constexpr auto t_kd = FTensor::Kronecker_Delta<int>();
      FTensor::Tensor2<double, DIM, DIM> F;
      F(i, j) = t_grad(i, j) + t_kd(i, j);
      FTensor::Tensor2_symmetric<double, DIM> C;
      C(i, j) = F(k, i) * F(k, j); // right Cauchy-Green tensor C = F^T F
      FTensor::Tensor1<double, DIM> eig;
      FTensor::Tensor2<double, DIM, DIM> eigen_vec;
      for (int ii = 0; ii != DIM; ii++)
        for (int jj = 0; jj != DIM; jj++)
          eigen_vec(ii, jj) = C(ii, jj);
      CHKERR computeEigenValuesSymmetric(eigen_vec, eig);
      // clamp so that log() of the eigenvalues stays finite under round-off
      for (auto ii = 0; ii != DIM; ++ii)
        eig(ii) = std::max(std::numeric_limits<double>::epsilon(), eig(ii));
      auto nb_uniq = get_uniq_nb<DIM>(&eig(0));
      if constexpr (DIM == 3) {
        if (nb_uniq == 2) {
          sort_eigen_vals<DIM>(eig, eigen_vec);
        }
      }
      t_eig_val(i) = eig(i);
      t_eig_vec(i, j) = eigen_vec(i, j);
++t_grad;
++t_eig_val;
++t_eig_vec;
}
    MoFEMFunctionReturn(0);
  }
private:
boost::shared_ptr<CommonData> commonDataPtr;
};
template <int DIM, typename DomainEleOp>
struct OpCalculateLogCImpl<DIM, GAUSS, DomainEleOp> : public DomainEleOp {
  OpCalculateLogCImpl(const std::string field_name,
                      boost::shared_ptr<CommonData> common_data)
      : DomainEleOp(field_name, DomainEleOp::OPROW),
        commonDataPtr(common_data) {
    std::fill(&DomainEleOp::doEntities[MBEDGE],
              &DomainEleOp::doEntities[MBMAXTYPE], false);
  }
  MoFEMErrorCode doWork(int side, EntityType type,
                        EntitiesFieldData::EntData &data) {
    MoFEMFunctionBegin;
    const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
    commonDataPtr->matLogC.resize(size_symm, nb_gauss_pts, false);
auto t_eig_val = getFTensor1FromMat<DIM>(commonDataPtr->matEigVal);
auto t_eig_vec = getFTensor2FromMat<DIM, DIM>(commonDataPtr->matEigVec);
auto t_logC = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matLogC);
    for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
      FTensor::Tensor1<double, DIM> eig;
      FTensor::Tensor2<double, DIM, DIM> eigen_vec;
      eig(i) = t_eig_val(i);
      eigen_vec(i, j) = t_eig_vec(i, j);
      auto logC = EigenMatrix::getMat(eig, eigen_vec, f);
      t_logC(i, j) = logC(i, j);
++t_eig_val;
++t_eig_vec;
++t_logC;
}
    MoFEMFunctionReturn(0);
  }
private:
boost::shared_ptr<CommonData> commonDataPtr;
};
template <int DIM, typename DomainEleOp>
struct OpCalculateLogC_dCImpl<DIM, GAUSS, DomainEleOp> : public DomainEleOp {
  OpCalculateLogC_dCImpl(const std::string field_name,
                         boost::shared_ptr<CommonData> common_data)
      : DomainEleOp(field_name, DomainEleOp::OPROW),
        commonDataPtr(common_data) {
    std::fill(&DomainEleOp::doEntities[MBEDGE],
              &DomainEleOp::doEntities[MBMAXTYPE], false);
  }
  MoFEMErrorCode doWork(int side, EntityType type,
                        EntitiesFieldData::EntData &data) {
    MoFEMFunctionBegin;
    const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
    constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
    commonDataPtr->matLogCdC.resize(size_symm * size_symm, nb_gauss_pts,
                                    false);
    auto t_logC_dC = getFTensor4DdgFromMat<DIM, DIM>(commonDataPtr->matLogCdC);
auto t_eig_val = getFTensor1FromMat<DIM>(commonDataPtr->matEigVal);
auto t_eig_vec = getFTensor2FromMat<DIM, DIM>(commonDataPtr->matEigVec);
    for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
      FTensor::Tensor1<double, DIM> eig;
      FTensor::Tensor2<double, DIM, DIM> eigen_vec;
      eig(i) = t_eig_val(i);
      eigen_vec(i, j) = t_eig_vec(i, j);
      auto nb_uniq = get_uniq_nb<DIM>(&eig(0));
      auto dlogC_dC = EigenMatrix::getDiffMat(eig, eigen_vec, f, d_f, nb_uniq);
      dlogC_dC(i, j, k, l) *= 2;
      t_logC_dC(i, j, k, l) = dlogC_dC(i, j, k, l);
++t_logC_dC;
++t_eig_val;
++t_eig_vec;
}
    MoFEMFunctionReturn(0);
  }
private:
boost::shared_ptr<CommonData> commonDataPtr;
};
template <int DIM, typename DomainEleOp, int S>
struct OpCalculateHenckyStressImpl<DIM, GAUSS, DomainEleOp, S>
    : public DomainEleOp {
  OpCalculateHenckyStressImpl(const std::string field_name,
                              boost::shared_ptr<CommonData> common_data)
      : DomainEleOp(field_name, DomainEleOp::OPROW),
        commonDataPtr(common_data) {
    std::fill(&DomainEleOp::doEntities[MBEDGE],
              &DomainEleOp::doEntities[MBMAXTYPE], false);
  }
  MoFEMErrorCode doWork(int side, EntityType type,
                        EntitiesFieldData::EntData &data) {
    MoFEMFunctionBegin;
    const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
auto t_D = getFTensor4DdgFromMat<DIM, DIM, S>(*commonDataPtr->matDPtr);
auto t_logC = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matLogC);
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
    commonDataPtr->matHenckyStress.resize(size_symm, nb_gauss_pts, false);
auto t_T = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matHenckyStress);
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
      t_T(i, j) = t_D(i, j, k, l) * t_logC(k, l);
++t_logC;
++t_T;
++t_D;
}
    MoFEMFunctionReturn(0);
  }
private:
boost::shared_ptr<CommonData> commonDataPtr;
};
template <int DIM, typename DomainEleOp, int S>
struct OpCalculateHenckyPlasticStressImpl<DIM, GAUSS, DomainEleOp, S>
    : public DomainEleOp {
  OpCalculateHenckyPlasticStressImpl(const std::string field_name,
                                     boost::shared_ptr<CommonData> common_data,
                                     boost::shared_ptr<MatrixDouble> mat_D_ptr,
                                     const double scale = 1)
      : DomainEleOp(field_name, DomainEleOp::OPROW),
        commonDataPtr(common_data), scaleStress(scale), matDPtr(mat_D_ptr) {
    std::fill(&DomainEleOp::doEntities[MBEDGE],
              &DomainEleOp::doEntities[MBMAXTYPE], false);
    matLogCPlastic = commonDataPtr->matLogCPlastic;
  }
  MoFEMErrorCode doWork(int side, EntityType type,
                        EntitiesFieldData::EntData &data) {
    MoFEMFunctionBegin;
    const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
auto t_D = getFTensor4DdgFromMat<DIM, DIM, S>(*matDPtr);
auto t_logC = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matLogC);
auto t_logCPlastic = getFTensor2SymmetricFromMat<DIM>(*matLogCPlastic);
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
    commonDataPtr->matHenckyStress.resize(size_symm, nb_gauss_pts, false);
auto t_T = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matHenckyStress);
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
      t_T(i, j) = t_D(i, j, k, l) * (t_logC(k, l) - t_logCPlastic(k, l));
      t_T(i, j) /= scaleStress;
++t_logC;
++t_T;
++t_D;
++t_logCPlastic;
}
    MoFEMFunctionReturn(0);
  }
private:
boost::shared_ptr<CommonData> commonDataPtr;
boost::shared_ptr<MatrixDouble> matDPtr;
boost::shared_ptr<MatrixDouble> matLogCPlastic;
const double scaleStress;
};
template <int DIM, typename DomainEleOp, int S>
struct OpCalculatePiolaStressImpl<DIM, GAUSS, DomainEleOp, S>
    : public DomainEleOp {
  OpCalculatePiolaStressImpl(const std::string field_name,
                             boost::shared_ptr<CommonData> common_data)
      : DomainEleOp(field_name, DomainEleOp::OPROW),
        commonDataPtr(common_data) {
    std::fill(&DomainEleOp::doEntities[MBEDGE],
              &DomainEleOp::doEntities[MBMAXTYPE], false);
  }
  MoFEMErrorCode doWork(int side, EntityType type,
                        EntitiesFieldData::EntData &data) {
    MoFEMFunctionBegin;
    const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
#ifdef HENCKY_SMALL_STRAIN
auto t_D = getFTensor4DdgFromMat<DIM, DIM, S>(*commonDataPtr->matDPtr);
#endif
auto t_logC = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matLogC);
auto t_logC_dC = getFTensor4DdgFromMat<DIM, DIM>(commonDataPtr->matLogCdC);
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
commonDataPtr->matFirstPiolaStress.resize(DIM * DIM, nb_gauss_pts, false);
    commonDataPtr->matSecondPiolaStress.resize(size_symm, nb_gauss_pts, false);
auto t_P = getFTensor2FromMat<DIM, DIM>(commonDataPtr->matFirstPiolaStress);
auto t_T = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matHenckyStress);
auto t_S =
getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matSecondPiolaStress);
auto t_grad = getFTensor2FromMat<DIM, DIM>(*(commonDataPtr->matGradPtr));
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
#ifdef HENCKY_SMALL_STRAIN
      t_P(i, j) = t_D(i, j, k, l) * t_grad(k, l);
#else
      constexpr auto t_kd = FTensor::Kronecker_Delta<int>();
      FTensor::Tensor2<double, DIM, DIM> t_F;
      t_F(i, j) = t_grad(i, j) + t_kd(i, j);
      // S = T : d(logC)/dC (second Piola-Kirchhoff), then P = F S
      t_S(k, l) = t_T(i, j) * t_logC_dC(i, j, k, l);
      t_P(i, l) = t_F(i, k) * t_S(k, l);
#endif
++t_grad;
++t_logC;
++t_logC_dC;
++t_P;
++t_T;
++t_S;
#ifdef HENCKY_SMALL_STRAIN
++t_D;
#endif
}
    MoFEMFunctionReturn(0);
  }
private:
boost::shared_ptr<CommonData> commonDataPtr;
};
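// Summary of the stress chain above: T = D : logC is the stress conjugate to
// the logarithmic strain, the second Piola-Kirchhoff stress follows by the
// chain rule as S = T : d(logC)/dC, and the first Piola-Kirchhoff stress is
// P = F S, with F = 1 + grad(u).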
template <int DIM, typename DomainEleOp, int S>
struct OpHenckyTangentImpl<DIM, GAUSS, DomainEleOp, S> : public DomainEleOp {
  OpHenckyTangentImpl(const std::string field_name,
                      boost::shared_ptr<CommonData> common_data,
                      boost::shared_ptr<MatrixDouble> mat_D_ptr = nullptr)
      : DomainEleOp(field_name, DomainEleOp::OPROW),
        commonDataPtr(common_data) {
    std::fill(&DomainEleOp::doEntities[MBEDGE],
              &DomainEleOp::doEntities[MBMAXTYPE], false);
    if (mat_D_ptr)
      matDPtr = mat_D_ptr;
    else
      matDPtr = commonDataPtr->matDPtr;
  }
  MoFEMErrorCode doWork(int side, EntityType type,
                        EntitiesFieldData::EntData &data) {
    MoFEMFunctionBegin;
    const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
commonDataPtr->matTangent.resize(DIM * DIM * DIM * DIM, nb_gauss_pts);
auto dP_dF =
getFTensor4FromMat<DIM, DIM, DIM, DIM, 1>(commonDataPtr->matTangent);
auto t_D = getFTensor4DdgFromMat<DIM, DIM, S>(*matDPtr);
auto t_eig_val = getFTensor1FromMat<DIM>(commonDataPtr->matEigVal);
auto t_eig_vec = getFTensor2FromMat<DIM, DIM>(commonDataPtr->matEigVec);
auto t_T = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matHenckyStress);
auto t_S =
getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matSecondPiolaStress);
auto t_grad = getFTensor2FromMat<DIM, DIM>(*(commonDataPtr->matGradPtr));
auto t_logC_dC = getFTensor4DdgFromMat<DIM, DIM>(commonDataPtr->matLogCdC);
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
#ifdef HENCKY_SMALL_STRAIN
      dP_dF(i, j, k, l) = t_D(i, j, k, l);
#else
      constexpr auto t_kd = FTensor::Kronecker_Delta<int>();
      FTensor::Tensor1<double, DIM> eig;
      FTensor::Tensor2<double, DIM, DIM> eigen_vec;
      FTensor::Tensor2<double, DIM, DIM> T;
      eig(i) = t_eig_val(i);
      eigen_vec(i, j) = t_eig_vec(i, j);
      T(i, j) = t_T(i, j);
      auto nb_uniq = get_uniq_nb<DIM>(&eig(0));
      // TL = T : d^2(logC)/dC^2, the stress-weighted curvature of log C
      auto TL = EigenMatrix::getDiffDiffMat(eig, eigen_vec, f, d_f, dd_f, T,
                                            nb_uniq);
      FTensor::Ddg<double, DIM, DIM> P_D_P_plus_TL;
      P_D_P_plus_TL(i, j, k, l) =
          TL(i, j, k, l) +
          (t_logC_dC(i, j, o, p) * t_D(o, p, m, n)) * t_logC_dC(m, n, k, l);
      P_D_P_plus_TL(i, j, k, l) *= 0.5;
      // dC_ij/dF_kl = delta_il F_kj + delta_jl F_ki, since C = F^T F
      FTensor::Tensor2<double, DIM, DIM> t_F;
      t_F(i, j) = t_grad(i, j) + t_kd(i, j);
      FTensor::Tensor4<double, DIM, DIM, DIM, DIM> dC_dF;
      dC_dF(i, j, k, l) = (t_kd(i, l) * t_F(k, j)) + (t_kd(j, l) * t_F(k, i));
      dP_dF(i, j, m, n) =
          t_F(i, k) * (P_D_P_plus_TL(k, j, o, p) * dC_dF(o, p, m, n));
#endif
++dP_dF;
++t_grad;
++t_eig_val;
++t_eig_vec;
++t_logC_dC;
++t_S;
++t_T;
++t_D;
}
    MoFEMFunctionReturn(0);
  }
private:
boost::shared_ptr<CommonData> commonDataPtr;
boost::shared_ptr<MatrixDouble> matDPtr;
};
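// The tangent follows from P = F S(C(F)) by the product and chain rules:
//   dP_ij/dF_kl = delta_ik S_lj + F_im (dS_mj/dC_op) (dC_op/dF_kl),
// with dC_op/dF_kl = delta_ol F_kp + delta_pl F_ko since C = F^T F. The
// material part dS/dC combines the elasticity tensor D mapped through
// d(logC)/dC with the stress-weighted second derivative of logC (the TL
// term above).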
template <typename DomainEleOp> struct HenckyIntegrators {
  template <int DIM, IntegrationType I>
  using OpCalculateEigenVals = OpCalculateEigenValsImpl<DIM, I, DomainEleOp>;
  template <int DIM, IntegrationType I>
  using OpCalculateLogC = OpCalculateLogCImpl<DIM, I, DomainEleOp>;
  template <int DIM, IntegrationType I>
  using OpCalculateLogC_dC = OpCalculateLogC_dCImpl<DIM, I, DomainEleOp>;
  template <int DIM, IntegrationType I, int S>
  using OpCalculateHenckyStress =
      OpCalculateHenckyStressImpl<DIM, I, DomainEleOp, S>;
  template <int DIM, IntegrationType I, int S = 0>
  using OpCalculateHenckyPlasticStress =
      OpCalculateHenckyPlasticStressImpl<DIM, I, DomainEleOp, S>;
  template <int DIM, IntegrationType I, int S>
  using OpCalculatePiolaStress =
      OpCalculatePiolaStressImpl<DIM, GAUSS, DomainEleOp, S>;
  template <int DIM, IntegrationType I, int S>
  using OpHenckyTangent = OpHenckyTangentImpl<DIM, GAUSS, DomainEleOp, S>;
};
template <int DIM>
MoFEMErrorCode addMatBlockOps(
    MoFEM::Interface &m_field,
    boost::ptr_deque<ForcesAndSourcesCore::UserDataOperator> &pip,
    std::string block_name, boost::shared_ptr<MatrixDouble> mat_D_Ptr,
    Sev sev, double scale = 1) {

  struct OpMatBlocks : public DomainEleOp {
    OpMatBlocks(boost::shared_ptr<MatrixDouble> m, MoFEM::Interface &m_field,
                Sev sev, std::vector<const CubitMeshSets *> meshset_vec_ptr,
                double scale)
        : matDPtr(m), scaleYoungModulus(scale) {
      CHK_THROW_MESSAGE(extractBlockData(m_field, meshset_vec_ptr, sev),
                        "Can not get data from block");
    }
    MoFEMErrorCode doWork(int side, EntityType type,
                          EntitiesFieldData::EntData &data) {
      MoFEMFunctionBegin;
      for (auto &b : blockData) {
        if (b.blockEnts.find(getFEEntityHandle()) != b.blockEnts.end()) {
          CHKERR getMatDPtr(matDPtr, b.bulkModulusK * scaleYoungModulus,
                            b.shearModulusG * scaleYoungModulus);
          MoFEMFunctionReturnHot(0);
        }
      }
      CHKERR getMatDPtr(matDPtr, bulkModulusKDefault * scaleYoungModulus,
                        shearModulusGDefault * scaleYoungModulus);
      MoFEMFunctionReturn(0);
    }
private:
boost::shared_ptr<MatrixDouble> matDPtr;
const double scaleYoungModulus;
    struct BlockData {
      double bulkModulusK;
      double shearModulusG;
      Range blockEnts;
    };
double bulkModulusKDefault;
double shearModulusGDefault;
std::vector<BlockData> blockData;
    MoFEMErrorCode
    extractBlockData(MoFEM::Interface &m_field,
                     std::vector<const CubitMeshSets *> meshset_vec_ptr,
                     Sev sev) {
      MoFEMFunctionBegin;
      for (auto m : meshset_vec_ptr) {
std::vector<double> block_data;
CHKERR m->getAttributes(block_data);
        if (block_data.size() != 2) {
          SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
                  "Expected that block has two attributes");
        }
        auto get_block_ents = [&]() {
          Range ents;
          CHKERR m_field.get_moab().get_entities_by_handle(m->meshset, ents,
                                                           true);
          return ents;
        };
        // block attributes are (E, nu); convert to bulk and shear moduli:
        // K = E / (3 (1 - 2 nu)), G = E / (2 (1 + nu))
        blockData.push_back(
            {block_data[0] / (3 * (1 - 2 * block_data[1])),
             block_data[0] / (2 * (1 + block_data[1])), get_block_ents()});
      }
      MoFEMFunctionReturn(0);
    }
    MoFEMErrorCode getMatDPtr(boost::shared_ptr<MatrixDouble> mat_D_ptr,
                              double bulk_modulus_K, double shear_modulus_G) {
      MoFEMFunctionBegin;
      auto set_material_stiffness = [&]() {
        // plane-stress correction factor in 2D; A = 1 in 3D
        double A = (DIM == 2)
                       ? 2 * shear_modulus_G /
                             (bulk_modulus_K + (4. / 3.) * shear_modulus_G)
                       : 1;
        auto t_D = getFTensor4DdgFromMat<DIM, DIM, 0>(*mat_D_ptr);
        t_D(i, j, k, l) =
            2 * shear_modulus_G * ((t_kd(i, k) ^ t_kd(j, l)) / 4.) +
            A * (bulk_modulus_K - (2. / 3.) * shear_modulus_G) * t_kd(i, j) *
                t_kd(k, l);
      };
      constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
      set_material_stiffness();
      MoFEMFunctionReturn(0);
    }
};
double nu = 0.3;
  CHKERR PetscOptionsBegin(PETSC_COMM_WORLD, "", "", "none");
  CHKERR PetscOptionsScalar("-young_modulus", "Young modulus", "", E, &E,
                            PETSC_NULL);
  CHKERR PetscOptionsScalar("-poisson_ratio", "poisson ratio", "", nu, &nu,
                            PETSC_NULL);
ierr = PetscOptionsEnd();
  pip.push_back(new OpMatBlocks(
      mat_D_Ptr, m_field, sev,
      m_field.getInterface<MeshsetsManager>()->getCubitMeshsetPtr(std::regex(
          (boost::format("%s(.*)") % block_name).str())),
      scale));

  MoFEMFunctionReturn(0);
}
template <int DIM, IntegrationType I, typename DomainEleOp>
auto commonDataFactory(
    MoFEM::Interface &m_field,
    boost::ptr_deque<ForcesAndSourcesCore::UserDataOperator> &pip,
    std::string field_name, std::string block_name, Sev sev,
    double scale = 1) {
auto common_ptr = boost::make_shared<HenckyOps::CommonData>();
common_ptr->matDPtr = boost::make_shared<MatrixDouble>();
common_ptr->matGradPtr = boost::make_shared<MatrixDouble>();
  CHK_THROW_MESSAGE(addMatBlockOps<DIM>(m_field, pip, block_name,
                                        common_ptr->matDPtr, sev, scale),
                    "addMatBlockOps");
  pip.push_back(new OpCalculateVectorFieldGradient<DIM, DIM>(
      field_name, common_ptr->matGradPtr));
using H = HenckyIntegrators<DomainEleOp>;
  pip.push_back(new typename H::template OpCalculateEigenVals<DIM, I>(
      field_name, common_ptr));
  pip.push_back(new typename H::template OpCalculateLogC<DIM, I>(
      field_name, common_ptr));
  pip.push_back(new typename H::template OpCalculateLogC_dC<DIM, I>(
      field_name, common_ptr));
  pip.push_back(new typename H::template OpCalculateHenckyStress<DIM, I, 0>(
      field_name, common_ptr));
  pip.push_back(new typename H::template OpCalculatePiolaStress<DIM, I, 0>(
      field_name, common_ptr));
return common_ptr;
}
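// Typical use, as in the example at the top of this page (a sketch; the
// SPACE_DIM, DomainEleOp, "U" and "MAT_ELASTIC" names come from the calling
// translation unit):
//
//   auto common_ptr = commonDataFactory<SPACE_DIM, GAUSS, DomainEleOp>(
//       mField, post_proc_fe->getOpPtrVector(), "U", "MAT_ELASTIC",
//       Sev::inform);
//   // common_ptr->getMatFirstPiolaStress() then feeds post-processing maps.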
template <int DIM, AssemblyType A, IntegrationType I, typename DomainEleOp>
MoFEMErrorCode opFactoryDomainRhs(
    MoFEM::Interface &m_field,
    boost::ptr_deque<ForcesAndSourcesCore::UserDataOperator> &pip,
    std::string field_name,
    boost::shared_ptr<HenckyOps::CommonData> common_ptr, Sev sev) {
  MoFEMFunctionBegin;
  using B = typename FormsIntegrators<DomainEleOp>::template Assembly<
      A>::template LinearForm<I>;
  using OpInternalForcePiola =
      typename B::template OpGradTimesTensor<1, DIM, DIM>;
  pip.push_back(new OpInternalForcePiola(
      field_name, common_ptr->getMatFirstPiolaStress()));
  MoFEMFunctionReturn(0);
}
template <int DIM, AssemblyType A, IntegrationType I, typename DomainEleOp>
MoFEMErrorCode opFactoryDomainRhs(
    MoFEM::Interface &m_field,
    boost::ptr_deque<ForcesAndSourcesCore::UserDataOperator> &pip,
    std::string field_name, std::string block_name, Sev sev,
    double scale = 1) {
  MoFEMFunctionBegin;
  auto common_ptr = commonDataFactory<DIM, I, DomainEleOp>(
      m_field, pip, field_name, block_name, sev, scale);
  CHKERR opFactoryDomainRhs<DIM, A, I, DomainEleOp>(m_field, pip, field_name,
                                                    common_ptr, sev);
  MoFEMFunctionReturn(0);
}
template <int DIM, AssemblyType A, IntegrationType I, typename DomainEleOp>
MoFEMErrorCode opFactoryDomainLhs(
    MoFEM::Interface &m_field,
    boost::ptr_deque<ForcesAndSourcesCore::UserDataOperator> &pip,
    std::string field_name,
    boost::shared_ptr<HenckyOps::CommonData> common_ptr, Sev sev) {
  MoFEMFunctionBegin;
  using B = typename FormsIntegrators<DomainEleOp>::template Assembly<
      A>::template BiLinearForm<I>;
  using OpKPiola = typename B::template OpGradTensorGrad<1, DIM, DIM, 1>;
  using H = HenckyIntegrators<DomainEleOp>;
  pip.push_back(new typename H::template OpHenckyTangent<DIM, I, 0>(
      field_name, common_ptr));
  pip.push_back(new OpKPiola(field_name, field_name,
                             common_ptr->getMatTangent()));
  MoFEMFunctionReturn(0);
}
template <int DIM, AssemblyType A, IntegrationType I, typename DomainEleOp>
MoFEMErrorCode opFactoryDomainLhs(
    MoFEM::Interface &m_field,
    boost::ptr_deque<ForcesAndSourcesCore::UserDataOperator> &pip,
    std::string field_name, std::string block_name, Sev sev,
    double scale = 1) {
  MoFEMFunctionBegin;
  auto common_ptr = commonDataFactory<DIM, I, DomainEleOp>(
      m_field, pip, field_name, block_name, sev, scale);
  CHKERR opFactoryDomainLhs<DIM, A, I, DomainEleOp>(m_field, pip, field_name,
                                                    common_ptr, sev);
  MoFEMFunctionReturn(0);
}
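// The Lhs factory pushes OpHenckyTangent (which fills matTangent = dP/dF)
// followed by OpKPiola, assembling the tangent stiffness
//   K_ab = \int (dN_a/dX) : dP/dF : (dN_b/dX) dV,
// while the Rhs factory assembles the internal force
//   f_a = \int (dN_a/dX) : P dV via OpGradTimesTensor.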
} // namespace HenckyOps
#endif