v0.14.0
Loading...
Searching...
No Matches
test_broken_space.cpp
Go to the documentation of this file.
1/**
2 * \example test_broken_space.cpp
3 *
4 * Testing broken spaces. The implementation works for 2D and 3D meshes and
5 * aims to test H-div broken base functions and the L2 base on the skeleton.
6 *
7 * Also, it test block matrix with Schur complement.
8 *
9 */
10
11#include <MoFEM.hpp>
13
14using namespace MoFEM;
15
16static char help[] = "...\n\n";
17
18constexpr bool debug = false;
19
20constexpr AssemblyType AT =
21 (SCHUR_ASSEMBLE) ? AssemblyType::BLOCK_SCHUR
22 : AssemblyType::PETSC; //< selected assembly type
23
25 IntegrationType::GAUSS; //< selected integration type
26
27constexpr int SPACE_DIM =
28 EXECUTABLE_DIMENSION; //< Space dimension of problem, mesh
29
34
36using DomainEleOp = DomainEle::UserDataOperator;
37using BdyEleOp = BoundaryEle::UserDataOperator;
38using SideEleOp = EleOnSide::UserDataOperator;
39
40struct SetUpSchur {
41 static boost::shared_ptr<SetUpSchur>
44
45protected:
46 SetUpSchur() = default;
47 virtual ~SetUpSchur() = default;
48};
49
51
52int main(int argc, char *argv[]) {
53
54 MoFEM::Core::Initialize(&argc, &argv, (char *)0, help);
55
56 try {
57
58 //! [Register MoFEM discrete manager in PETSc]
59 DMType dm_name = "DMMOFEM";
60 CHKERR DMRegister_MoFEM(dm_name);
61 DMType dm_name_mg = "DMMOFEM_MG";
63 //! [Register MoFEM discrete manager in PETSc
64
65 moab::Core mb_instance;
66 moab::Interface &moab = mb_instance;
67
68 // Add logging channel for example
69 auto core_log = logging::core::get();
70 core_log->add_sink(
72 core_log->add_sink(
75 LogManager::setLog("TIMER");
76 MOFEM_LOG_TAG("AT", "atom_test");
77 MOFEM_LOG_TAG("TIMER", "timer");
78
79 // Create MoFEM instance
80 MoFEM::Core core(moab);
81 MoFEM::Interface &m_field = core;
82
83 auto *simple = m_field.getInterface<Simple>();
84 CHKERR simple->getOptions();
85 simple->getAddBoundaryFE() = true;
86 CHKERR simple->loadFile();
87
88 auto add_shared_entities_on_skeleton = [&]() {
90 auto boundary_meshset = simple->getBoundaryMeshSet();
91 auto skeleton_meshset = simple->getSkeletonMeshSet();
92 Range bdy_ents;
93 CHKERR m_field.get_moab().get_entities_by_handle(boundary_meshset,
94 bdy_ents, true);
95 Range skeleton_ents;
96 CHKERR m_field.get_moab().get_entities_by_dimension(
97 0, simple->getDim() - 1, skeleton_ents, true);
98 skeleton_ents = subtract(skeleton_ents, bdy_ents);
99 CHKERR m_field.get_moab().clear_meshset(&skeleton_meshset, 1);
100 CHKERR m_field.get_moab().add_entities(skeleton_meshset, skeleton_ents);
102 };
103
104 CHKERR add_shared_entities_on_skeleton();
105
106 // Declare elements
107 enum bases {
108 AINSWORTH,
109 AINSWORTH_LOBATTO,
110 DEMKOWICZ,
111 BERNSTEIN,
112 LASBASETOP
113 };
114 const char *list_bases[] = {"ainsworth", "ainsworth_lobatto", "demkowicz",
115 "bernstein"};
116 PetscBool flg;
117 PetscInt choice_base_value = AINSWORTH;
118 CHKERR PetscOptionsGetEList(PETSC_NULL, NULL, "-base", list_bases,
119 LASBASETOP, &choice_base_value, &flg);
120
121 if (flg != PETSC_TRUE)
122 SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSSIBLE_CASE, "base not set");
124 if (choice_base_value == AINSWORTH)
126 if (choice_base_value == AINSWORTH_LOBATTO)
128 else if (choice_base_value == DEMKOWICZ)
130 else if (choice_base_value == BERNSTEIN)
132
133 enum spaces { hdiv, hcurl, last_space };
134 const char *list_spaces[] = {"hdiv", "hcurl"};
135 PetscInt choice_space_value = hdiv;
136 CHKERR PetscOptionsGetEList(PETSC_NULL, NULL, "-space", list_spaces,
137 last_space, &choice_space_value, &flg);
138 if (flg != PETSC_TRUE)
139 SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSSIBLE_CASE, "space not set");
140 FieldSpace space = HDIV;
141 if (choice_space_value == hdiv)
142 space = HDIV;
143 else if (choice_space_value == hcurl)
144 space = HCURL;
145
146 CHKERR PetscOptionsGetInt(PETSC_NULL, "", "-order", &approx_order,
147 PETSC_NULL);
148
149 CHKERR simple->addDomainBrokenField("BROKEN", space, base, 1);
150 CHKERR simple->addDomainField("U", L2, base, 1);
151 CHKERR simple->addSkeletonField("HYBRID", L2, base, 1);
152
153 CHKERR simple->setFieldOrder("BROKEN", approx_order);
154 CHKERR simple->setFieldOrder("U", approx_order - 1);
155 CHKERR simple->setFieldOrder("HYBRID", approx_order - 1);
156
157 CHKERR simple->setUp();
158
159 auto bc_mng = m_field.getInterface<BcManager>();
160 CHKERR bc_mng->removeSideDOFs(simple->getProblemName(), "ZERO_FLUX",
161 "BROKEN", SPACE_DIM, 0, 1, true);
162
163 auto integration_rule = [](int, int, int p) { return 2 * p; };
164
165 auto assemble_domain_lhs = [&](auto &pip) {
168
172 IT>::OpMixDivTimesScalar<SPACE_DIM>;
173
174 auto beta = [](const double, const double, const double) constexpr {
175 return 1;
176 };
177
178 pip.push_back(new OpHdivHdiv("BROKEN", "BROKEN", beta));
179 auto unity = []() constexpr { return 1; };
180 pip.push_back(new OpHdivU("BROKEN", "U", unity, true));
181
182 // First: Iterate over skeleton FEs adjacent to Domain FEs
183 // Note: BoundaryEle, i.e. uses skeleton integration rule
184 auto op_loop_skeleton_side = new OpLoopSide<BoundaryEle>(
185 m_field, simple->getSkeletonFEName(), SPACE_DIM - 1, Sev::noisy);
186 op_loop_skeleton_side->getSideFEPtr()->getRuleHook = integration_rule;
188 op_loop_skeleton_side->getOpPtrVector(), {});
189
190 // Second: Iterate over domain FEs adjacent to skeleton, particularly one
191 // domain element.
192 auto broken_data_ptr =
193 boost::make_shared<std::vector<BrokenBaseSideData>>();
194 // Note: EleOnSide, i.e. uses on domain projected skeleton rule
195 auto op_loop_domain_side = new OpBrokenLoopSide<EleOnSide>(
196 m_field, simple->getDomainFEName(), SPACE_DIM, Sev::noisy);
198 op_loop_domain_side->getOpPtrVector(), {HDIV});
199 op_loop_domain_side->getOpPtrVector().push_back(
200 new OpGetBrokenBaseSideData<SideEleOp>("BROKEN", broken_data_ptr));
201
202 op_loop_skeleton_side->getOpPtrVector().push_back(op_loop_domain_side);
204 IT>::OpBrokenSpaceConstrain<1>;
205 op_loop_skeleton_side->getOpPtrVector().push_back(
206 new OpC("HYBRID", broken_data_ptr, 1., true, false));
207
208 if (debug) {
209 // print skeleton elements on partition
210 constexpr int partition = 1;
211 auto op_print = new BdyEleOp(NOSPACE, BdyEleOp::OPSPACE);
212 op_print->doWorkRhsHook = [&](DataOperator *base_op_ptr, int side,
213 EntityType type,
216 if (auto op_ptr = dynamic_cast<BdyEleOp *>(base_op_ptr)) {
217 auto fe_method = op_ptr->getFEMethod();
218 auto num_fe = fe_method->numeredEntFiniteElementPtr;
219
220 if (m_field.get_comm_rank() == partition) {
221 if (num_fe->getPStatus() & PSTATUS_SHARED)
222 MOFEM_LOG("SELF", Sev::inform) << "Num FE: " << *num_fe;
223 }
224 }
226 };
227 op_loop_skeleton_side->getOpPtrVector().push_back(op_print);
228 };
229
230 pip.push_back(op_loop_skeleton_side);
231
233 };
234
235 auto assemble_domain_rhs = [&](auto &pip) {
240 auto source = [&](const double x, const double y,
241 const double z) constexpr {
242 return -1; // sin(100 * (x / 10.) * M_PI_2);
243 };
244 pip.push_back(new OpDomainSource("U", source));
246 };
247
248 auto *pip_mng = m_field.getInterface<PipelineManager>();
249
250 CHKERR assemble_domain_lhs(pip_mng->getOpDomainLhsPipeline());
251 CHKERR assemble_domain_rhs(pip_mng->getOpDomainRhsPipeline());
252
253 CHKERR pip_mng->setDomainLhsIntegrationRule(integration_rule);
254 CHKERR pip_mng->setDomainRhsIntegrationRule(integration_rule);
255 CHKERR pip_mng->setSkeletonLhsIntegrationRule(integration_rule);
256 CHKERR pip_mng->setSkeletonRhsIntegrationRule(integration_rule);
257
258 TetPolynomialBase::switchCacheBaseOn<HDIV>(
259 {pip_mng->getDomainLhsFE().get(), pip_mng->getDomainRhsFE().get()});
260 TetPolynomialBase::switchCacheBaseOn<L2>(
261 {pip_mng->getDomainLhsFE().get(), pip_mng->getDomainRhsFE().get()});
262
263 auto x = createDMVector(simple->getDM());
264 auto f = vectorDuplicate(x);
265
266 if (AT == PETSC) {
267 auto ksp = pip_mng->createKSP();
268
269 CHKERR KSPSetFromOptions(ksp);
270 BOOST_LOG_SCOPED_THREAD_ATTR("Timeline", attrs::timer());
271 MOFEM_LOG("TIMER", Sev::inform) << "KSPSetUp";
272 CHKERR KSPSetUp(ksp);
273 MOFEM_LOG("TIMER", Sev::inform) << "KSPSetUp <= Done";
274
275 MOFEM_LOG("TIMER", Sev::inform) << "KSPSolve";
276 CHKERR KSPSolve(ksp, f, x);
277 MOFEM_LOG("TIMER", Sev::inform) << "KSPSolve <= Done";
278
279 CHKERR VecGhostUpdateBegin(x, INSERT_VALUES, SCATTER_FORWARD);
280 CHKERR VecGhostUpdateEnd(x, INSERT_VALUES, SCATTER_FORWARD);
281 CHKERR DMoFEMMeshToLocalVector(simple->getDM(), x, INSERT_VALUES,
282 SCATTER_REVERSE);
283 } else {
284 auto ksp = pip_mng->createKSP();
285 auto schur_ptr = SetUpSchur::createSetUpSchur(m_field);
286 BOOST_LOG_SCOPED_THREAD_ATTR("Timeline", attrs::timer());
287 MOFEM_LOG("TIMER", Sev::inform) << "KSPSetUp";
288 CHKERR schur_ptr->setUp(ksp);
289 MOFEM_LOG("TIMER", Sev::inform) << "KSPSetUp <= Done";
290
291 MOFEM_LOG("TIMER", Sev::inform) << "KSPSolve";
292 CHKERR KSPSolve(ksp, f, x);
293 MOFEM_LOG("TIMER", Sev::inform) << "KSPSolve <= Done";
294
295 CHKERR VecGhostUpdateBegin(x, INSERT_VALUES, SCATTER_FORWARD);
296 CHKERR VecGhostUpdateEnd(x, INSERT_VALUES, SCATTER_FORWARD);
297 CHKERR DMoFEMMeshToLocalVector(simple->getDM(), x, INSERT_VALUES,
298 SCATTER_REVERSE);
299 }
300
301 auto check_residual = [&](auto x, auto f) {
303 auto *simple = m_field.getInterface<Simple>();
304 auto *pip_mng = m_field.getInterface<PipelineManager>();
305
306 // auto &skeleton_rhs = pip_mng->getOpSkeletonRhsPipeline();
307 auto &domain_rhs = pip_mng->getOpDomainRhsPipeline();
308 // skeleton_rhs.clear();
309 domain_rhs.clear();
310
312
313 auto div_flux_ptr = boost::make_shared<VectorDouble>();
314 domain_rhs.push_back(new OpCalculateHdivVectorDivergence<3, SPACE_DIM>(
315 "BROKEN", div_flux_ptr));
318 auto beta = [](double, double, double) constexpr { return 1; };
319 domain_rhs.push_back(new OpUDivFlux("U", div_flux_ptr, beta));
320 auto source = [&](const double x, const double y,
321 const double z) constexpr { return 1; };
324 domain_rhs.push_back(new OpDomainSource("U", source));
325
327 IT>::OpMixDivTimesU<3, 1, SPACE_DIM>;
330 auto flux_ptr = boost::make_shared<MatrixDouble>();
331 domain_rhs.push_back(
332 new OpCalculateHVecVectorField<3>("BROKEN", flux_ptr));
333 boost::shared_ptr<VectorDouble> u_ptr =
334 boost::make_shared<VectorDouble>();
335 domain_rhs.push_back(new OpCalculateScalarFieldValues("U", u_ptr));
336 domain_rhs.push_back(new OpHDivH("BROKEN", u_ptr, beta));
337 domain_rhs.push_back(new OpHdivFlux("BROKEN", flux_ptr, beta));
338
339 // First: Iterate over skeleton FEs adjacent to Domain FEs
340 // Note: BoundaryEle, i.e. uses skeleton integration rule
341 auto op_loop_skeleton_side = new OpLoopSide<BoundaryEle>(
342 m_field, simple->getSkeletonFEName(), SPACE_DIM - 1, Sev::noisy);
343 op_loop_skeleton_side->getSideFEPtr()->getRuleHook = integration_rule;
345 op_loop_skeleton_side->getOpPtrVector(), {});
346
347 // Second: Iterate over domain FEs adjacent to skeleton, particularly one
348 // domain element.
349 auto broken_data_ptr =
350 boost::make_shared<std::vector<BrokenBaseSideData>>();
351 // Note: EleOnSide, i.e. uses on domain projected skeleton rule
352 auto op_loop_domain_side = new OpBrokenLoopSide<EleOnSide>(
353 m_field, simple->getDomainFEName(), SPACE_DIM, Sev::noisy);
355 op_loop_domain_side->getOpPtrVector(), {HDIV});
356 op_loop_domain_side->getOpPtrVector().push_back(
357 new OpGetBrokenBaseSideData<SideEleOp>("BROKEN", broken_data_ptr));
358 auto flux_mat_ptr = boost::make_shared<MatrixDouble>();
359 op_loop_domain_side->getOpPtrVector().push_back(
360 new OpCalculateHVecTensorField<1, 3>("BROKEN", flux_mat_ptr));
361 op_loop_domain_side->getOpPtrVector().push_back(
362 new OpSetFlux<SideEleOp>(broken_data_ptr, flux_mat_ptr));
363
364 // Assemble on skeleton
365 op_loop_skeleton_side->getOpPtrVector().push_back(op_loop_domain_side);
367 IT>::OpBrokenSpaceConstrainDHybrid<1>;
369 IT>::OpBrokenSpaceConstrainDFlux<1>;
370 op_loop_skeleton_side->getOpPtrVector().push_back(
371 new OpC_dHybrid("HYBRID", broken_data_ptr, 1.));
372 auto hybrid_ptr = boost::make_shared<MatrixDouble>();
373 op_loop_skeleton_side->getOpPtrVector().push_back(
374 new OpCalculateVectorFieldValues<1>("HYBRID", hybrid_ptr));
375 op_loop_skeleton_side->getOpPtrVector().push_back(
376 new OpC_dBroken(broken_data_ptr, hybrid_ptr, 1.));
377
378 // Add skeleton to domain pipeline
379 domain_rhs.push_back(op_loop_skeleton_side);
380
381 CHKERR VecZeroEntries(f);
382 CHKERR VecGhostUpdateBegin(f, INSERT_VALUES, SCATTER_FORWARD);
383 CHKERR VecGhostUpdateEnd(f, INSERT_VALUES, SCATTER_FORWARD);
384
385 pip_mng->getDomainRhsFE()->f = f;
386 pip_mng->getSkeletonRhsFE()->f = f;
387 pip_mng->getDomainRhsFE()->x = x;
388 pip_mng->getSkeletonRhsFE()->x = x;
389
391 simple->getDomainFEName(),
392 pip_mng->getDomainRhsFE());
393
394 CHKERR VecGhostUpdateBegin(f, ADD_VALUES, SCATTER_REVERSE);
395 CHKERR VecGhostUpdateEnd(f, ADD_VALUES, SCATTER_REVERSE);
396 CHKERR VecAssemblyBegin(f);
397 CHKERR VecAssemblyEnd(f);
398
399 double fnrm;
400 CHKERR VecNorm(f, NORM_2, &fnrm);
401 MOFEM_LOG_C("AT", Sev::inform, "Residual %3.4e", fnrm);
402
403 constexpr double eps = 1e-8;
404 if (fnrm > eps)
405 SETERRQ(PETSC_COMM_WORLD, MOFEM_ATOM_TEST_INVALID,
406 "Residual norm larger than accepted");
407
409 };
410
411 auto calculate_error = [&]() {
413
414 // auto &skeleton_rhs = pip_mng->getOpSkeletonRhsPipeline();
415 auto &domain_rhs = pip_mng->getOpDomainRhsPipeline();
416 // skeleton_rhs.clear();
417 domain_rhs.clear();
418
420
421 auto u_grad_ptr = boost::make_shared<MatrixDouble>();
422 auto flux_val_ptr = boost::make_shared<MatrixDouble>();
423 auto div_val_ptr = boost::make_shared<VectorDouble>();
424 auto source_ptr = boost::make_shared<VectorDouble>();
425
426 domain_rhs.push_back(
427 new OpCalculateScalarFieldGradient<SPACE_DIM>("U", u_grad_ptr));
428 domain_rhs.push_back(
429 new OpCalculateHVecVectorField<3, SPACE_DIM>("BROKEN", flux_val_ptr));
430 domain_rhs.push_back(new OpCalculateHdivVectorDivergence<3, SPACE_DIM>(
431 "BROKEN", div_val_ptr));
432 auto source = [&](const double x, const double y,
433 const double z) constexpr { return -1; };
434 domain_rhs.push_back(new OpGetTensor0fromFunc(source_ptr, source));
435
436 enum { DIV, GRAD, LAST };
437 auto mpi_vec = createVectorMPI(
438 m_field.get_comm(), (!m_field.get_comm_rank()) ? LAST : 0, LAST);
439 domain_rhs.push_back(
440 new OpCalcNormL2Tensor0(div_val_ptr, mpi_vec, DIV, source_ptr));
441 domain_rhs.push_back(new OpCalcNormL2Tensor1<SPACE_DIM>(
442 u_grad_ptr, mpi_vec, GRAD, flux_val_ptr));
443
445 simple->getDomainFEName(),
446 pip_mng->getDomainRhsFE());
447 CHKERR VecAssemblyBegin(mpi_vec);
448 CHKERR VecAssemblyEnd(mpi_vec);
449
450 if (!m_field.get_comm_rank()) {
451 const double *error_ind;
452 CHKERR VecGetArrayRead(mpi_vec, &error_ind);
453 MOFEM_LOG("AT", Sev::inform)
454 << "Approximation error ||div flux - source||: "
455 << std::sqrt(error_ind[DIV]);
456 MOFEM_LOG("AT", Sev::inform) << "Approximation error ||grad-flux||: "
457 << std::sqrt(error_ind[GRAD]);
458 CHKERR VecRestoreArrayRead(mpi_vec, &error_ind);
459 }
460
462 };
463
464 auto get_post_proc_fe = [&]() {
467 auto post_proc_fe = boost::make_shared<PostProcEle>(m_field);
468
469 auto op_loop_side = new OpLoopSide<EleOnSide>(
470 m_field, simple->getDomainFEName(), SPACE_DIM, Sev::noisy,
471 boost::make_shared<
472 ForcesAndSourcesCore::UserDataOperator::AdjCache>());
473 post_proc_fe->getOpPtrVector().push_back(op_loop_side);
474
476 op_loop_side->getOpPtrVector(), {HDIV});
477 auto u_vec_ptr = boost::make_shared<VectorDouble>();
478 auto flux_mat_ptr = boost::make_shared<MatrixDouble>();
479 op_loop_side->getOpPtrVector().push_back(
480 new OpCalculateScalarFieldValues("U", u_vec_ptr));
481 op_loop_side->getOpPtrVector().push_back(
482 new OpCalculateHVecVectorField<3>("BROKEN", flux_mat_ptr));
483 op_loop_side->getOpPtrVector().push_back(
484
485 new OpPPMap(
486
487 post_proc_fe->getPostProcMesh(),
488
489 post_proc_fe->getMapGaussPts(),
490
491 {{"U", u_vec_ptr}},
492
493 {{"BROKEN", flux_mat_ptr}},
494
495 {}, {})
496
497 );
498
499 return post_proc_fe;
500 };
501
502 auto post_proc_fe = get_post_proc_fe();
504 simple->getBoundaryFEName(), post_proc_fe);
505 CHKERR post_proc_fe->writeFile("out_result.h5m");
506
507 CHKERR calculate_error();
508 CHKERR check_residual(x, f);
509 }
511
513}
514
515struct SetUpSchurImpl : public SetUpSchur {
516
518
519 virtual ~SetUpSchurImpl() = default;
520
522
523private:
526};
527
531 auto pip_mng = mField.getInterface<PipelineManager>();
532
533 CHKERR KSPSetFromOptions(ksp);
534 PC pc;
535 CHKERR KSPGetPC(ksp, &pc);
536
537 PetscBool is_pcfs = PETSC_FALSE;
538 PetscObjectTypeCompare((PetscObject)pc, PCFIELDSPLIT, &is_pcfs);
539 if (is_pcfs) {
540
541 MOFEM_LOG("AT", Sev::inform) << "Setup Schur pc";
542
543 auto create_sub_dm = [&]() {
545
546 auto create_dm = [&](
547
548 std::string problem_name,
549 std::vector<std::string> fe_names,
550 std::vector<std::string> fields,
551
552 auto dm_type
553
554 ) {
555 auto dm = createDM(mField.get_comm(), dm_type);
556 auto create_dm_imp = [&]() {
558 CHKERR DMMoFEMCreateSubDM(dm, simple->getDM(), problem_name.c_str());
559 CHKERR DMMoFEMSetSquareProblem(dm, PETSC_TRUE);
560 for (auto fe : fe_names) {
562 }
563 CHKERR DMMoFEMAddElement(dm, simple->getSkeletonFEName());
564 for (auto field : fields) {
565 CHKERR DMMoFEMAddSubFieldRow(dm, field);
566 CHKERR DMMoFEMAddSubFieldCol(dm, field);
567 }
568 CHKERR DMSetUp(dm);
570 };
572 create_dm_imp(),
573 "Error in creating schurDM. It is possible that schurDM is "
574 "already created");
575 return dm;
576 };
577
578 auto schur_dm = create_dm(
579
580 "SCHUR",
581
582 {simple->getDomainFEName(), simple->getSkeletonFEName()},
583
584 {"HYBRID"},
585
586 "DMMOFEM_MG");
587
588 auto block_dm = create_dm(
589
590 "BLOCK",
591
592 {simple->getDomainFEName(), simple->getSkeletonFEName()},
593
594 {"BROKEN", "U"},
595
596 "DMMOFEM");
597
598 return std::make_tuple(schur_dm, block_dm);
599 };
600
601 auto get_nested_mat_data = [&](auto schur_dm, auto block_dm) {
602 auto block_mat_data = createBlockMatStructure(
603 simple->getDM(),
604
605 {
606
607 {
608
609 simple->getDomainFEName(),
610
611 {
612
613 {"BROKEN", "BROKEN"},
614 {"U", "U"},
615 {"BROKEN", "U"},
616 {"U", "BROKEN"}
617
618 }
619
620 },
621
622 {
623
624 simple->getSkeletonFEName(),
625
626 {
627
628 {"BROKEN", "HYBRID"}, {"HYBRID", "BROKEN"}
629
630 }
631
632 }
633
634 }
635
636 );
637
639
640 {schur_dm, block_dm}, block_mat_data,
641
642 {"BROKEN", "U"}, {nullptr, nullptr}, true
643
644 );
645 };
646
647 auto set_ops = [&](auto schur_dm) {
649 auto dm_is = getDMSubData(schur_dm)->getSmartRowIs();
650 auto ao_up = createAOMappingIS(dm_is, PETSC_NULL);
651
652 boost::shared_ptr<BlockStructure> block_data;
653 CHKERR DMMoFEMGetBlocMatData(simple->getDM(), block_data);
654
655 pip_mng->getOpDomainLhsPipeline().push_front(
657 pip_mng->getOpDomainLhsPipeline().push_back(
658
659 createOpSchurAssembleEnd({"BROKEN", "U"}, {nullptr, nullptr}, ao_up,
660 S, true, true)
661
662 );
663
664 auto pre_proc_schur_lhs_ptr = boost::make_shared<FEMethod>();
665 auto post_proc_schur_lhs_ptr = boost::make_shared<FEMethod>();
666
667 pre_proc_schur_lhs_ptr->preProcessHook = [this]() {
669 CHKERR MatZeroEntries(S);
670 MOFEM_LOG("AT", Sev::verbose) << "Lhs Assemble Begin";
672 };
673
674 post_proc_schur_lhs_ptr->postProcessHook = [this, ao_up,
675 post_proc_schur_lhs_ptr]() {
677 MOFEM_LOG("AT", Sev::verbose) << "Lhs Assemble End";
678 CHKERR MatAssemblyBegin(S, MAT_FINAL_ASSEMBLY);
679 CHKERR MatAssemblyEnd(S, MAT_FINAL_ASSEMBLY);
680 MOFEM_LOG("AT", Sev::verbose) << "Lhs Assemble Finish";
682 };
683
684 auto ksp_ctx_ptr = getDMKspCtx(simple->getDM());
685 ksp_ctx_ptr->getPreProcSetOperators().push_front(pre_proc_schur_lhs_ptr);
686 ksp_ctx_ptr->getPostProcSetOperators().push_back(post_proc_schur_lhs_ptr);
687
689 };
690
691 auto set_pc = [&](auto pc, auto block_dm) {
693 auto block_is = getDMSubData(block_dm)->getSmartRowIs();
694 CHKERR PCFieldSplitSetIS(pc, NULL, block_is);
695 CHKERR PCFieldSplitSetSchurPre(pc, PC_FIELDSPLIT_SCHUR_PRE_USER, S);
697 };
698
699 auto set_diagonal_pc = [&](auto pc, auto schur_dm) {
701
702 if (AT == BLOCK_SCHUR) {
703 auto A = createDMBlockMat(simple->getDM());
704 auto P = createDMNestSchurMat(simple->getDM());
705 CHKERR PCSetOperators(pc, A, P);
706 }
707
708 KSP *subksp;
709 CHKERR PCFieldSplitSchurGetSubKSP(pc, PETSC_NULL, &subksp);
710 auto get_pc = [](auto ksp) {
711 PC pc_raw;
712 CHKERR KSPGetPC(ksp, &pc_raw);
713 return pc_raw;
714 };
715 CHKERR setSchurA00MatSolvePC(SmartPetscObj<PC>(get_pc(subksp[0]), true));
716
717 auto set_pc_p_mg = [&](auto dm, auto pc) {
719
720 CHKERR PCSetDM(pc, dm);
721 PetscBool same = PETSC_FALSE;
722 PetscObjectTypeCompare((PetscObject)pc, PCMG, &same);
723 if (same) {
724 // By default do not use shell mg mat. Implementation of SOR is slow.
726 pc, createPCMGSetUpViaApproxOrdersCtx(dm, S, false));
727 CHKERR PCSetFromOptions(pc);
728 }
730 };
731
732 CHKERR set_pc_p_mg(schur_dm, get_pc(subksp[1]));
733
734 CHKERR PetscFree(subksp);
736 };
737
738 auto [schur_dm, block_dm] = create_sub_dm();
739 if (AT == BLOCK_SCHUR) {
740 auto nested_mat_data = get_nested_mat_data(schur_dm, block_dm);
741 CHKERR DMMoFEMSetNestSchurData(simple->getDM(), nested_mat_data);
742 }
743 S = createDMHybridisedL2Matrix(schur_dm);
744 CHKERR MatSetDM(S, PETSC_NULL);
745 int bs = (SPACE_DIM == 2) ? NBEDGE_L2(approx_order - 1)
747 CHKERR MatSetBlockSize(S, bs);
748
749 CHKERR set_ops(schur_dm);
750 CHKERR set_pc(pc, block_dm);
751 DM solver_dm;
752 CHKERR KSPGetDM(ksp, &solver_dm);
753 if (AT == BLOCK_SCHUR)
754 CHKERR DMSetMatType(solver_dm, MATSHELL);
755
756 CHKERR KSPSetUp(ksp);
757 if (AT == BLOCK_SCHUR)
758 CHKERR set_diagonal_pc(pc, schur_dm);
759
760 } else {
761 SETERRQ(PETSC_COMM_SELF, MOFEM_ATOM_TEST_INVALID,
762 "PC is not set to PCFIELDSPLIT");
763 }
765}
766
767boost::shared_ptr<SetUpSchur>
769 return boost::shared_ptr<SetUpSchur>(new SetUpSchurImpl(m_field));
770}
Integrator for broken space constraints.
#define MOFEM_LOG_C(channel, severity, format,...)
void simple(double P1[], double P2[], double P3[], double c[], const int N)
Definition acoustic.cpp:69
int main()
static const double eps
FormsIntegrators< DomainEleOp >::Assembly< PETSC >::LinearForm< GAUSS >::OpSource< 1, FIELD_DIM > OpDomainSource
ElementsAndOps< SPACE_DIM >::BoundaryEle BoundaryEle
ElementsAndOps< SPACE_DIM >::DomainEle DomainEle
[Define dimension]
#define SCHUR_ASSEMBLE
Definition contact.cpp:18
#define CATCH_ERRORS
Catch errors.
FieldApproximationBase
approximation base
Definition definitions.h:58
@ AINSWORTH_LEGENDRE_BASE
Ainsworth Cole (Legendre) approx. base nme:nme847.
Definition definitions.h:60
@ AINSWORTH_LOBATTO_BASE
Definition definitions.h:62
@ DEMKOWICZ_JACOBI_BASE
Definition definitions.h:66
@ AINSWORTH_BERNSTEIN_BEZIER_BASE
Definition definitions.h:64
#define CHK_THROW_MESSAGE(err, msg)
Check and throw MoFEM exception.
FieldSpace
approximation spaces
Definition definitions.h:82
@ L2
field with C-1 continuity
Definition definitions.h:88
@ NOSPACE
Definition definitions.h:83
@ HCURL
field with continuous tangents
Definition definitions.h:86
@ HDIV
field with continuous normal traction
Definition definitions.h:87
#define MoFEMFunctionBegin
First executable line of each MoFEM function, used for error handling. Final line of MoFEM functions ...
@ MOFEM_IMPOSSIBLE_CASE
Definition definitions.h:35
@ MOFEM_ATOM_TEST_INVALID
Definition definitions.h:40
#define MoFEMFunctionReturn(a)
Last executable line of each PETSc function used for error handling. Replaces return()
#define CHKERR
Inline error check.
auto integration_rule
PetscErrorCode DMMoFEMCreateSubDM(DM subdm, DM dm, const char problem_name[])
Must be called by user to set Sub DM MoFEM data structures.
Definition DMMoFEM.cpp:215
PetscErrorCode DMMoFEMAddElement(DM dm, std::string fe_name)
add element to dm
Definition DMMoFEM.cpp:497
PetscErrorCode DMMoFEMSetSquareProblem(DM dm, PetscBool square_problem)
set squared problem
Definition DMMoFEM.cpp:456
PetscErrorCode DMoFEMMeshToLocalVector(DM dm, Vec l, InsertMode mode, ScatterMode scatter_mode)
set local (or ghosted) vector values on mesh for partition only
Definition DMMoFEM.cpp:523
PetscErrorCode DMMoFEMAddSubFieldRow(DM dm, const char field_name[])
Definition DMMoFEM.cpp:238
PetscErrorCode DMRegister_MoFEM(const char sname[])
Register MoFEM problem.
Definition DMMoFEM.cpp:43
MoFEMErrorCode DMRegister_MGViaApproxOrders(const char sname[])
Register DM for Multi-Grid via approximation orders.
PetscErrorCode DMoFEMLoopFiniteElements(DM dm, const char fe_name[], MoFEM::FEMethod *method, CacheTupleWeakPtr cache_ptr=CacheTupleSharedPtr())
Executes FEMethod for finite elements in DM.
Definition DMMoFEM.cpp:586
auto createDMVector(DM dm)
Get smart vector from DM.
Definition DMMoFEM.hpp:1099
PetscErrorCode DMMoFEMAddSubFieldCol(DM dm, const char field_name[])
Definition DMMoFEM.cpp:280
IntegrationType
Form integrator integration types.
AssemblyType
[Storage and set boundary conditions]
static LoggerType & setLog(const std::string channel)
Set and reset channel logger.
#define MOFEM_LOG(channel, severity)
Log.
#define MOFEM_LOG_TAG(channel, tag)
Tag channel.
#define NBFACETRI_L2(P)
Number of base functions on triangle for L2 space.
#define NBEDGE_L2(P)
Number of base functions on edge from L2 space.
FormsIntegrators< DomainEleOp >::Assembly< PETSC >::BiLinearForm< GAUSS >::OpMixDivTimesScalar< 2 > OpHdivU
PetscErrorCode MoFEMErrorCode
MoFEM/PETSc error code.
implementation of Data Operators for Forces and Sources
Definition Common.hpp:10
OpSchurAssembleBase * createOpSchurAssembleEnd(std::vector< std::string > fields_name, std::vector< boost::shared_ptr< Range > > field_ents, SmartPetscObj< AO > ao, SmartPetscObj< Mat > schur, bool sym_schur, bool symm_op)
Construct a new Op Schur Assemble End object.
Definition Schur.cpp:2186
PetscErrorCode PetscOptionsGetInt(PetscOptions *, const char pre[], const char name[], PetscInt *ivalue, PetscBool *set)
MoFEMErrorCode DMMoFEMGetBlocMatData(DM dm, boost::shared_ptr< BlockStructure > &)
Get data for block mat.
Definition DMMoFEM.cpp:1542
MoFEMErrorCode setSchurA00MatSolvePC(SmartPetscObj< PC > pc)
Set PC for A00 block.
Definition Schur.cpp:2223
SmartPetscObj< Vec > vectorDuplicate(Vec vec)
Create duplicate vector of smart vector.
boost::shared_ptr< PCMGSetUpViaApproxOrdersCtx > createPCMGSetUpViaApproxOrdersCtx(DM dm, Mat A, bool use_shell_mat)
createPCMGSetUpViaApproxOrdersCtx
auto createVectorMPI(MPI_Comm comm, PetscInt n, PetscInt N)
Create MPI Vector.
auto getDMKspCtx(DM dm)
Get KSP context data structure used by DM.
Definition DMMoFEM.hpp:1113
auto createDMHybridisedL2Matrix(DM dm)
Get smart hybridised L2 matrix from DM.
Definition DMMoFEM.hpp:1069
auto getDMSubData(DM dm)
Get sub problem data structure.
Definition DMMoFEM.hpp:1157
MoFEMErrorCode PCMGSetUpViaApproxOrders(PC pc, boost::shared_ptr< PCMGSetUpViaApproxOrdersCtx > ctx, int verb)
Function build MG structure.
boost::shared_ptr< BlockStructure > createBlockMatStructure(DM dm, SchurFEOpsFEandFields schur_fe_op_vec)
Create a Mat Diag Blocks object.
Definition Schur.cpp:1009
boost::shared_ptr< NestSchurData > createSchurNestedMatrixStruture(std::pair< SmartPetscObj< DM >, SmartPetscObj< DM > > dms, boost::shared_ptr< BlockStructure > block_mat_data_ptr, std::vector< std::string > fields_names, std::vector< boost::shared_ptr< Range > > field_ents, bool add_preconditioner_block)
Get the Schur Nest Mat Array object.
Definition Schur.cpp:1944
auto createAOMappingIS(IS isapp, IS ispetsc)
Creates an application mapping using two index sets.
PetscErrorCode PetscOptionsGetEList(PetscOptions *, const char pre[], const char name[], const char *const *list, PetscInt next, PetscInt *value, PetscBool *set)
MoFEMErrorCode DMMoFEMSetNestSchurData(DM dm, boost::shared_ptr< NestSchurData >)
Definition DMMoFEM.cpp:1562
auto createDMNestSchurMat(DM dm)
Definition DMMoFEM.hpp:1083
auto createDM(MPI_Comm comm, const std::string dm_type_name)
Creates smart DM object.
OpSchurAssembleBase * createOpSchurAssembleBegin()
Definition Schur.cpp:2181
auto createDMBlockMat(DM dm)
Definition DMMoFEM.hpp:1076
OpPostProcMapInMoab< SPACE_DIM, SPACE_DIM > OpPPMap
#define EXECUTABLE_DIMENSION
Definition plastic.cpp:13
PipelineManager::ElementsAndOpsByDim< SPACE_DIM >::FaceSideEle EleOnSide
FormsIntegrators< DomainEleOp >::Assembly< PETSC >::LinearForm< GAUSS >::OpMixDivTimesU< 3, 1, 2 > OpHDivH
Integrate Rhs div flux base times temperature (T)
Definition seepage.cpp:115
Add operators pushing bases from local to physical configuration.
Simple interface for fast problem set-up.
Definition BcManager.hpp:25
MoFEMErrorCode removeSideDOFs(const std::string problem_name, const std::string block_name, const std::string field_name, int bridge_dim, int lo, int hi, bool is_distributed_mesh=true)
Remove side DOFs.
virtual moab::Interface & get_moab()=0
virtual MPI_Comm & get_comm() const =0
virtual int get_comm_rank() const =0
Core (interface) class.
Definition Core.hpp:82
static MoFEMErrorCode Initialize(int *argc, char ***args, const char file[], const char help[])
Initializes the MoFEM database PETSc, MOAB and MPI.
Definition Core.cpp:72
static MoFEMErrorCode Finalize()
Checks for options to be called at the conclusion of the program.
Definition Core.cpp:112
base operator to do operations at Gauss Pt. level
Deprecated interface functions.
Data on single entity (This is passed as argument to DataOperator::doWork)
static boost::shared_ptr< SinkType > createSink(boost::shared_ptr< std::ostream > stream_ptr, std::string comm_filter)
Create a sink object.
static boost::shared_ptr< std::ostream > getStrmWorld()
Get the strm world object.
Get norm of input VectorDouble for Tensor0.
Get norm of input MatrixDouble for Tensor1.
Calculate tensor field using vectorial base, i.e. Hdiv/Hcurl.
Get vector field for H-div approximation.
Calculate divergence of vector field.
Get field gradients at integration pts for scalar field rank 0, i.e. vector field.
Get value at integration points for scalar field.
Get values at integration pts for tensor field rank 1, i.e. vector field.
Get values from scalar function at integration points and save them to VectorDouble for Tensor0.
Element used to execute operators on side of the element.
Post post-proc data at points from hash maps.
PipelineManager interface.
Simple interface for fast problem set-up.
Definition Simple.hpp:27
intrusive_ptr for managing petsc objects
MoFEMErrorCode getInterface(IFACE *&iface) const
Get interface reference to pointer of interface.
[Push operators to pipeline]
SetUpSchur()=default
virtual MoFEMErrorCode setUp(SmartPetscObj< KSP >)=0
virtual ~SetUpSchur()=default
static boost::shared_ptr< SetUpSchur > createSetUpSchur(MoFEM::Interface &m_field)
virtual ~SetUpSchurImpl()=default
SmartPetscObj< Mat > S
SetUpSchurImpl(MoFEM::Interface &m_field)
MoFEMErrorCode setUp(SmartPetscObj< KSP >)
MoFEM::Interface & mField
constexpr AssemblyType AT
int approx_order
constexpr IntegrationType IT
static char help[]
constexpr int SPACE_DIM
constexpr bool debug
BoundaryEle::UserDataOperator BdyEleOp
FormsIntegrators< DomainEleOp >::Assembly< PETSC >::LinearForm< GAUSS >::OpBaseTimesVector< 3, SPACE_DIM, 1 > OpHdivFlux
Integrating Rhs flux base (1/k) flux (FLUX)
FormsIntegrators< DomainEleOp >::Assembly< PETSC >::BiLinearForm< GAUSS >::OpMass< 3, SPACE_DIM > OpHdivHdiv
[Linear elastic problem]