// Test case based on the one written by K. Bzowski

#include <deal.II/base/index_set.h>
#include <deal.II/base/logstream.h>
#include <deal.II/base/mpi.h>

#include <deal.II/distributed/solution_transfer.h>
#include <deal.II/distributed/tria.h>

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>

#include <deal.II/fe/fe_nothing.h>
#include <deal.II/fe/fe_q.h>

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>

#include <deal.II/hp/fe_collection.h>

#include <deal.II/lac/generic_linear_algebra.h>
#include <deal.II/lac/vector.h>

#include <deal.II/numerics/data_out.h>

// uncomment the following #define if you have PETSc and Trilinos installed
// and you prefer using Trilinos in this example:
// @code
// #define FORCE_USE_OF_TRILINOS
// @endcode

// This will either import PETSc or TrilinosWrappers into the namespace LA.
// Note that we are defining the macro USE_PETSC_LA so that code can detect
// whether PETSc is being used. This LA namespace must come after including
// <deal.II/lac/generic_linear_algebra.h>.
namespace LA
{
#if defined(DEAL_II_WITH_PETSC) && !defined(DEAL_II_PETSC_WITH_COMPLEX) && \
  !(defined(DEAL_II_WITH_TRILINOS) && defined(FORCE_USE_OF_TRILINOS))
  using namespace dealii::LinearAlgebraPETSc;
#  define USE_PETSC_LA
#elif defined(DEAL_II_WITH_TRILINOS)
  using namespace dealii::LinearAlgebraTrilinos;
#else
#  error DEAL_II_WITH_PETSC or DEAL_II_WITH_TRILINOS required
#endif
} // namespace LA

#include <fstream>

using namespace dealii;

class FE_nothing_test
{
private:
  MPI_Comm mpi_communicator;

  parallel::distributed::Triangulation<2, 2> m_triangulation;
  DoFHandler<2>                              m_dof_handler;

  parallel::distributed::SolutionTransfer<2, LA::MPI::Vector>
    m_solution_trans;

  LA::MPI::Vector m_completely_distributed_solution;
  LA::MPI::Vector m_locally_relevant_solution;
  LA::MPI::Vector m_previous_locally_relevant_solution;

  IndexSet m_locally_owned_dofs;
  IndexSet m_locally_relevant_dofs;

  hp::FECollection<2> m_fe_collection;

public:
  FE_nothing_test()
    : mpi_communicator(MPI_COMM_WORLD)
    , m_triangulation(mpi_communicator,
                      typename Triangulation<2>::MeshSmoothing(
                        Triangulation<2>::none))
    , m_dof_handler(m_triangulation)
    , m_solution_trans(m_dof_handler)
  {}

  void run()
  {
    std::ofstream logfile("output");
    deallog.attach(logfile);
    deallog.depth_console(0);

    GridGenerator::hyper_cube(m_triangulation);
    m_triangulation.refine_global(1);

    m_fe_collection.push_back(FE_Q<2>(1));
    m_fe_collection.push_back(FE_Nothing<2>());
    // m_fe_collection.push_back(FE_Nothing<2>(1, true));

    int step = 1;

    // Assign finite elements on the 2x2 mesh: FE_Q on the bottom half,
    // FE_Nothing on the top half.
    /*
     * -----------
     * |  1 |  1 |
     * -----------
     * |  0 |  0 |      0 - FE_Q, 1 - FE_Nothing
     * -----------
     */
    for (auto &cell : m_dof_handler.active_cell_iterators())
      {
        if (cell->is_locally_owned())
          {
            const auto center = cell->center();
            if (center(1) < 0.5)
              cell->set_active_fe_index(0);
            else
              cell->set_active_fe_index(1);
          }
      }

    setUpSolutions();

    m_completely_distributed_solution = 1.0;
    m_locally_relevant_solution       = m_completely_distributed_solution;

    output(step);

    //
    // h-refinement
    //
    step = 2;

    /* Set refine flags on all cells:
     * -----------
     * |  R |  R |
     * |---------|
     * |  R |  R |
     * -----------
     */
    for (auto &cell : m_dof_handler.active_cell_iterators())
      {
        if (cell->is_locally_owned())
          cell->set_refine_flag();
      }

    coarsening_and_refinement();
    setUpSolutions();
    solution_transfer();

    output(step);

    //
    // p-refinement
    //
    step = 3;

    // Switch one more row of cells (now on the refined 4x4 mesh) from
    // FE_Nothing to FE_Q by assigning future FE indices: every cell whose
    // center lies below y = 0.75 becomes FE_Q.
    for (auto &cell : m_dof_handler.active_cell_iterators())
      {
        if (cell->is_locally_owned())
          {
            const auto center = cell->center();
            if (center(1) < 0.75)
              cell->set_future_fe_index(0);
            else
              cell->set_future_fe_index(1);
          }
      }

    coarsening_and_refinement();
    setUpSolutions();
    solution_transfer();

    output(step);
  }

  void setUpSolutions()
  {
    m_dof_handler.distribute_dofs(m_fe_collection);

    m_locally_owned_dofs = m_dof_handler.locally_owned_dofs();
    m_locally_relevant_dofs.clear();
    DoFTools::extract_locally_relevant_dofs(m_dof_handler,
                                            m_locally_relevant_dofs);
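    // Reinitialize both vectors for the new DoF layout:
    // m_completely_distributed_solution stores only the locally owned DoFs
    // (writable, no ghost entries), while m_locally_relevant_solution is a
    // ghosted vector that additionally provides read access to the locally
    // relevant (ghost) DoFs needed for interpolation and graphical output.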
    m_completely_distributed_solution.reinit(m_locally_owned_dofs,
                                             mpi_communicator);
    m_locally_relevant_solution.reinit(m_locally_owned_dofs,
                                       m_locally_relevant_dofs,
                                       mpi_communicator);
  }

  void coarsening_and_refinement()
  {
    // Keep a ghosted copy of the current solution alive while the mesh
    // changes; SolutionTransfer reads from it during refinement.
    m_previous_locally_relevant_solution = m_locally_relevant_solution;

    m_triangulation.prepare_coarsening_and_refinement();
    m_solution_trans.prepare_for_coarsening_and_refinement(
      m_previous_locally_relevant_solution);
    m_triangulation.execute_coarsening_and_refinement();
  }

  void solution_transfer()
  {
    m_solution_trans.interpolate(m_completely_distributed_solution);
    m_locally_relevant_solution = m_completely_distributed_solution;
  }

  void output(int step)
  {
    Vector<float> FE_Type(m_triangulation.n_active_cells());
    Vector<float> subdomain(m_triangulation.n_active_cells());

    unsigned int i = 0;
    for (auto &cell : m_dof_handler.active_cell_iterators())
      {
        if (cell->is_locally_owned())
          {
            FE_Type(i)   = cell->active_fe_index();
            subdomain(i) = m_triangulation.locally_owned_subdomain();
          }
        else
          {
            FE_Type(i)   = -1;
            subdomain(i) = -1;
          }
        ++i;
      }

    const unsigned int n_subdivisions = 0;

    DataOut<2> data_out;
    data_out.attach_dof_handler(m_dof_handler);
    data_out.add_data_vector(m_locally_relevant_solution, "Solution");
    data_out.add_data_vector(FE_Type, "FE_Type");
    data_out.add_data_vector(subdomain, "subdomain");
    data_out.build_patches(n_subdivisions);

    data_out.write_vtu_with_pvtu_record(
      "./", "solution", step, mpi_communicator, 2);
  }
};

int main(int argc, char *argv[])
{
  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  FE_nothing_test test;
  test.run();
}