#include <deal.II/base/function.h>
#include <deal.II/base/index_set.h>
#include <deal.II/base/logstream.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/point.h>
#include <deal.II/base/utilities.h>
#include <deal.II/distributed/solution_transfer.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_nothing.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/hp/fe_collection.h>
#include <deal.II/lac/generic_linear_algebra.h>
#include <deal.II/lac/vector.h>
#include <deal.II/numerics/data_out.h>
#include <deal.II/numerics/vector_tools.h>

#include <cmath>
#include <fstream>
#include <iostream>
#include <vector>

//
// Including cell_data_transfer.h did not work. Had to include
// cell_data_transfer.templates.h. Why?
//
//#include <deal.II/distributed/cell_data_transfer.h>
#include <deal.II/distributed/cell_data_transfer.templates.h>

namespace LA
{
#if defined(DEAL_II_WITH_PETSC) && !defined(DEAL_II_PETSC_WITH_COMPLEX) && \
  !(defined(DEAL_II_WITH_TRILINOS) && defined(FORCE_USE_OF_TRILINOS))
  using namespace dealii::LinearAlgebraPETSc;
#  define USE_PETSC_LA
#elif defined(DEAL_II_WITH_TRILINOS)
  using namespace dealii::LinearAlgebraTrilinos;
#else
#  error DEAL_II_WITH_PETSC or DEAL_II_WITH_TRILINOS required
#endif
} // namespace LA

using namespace dealii;

class FE_nothing_test
{
private:
  enum
  {
    fe_q       = 0,
    fe_nothing = 1
  }; // index in m_fe_collection

  MPI_Comm                                   m_mpi_communicator;
  parallel::distributed::Triangulation<2, 2> m_triangulation;
  DoFHandler<2>                              m_dof_handler;

  parallel::distributed::SolutionTransfer<2, LA::MPI::Vector> m_solution_trans;
  parallel::distributed::CellDataTransfer<2, 2, std::vector<std::vector<double>>>
    m_cell_data_trans;

  LA::MPI::Vector m_completely_distributed_solution;
  LA::MPI::Vector m_locally_relevant_solution;
  IndexSet        m_locally_owned_dofs;
  IndexSet        m_locally_relevant_dofs;

  hp::FECollection<2> m_fe_collection;

  LA::MPI::Vector m_previous_locally_relevant_solution;

  int          myRank;
  const double m_largeNum;

public:
  FE_nothing_test()
    : m_mpi_communicator(MPI_COMM_WORLD)
    , m_triangulation(m_mpi_communicator,
                      Triangulation<2>::MeshSmoothing(Triangulation<2>::none))
    , m_dof_handler(m_triangulation)
    , m_solution_trans(m_dof_handler)
    , m_cell_data_trans(m_triangulation, /*transfer_variable_size_data=*/true)
    , myRank(Utilities::MPI::this_mpi_process(m_mpi_communicator))
    , m_largeNum(10E36)
  {
    m_fe_collection.push_back(FE_Q<2>(1));
    m_fe_collection.push_back(FE_Nothing<2>());
  }

  void run()
  {
    std::ofstream logfile("output.txt");
    deallog.attach(logfile);
    deallog.depth_console(0);

    std::vector<unsigned int> repetitions{4, 4};
    const Point<2>            p1(0, 0), p2(1, 1);
    GridGenerator::subdivided_hyper_rectangle(m_triangulation,
                                              repetitions,
                                              p1,
                                              p2,
                                              false);
    double rowHeight = p2[1] / repetitions[1];

    int step = 0;

    // Assign FE_Q cells to the bottom row; the top 3 rows are FE_Nothing.
    for (auto &cell : m_dof_handler.active_cell_iterators())
      {
        if (cell->is_locally_owned())
          {
            auto center = cell->center();
            if (center(1) < rowHeight * (step + 1))
              {
                cell->set_active_fe_index(fe_q);
              }
            else
              {
                cell->set_active_fe_index(fe_nothing);
              }
          }
      }

    // Distribute dofs and set up the solution vectors.
    setUpSolutions();

    double initialValue = 5;
    VectorTools::interpolate(m_dof_handler,
                             Functions::ConstantFunction<2>(initialValue),
                             m_completely_distributed_solution);
    m_locally_relevant_solution = m_completely_distributed_solution;

    std::cout << " Output 1 from " << myRank << std::endl;
    output(step);

    //
    // Activate the rows one after another.
    //
    for (step = 1; step < repetitions[1]; step++)
      {
        p_coarsening_and_refinement_solution_transfer(step,
                                                      rowHeight,
                                                      initialValue);
      }

    std::cout << " Output 3 from " << myRank << std::endl;
    output(step);
  }

  void setUpSolutions()
  {
    m_dof_handler.distribute_dofs(m_fe_collection);

    m_locally_owned_dofs = m_dof_handler.locally_owned_dofs();
    m_locally_relevant_dofs.clear();
    DoFTools::extract_locally_relevant_dofs(m_dof_handler,
                                            m_locally_relevant_dofs);

    m_completely_distributed_solution.reinit(m_locally_owned_dofs,
                                             m_mpi_communicator);
    m_locally_relevant_solution.reinit(m_locally_owned_dofs,
                                       m_locally_relevant_dofs,
                                       m_mpi_communicator);
  }
  void p_coarsening_and_refinement_solution_transfer(int    step,
                                                      double rowHeight,
                                                      double initialValue)
  {
    std::cout << " Activate row number " << step + 1 << std::endl;

    for (auto &cell : m_dof_handler.active_cell_iterators())
      {
        if (cell->is_locally_owned())
          {
            auto center = cell->center();
            if (center(1) < rowHeight * (step + 1))
              {
                cell->set_future_fe_index(fe_q);
              }
            else
              {
                cell->set_future_fe_index(fe_nothing);
              }
          }
      }

    // Collect the dof values of all locally owned FE_Q cells for transfer.
    std::vector<std::vector<double>> data_to_transfer(
      m_triangulation.n_active_cells());
    int i = 0;
    for (auto &cell : m_dof_handler.active_cell_iterators())
      {
        std::vector<double> cell_data;
        if (cell->is_locally_owned())
          if (cell->active_fe_index() == fe_q)
            {
              cell_data.resize(cell->get_fe().n_dofs_per_cell());
              cell->get_dof_values(m_locally_relevant_solution,
                                   cell_data.begin(),
                                   cell_data.end());
            }
        data_to_transfer[i] = cell_data;
        i++;
      }

    m_triangulation.prepare_coarsening_and_refinement();
    m_cell_data_trans.prepare_for_coarsening_and_refinement(data_to_transfer);
    m_triangulation.execute_coarsening_and_refinement();

    setUpSolutions();

    VectorTools::interpolate(m_dof_handler,
                             Functions::ConstantFunction<2>(m_largeNum),
                             m_completely_distributed_solution);

    std::vector<std::vector<double>> transferred_data(
      m_triangulation.n_active_cells());
    m_cell_data_trans.unpack(transferred_data);

    i = 0;
    for (auto &cell : m_dof_handler.active_cell_iterators())
      {
        const std::vector<double> &cell_data = transferred_data[i];
        if (cell_data.size() != 0)
          {
            // Copy cell_data to local_data, because set_dof_values can only
            // take a Vector<>.
            Vector<double> local_data(cell_data.size());
            std::copy(cell_data.begin(), cell_data.end(), local_data.begin());
            cell->set_dof_values(local_data,
                                 m_completely_distributed_solution);
            //
            // Does set_dof_values set entries in
            // m_completely_distributed_solution that are not owned by the
            // current rank?
            //
          }
        i++;
      }

    //
    // Is it ok to call compress with VectorOperation::insert here?
    // Will calling compress with insert create entries in
    // m_completely_distributed_solution that are not owned by this processor?
    //
    m_completely_distributed_solution.compress(VectorOperation::insert);

    //
    // Does the following line assign ghost values on all processors by
    // exchanging data over MPI?
    //
    m_locally_relevant_solution = m_completely_distributed_solution;

    // Look for the dofs that hold m_largeNum; those were newly created, so
    // assign the initial value to them.
    for (auto &cell : m_dof_handler.active_cell_iterators())
      {
        if (cell->is_locally_owned())
          if (cell->active_fe_index() == fe_q)
            {
              Vector<double> local_data(cell->get_fe().n_dofs_per_cell());
              cell->get_dof_values(m_locally_relevant_solution, local_data);
              for (unsigned int j = 0; j < local_data.size(); j++)
                {
                  // std::cout << "local_data = " << local_data[j] << std::endl;
                  if (std::abs(local_data[j] - m_largeNum) < 0.0001)
                    {
                      local_data[j] = initialValue;
                    }
                }
              cell->set_dof_values(local_data,
                                   m_completely_distributed_solution);
              //
              // Does set_dof_values set entries in
              // m_completely_distributed_solution that are not owned by the
              // current rank?
              //
            }
      }

    //
    // Is it ok to call compress with VectorOperation::insert here?
    // Will calling compress with insert create entries in
    // m_completely_distributed_solution that are not owned by this processor?
    //
    m_completely_distributed_solution.compress(VectorOperation::insert);

    //
    // Does the following line assign ghost values on all processors by
    // exchanging data over MPI?
    //
    m_locally_relevant_solution = m_completely_distributed_solution;
  }

  void output(int step)
  {
    Vector<float> FE_Type(m_triangulation.n_active_cells());
    Vector<float> subdomain(m_triangulation.n_active_cells());
    int           i = 0;
    for (auto &cell : m_dof_handler.active_cell_iterators())
      {
        if (cell->is_locally_owned())
          {
            FE_Type(i)   = cell->active_fe_index();
            subdomain(i) = m_triangulation.locally_owned_subdomain();
          }
        else
          {
            FE_Type(i)   = -1;
            subdomain(i) = -1;
          }
        i++;
      }

    const unsigned int n_subdivisions = 0; // 3;
    DataOut<2>         data_out;
    data_out.attach_dof_handler(m_dof_handler);
    data_out.add_data_vector(m_locally_relevant_solution, "Solution");
    data_out.add_data_vector(FE_Type, "FE_Type");
    data_out.add_data_vector(subdomain, "subdomain");
    data_out.build_patches(n_subdivisions);

    data_out.write_vtu_with_pvtu_record(
      "./", "solution", step, m_mpi_communicator, 2);
  }
};

int main(int argc, char *argv[])
{
  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  FE_nothing_test test;
  test.run();
}