#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_accessor.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/mapping_q1.h>
#include <deal.II/fe/fe_dgq.h>
#include <deal.II/fe/fe_face.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
#include <deal.II/base/geometry_info.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/lac/generic_linear_algebra.h>
#include <array>
#include <vector>
/**
 * Minimal parallel (MPI) test: distributes an FE_DGQ(1) discretisation of the
 * unit square, extends the locally relevant DoF set with the neighbouring
 * face DoFs across subdomain interfaces, and then exercises set/add/compress
 * operations and ghost-value exchange on PETSc vectors.
 *
 * See README file for details.
 */
using namespace dealii;
namespace LA
{
using namespace dealii::LinearAlgebraPETSc;
}
int main(int argc, char **argv)
{
Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
parallel::distributed::Triangulation<2> triang(MPI_COMM_WORLD);
GridGenerator::hyper_cube(triang);
triang.refine_global(5);
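// 5 global refinements of the unit square: 32x32 = 1024 cells, partitioned
// among the MPI ranks by p4est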
const MappingQ1<2> mapping;
const unsigned int degree = 1;
FE_DGQ<2> fe(degree);
FE_FaceQ<2> fe_face(degree);
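// FE_DGQ(degree) numbers its cell DoFs lexicographically (x fastest, then y).
// The DoFs lying on face f are therefore
//   face_first_dof[f] + i*face_dof_increment[f],   i = 0,...,degree
// with faces ordered left, right, bottom, top.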
const std::array<unsigned int, GeometryInfo<2>::faces_per_cell> face_first_dof{0, degree, 0, (degree+1)*degree};
const std::array<unsigned int, GeometryInfo<2>::faces_per_cell> face_dof_increment{degree+1, degree+1, 1, 1};
DoFHandler<2> dof_handler(triang);
dof_handler.distribute_dofs(fe);
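// FE_DGQ DoFs are not shared between cells, so every DoF of a locally owned
// cell is locally owned; only DoFs of ghost neighbours have to be added to
// the relevant set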
IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs();
IndexSet locally_relevant_dofs;
locally_relevant_dofs = locally_owned_dofs; // initialise with owned dofs
unsigned int face_id, face_id_neighbor, i; // face ids wrt owner and neighbor
std::vector<types::global_dof_index> dof_ids_neighbor(fe.dofs_per_cell);
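// For every locally owned cell, find faces shared with ghost cells and add
// the neighbour's DoFs on that face to the locally relevant set; this is
// what a DG face-term assembly needs from the other side of a subdomain
// interface. The mesh is uniformly refined, so every neighbour is on the
// same refinement level.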
for(auto &cell: dof_handler.active_cell_iterators()){
if(!(cell->is_locally_owned())) continue;
for(face_id=0; face_id<GeometryInfo<2>::faces_per_cell; face_id++){
if(cell->face(face_id)->at_boundary()) continue;
if(cell->neighbor(face_id)->is_ghost()){
// current face lies at subdomain interface
// add dofs on this face (wrt neighbor) to locally relevant dofs
cell->neighbor(face_id)->get_dof_indices(dof_ids_neighbor);
face_id_neighbor = cell->neighbor_of_neighbor(face_id);
for(i=0; i<fe_face.dofs_per_face; i++){
locally_relevant_dofs.add_index(
dof_ids_neighbor[
face_first_dof[face_id_neighbor] +
i*face_dof_increment[face_id_neighbor]
]
);
} // loop over face dofs
}
} // loop over internal faces
} // loop over locally owned cells
ConditionalOStream pcout(std::cout, (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)==0));
pcout << "Yosh\n";
std::array<LA::MPI::Vector, 4> vecs;
std::array<LA::MPI::Vector, 4> gh_vecs;
LA::MPI::Vector scaler;
for(unsigned int var=0; var<4; var++){
vecs[var].reinit(locally_owned_dofs, MPI_COMM_WORLD);
gh_vecs[var].reinit(locally_owned_dofs, locally_relevant_dofs, MPI_COMM_WORLD);
}
scaler.reinit(locally_owned_dofs, MPI_COMM_WORLD);
for(const auto i: locally_owned_dofs){
scaler[i] = 1.0*i;
}
scaler.compress(VectorOperation::insert); // finish insertion before scaler is used
std::vector<types::global_dof_index> dof_ids(fe.dofs_per_cell);
// setting ops
for(auto &cell: dof_handler.active_cell_iterators()){
if(!(cell->is_locally_owned())) continue;
cell->get_dof_indices(dof_ids);
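// CAUTION: compress() below is collective over MPI_COMM_WORLD and is called
// once per locally owned cell, so it only completes if every rank owns the
// same number of cells; with an uneven partition this pattern would hang.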
pcout << "\tCell " << cell->index() << "\n";
pcout << "\t\tSetting\n";
for(unsigned int var=0; var<4; var++){
for(const auto i: dof_ids){
vecs[var][i] = 1.0*i;
}
vecs[var].compress(VectorOperation::insert);
}
// addition ops
pcout << "\t\tAdding\n";
for(unsigned int var=0; var<4; var++){
for(const auto i: dof_ids){
vecs[var][i] += 1.0*i;
}
vecs[var].compress(VectorOperation::add);
}
// more ops
pcout << "\t\tMore additions\n";
for(unsigned int var=0; var<4; var++){
for(const auto i: dof_ids){
vecs[var][i] += -5.0*i;
}
vecs[var].compress(VectorOperation::add);
}
} // loop over owned cells
// scaling and communicating
pcout << "Scaling and communicating\n";
for(unsigned int var=0; var<4; var++){
vecs[var].scale(scaler);
pcout << "Scaled variable " << var << "\n";
gh_vecs[var] = vecs[var];
pcout << "Communicated variable " << var << "\n";
}
pcout << "Completed all\n";
return 0;
}