I am encountering an error when running the following program (only the relevant parts are shown):
// MPI-parallel solver for a mixed problem discretized with a
// Nedelec / Raviart-Thomas finite element pair (deal.II, dim = 3).
//
// NOTE(review): the original text contained "Parameters ¶meters" --
// HTML-entity mojibake for "Parameters &parameters"; fixed below.
class NedRTStd
{
public:
  // Run-time parameters (contents elided in this excerpt).
  struct Parameters
  {
    // Some parameters go here
  };

  // A Parameters object is mandatory, so default construction is deleted.
  NedRTStd () = delete;
  NedRTStd (Parameters &parameters);
  ~NedRTStd ();

  // Drives the whole computation (grid, system setup, solve, output).
  void run ();

private:
  void setup_grid ();
  void setup_system_matrix ();
  void setup_constraints ();
  void assemble_system ();
  void solve_direct ();
  void solve_iterative ();
  void solve_iterative_preconditioned ();
  void output_results () const;

  MPI_Comm mpi_communicator;

  // Reference to caller-owned parameters; the caller must keep the
  // object alive for the lifetime of this solver.
  Parameters &parameters;

  parallel::distributed::Triangulation<3> triangulation;
  FESystem<3>   fe; // Will hold 2 blocks (Nedelec-Raviart-Thomas pair)
  DoFHandler<3> dof_handler;

  // Per-block index sets: locally owned DoFs and locally relevant
  // (owned + ghost) DoFs, one IndexSet per block.
  std::vector<IndexSet> owned_partitioning;
  std::vector<IndexSet> relevant_partitioning;

  AffineConstraints<double> constraints;

  LA::MPI::BlockSparseMatrix system_matrix;
  LA::MPI::BlockVector       locally_relevant_solution;
  LA::MPI::BlockVector       system_rhs;

  ConditionalOStream pcout;           // prints on rank 0 only
  TimerOutput        computing_timer;
};
// Constructor: wires up the MPI communicator, the distributed mesh,
// the Nedelec/Raviart-Thomas element pair and the output/timing helpers.
//
// NOTE(review): the original text contained "Parameters ¶meters_" --
// HTML-entity mojibake for "Parameters &parameters_"; fixed here.
NedRTStd::NedRTStd (Parameters &parameters_)
  : mpi_communicator(MPI_COMM_WORLD)
  , parameters(parameters_) // stores a reference; parameters_ must outlive *this
  , triangulation(mpi_communicator,
                  typename Triangulation<3>::MeshSmoothing(
                    Triangulation<3>::smoothing_on_refinement |
                    Triangulation<3>::smoothing_on_coarsening))
  , fe(FE_Nedelec<3>(1), 1,       // block 0: Nedelec, degree 1
       FE_RaviartThomas<3>(1), 1) // block 1: Raviart-Thomas, degree 1
  , dof_handler(triangulation)
  , pcout(std::cout,
          (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0))
  , computing_timer(mpi_communicator,
                    pcout,
                    TimerOutput::summary,
                    TimerOutput::wall_times)
{}
// Distributes DoFs, builds the block partitioning, the constraints,
// the distributed sparsity pattern and finally (re)initializes the
// system matrix and the solution/RHS vectors.
// NOTE(review): statement order matters throughout -- renumbering must
// precede DoF counting, and constraints must exist before the sparsity
// pattern is built.
void NedRTStd::setup_system_matrix ()
{
TimerOutput::Scope t(computing_timer, "system and constraint setup");
dof_handler.distribute_dofs (fe);
// Optional bandwidth-reducing renumbering, controlled by the parameters.
if (parameters.renumber_dofs)
{
DoFRenumbering::Cuthill_McKee (dof_handler);
}
// Block-wise renumbering must come last so that each block's DoFs form a
// contiguous index range -- get_view() below relies on this.
DoFRenumbering::block_wise (dof_handler);
//////////////////////////////////////
// fe has 2 blocks and 6 components
//////////////////////////////////////
std::vector<types::global_dof_index> dofs_per_block (2);
DoFTools::count_dofs_per_block (dof_handler, dofs_per_block);
// n_sigma: DoFs of block 0 (Nedelec), n_u: DoFs of block 1 (Raviart-Thomas).
const unsigned int n_sigma = dofs_per_block[0],
n_u = dofs_per_block[1];
pcout << "Number of active cells: "
<< triangulation.n_global_active_cells()
<< " (on " << triangulation.n_levels() << " levels)"
<< std::endl
<< "Number of degrees of freedom: "
<< dof_handler.n_dofs()
<< " (" << n_sigma << '+' << n_u << ')'
<< std::endl;
// Split the locally owned index set into per-block views
// ([0, n_sigma) and [n_sigma, n_sigma + n_u)).
owned_partitioning.resize(2);
owned_partitioning[0] = dof_handler.locally_owned_dofs().get_view(0, n_sigma);
owned_partitioning[1] = dof_handler.locally_owned_dofs().get_view(n_sigma, n_sigma + n_u);
// Same split for the locally relevant (owned + ghost) DoFs.
IndexSet locally_relevant_dofs;
DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
relevant_partitioning.resize(2);
relevant_partitioning[0] = locally_relevant_dofs.get_view(0, n_sigma);
relevant_partitioning[1] = locally_relevant_dofs.get_view(n_sigma, n_sigma + n_u);
setup_constraints (); // Only hanging nodes here
// Build the distributed sparsity pattern and initialize the matrix;
// the pattern is scoped so the temporary dsp is released afterwards.
{
BlockDynamicSparsityPattern dsp(dofs_per_block, dofs_per_block);
DoFTools::make_sparsity_pattern( dof_handler, dsp, constraints, false);
// Exchange pattern entries so each rank knows the couplings of the
// rows it owns.
SparsityTools::distribute_sparsity_pattern(dsp,
dof_handler.locally_owned_dofs_per_processor(),
mpi_communicator,
locally_relevant_dofs);
system_matrix.clear();
system_matrix.reinit(owned_partitioning, dsp, mpi_communicator);
}
//////////////////////////////////////
// Here something happens...
//////////////////////////////////////
// Ghosted vector: readable ghost entries, not writable directly.
locally_relevant_solution.reinit(owned_partitioning,
relevant_partitioning,
mpi_communicator);
//////////////////////////////////////
// Here also
//////////////////////////////////////
// RHS holds only locally owned entries (no ghosts) so it can be
// written to during assembly.
system_rhs.reinit(owned_partitioning, mpi_communicator);
}