At cycle 3 of the refinement loop the program aborts during factorization:

Cycle 3:
Number of active cells: 33018
Number of degrees of freedom: 273182
Number of constraints: 104952

terminate called after throwing an instance of 'dealii::SparseDirectUMFPACK::ExcUMFPACKError'
  what():
--------------------------------------------------------
An error occurred in line <288> of file </.../deal.II/source/lac/sparse_direct.cc> in function
    void dealii::SparseDirectUMFPACK::factorize(const Matrix&) [with Matrix = dealii::SparseMatrix<double>]
The violated condition was:
    status == UMFPACK_OK
The name and call sequence of the exception was:
    ExcUMFPACKError("umfpack_dl_numeric", status)
Additional Information:
UMFPACK routine umfpack_dl_numeric returned error status -1. See the file <bundled/umfpack/UMFPACK/Include/umfpack.h> for a description of 'status codes'.

Stacktrace:
-----------
#0  /opt/deal.II.8.0.0/lib/libdeal_II.g.so.8.0.0: void dealii::SparseDirectUMFPACK::factorize<dealii::SparseMatrix<double> >(dealii::SparseMatrix<double> const&)
#1  /opt/deal.II.8.0.0/lib/libdeal_II.g.so.8.0.0: void dealii::SparseDirectUMFPACK::initialize<dealii::SparseMatrix<double> >(dealii::SparseMatrix<double> const&, dealii::SparseDirectUMFPACK::AdditionalData)
#2  ./h_csem: MaxwellProblem<3>::run()
#3  ./h_csem: main
--------------------------------------------------------

Following is my implementation:
template <int dim>
void MaxwellProblem<dim>::setup_system ()
{
dof_handler.distribute_dofs (fe);
solution.reinit (dof_handler.n_dofs());
system_rhs.reinit (dof_handler.n_dofs());
constraints.clear ();
VectorTools::project_boundary_values_curl_conforming(dof_handler, 0,
ZeroFunction<dim>(fe.n_components()), 0, constraints);
VectorTools::project_boundary_values_curl_conforming(dof_handler, dim,
ZeroFunction<dim>(fe.n_components()), 0, constraints);
DoFTools::make_hanging_node_constraints (dof_handler, constraints);
constraints.close ();
CompressedSetSparsityPattern csp (dof_handler.n_dofs(), dof_handler.n_dofs());
DoFTools::make_sparsity_pattern (dof_handler, csp, constraints, false);
sparsity_pattern.copy_from (csp);
system_matrix.reinit (sparsity_pattern);
}
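(A diagnostic aside, not part of the original code: since the failure happens in the factorization, it is worth printing the problem size at the end of setup_system(). If the fill-in of a 3D curl-curl matrix with ~270k DoFs does not fit in RAM, UMFPACK fails exactly like this. A minimal sketch:)

// Diagnostic sketch, not in the original code: print quantities that
// indicate how expensive a direct factorization of system_matrix will be.
std::cout << "DoFs:                " << dof_handler.n_dofs() << std::endl;
std::cout << "Nonzeros in pattern: " << sparsity_pattern.n_nonzero_elements() << std::endl;
std::cout << "Matrix memory [MB]:  "
          << system_matrix.memory_consumption() / (1024.0 * 1024.0) << std::endl;

(The factorization typically needs many times the memory of the matrix itself, so these numbers only give a lower bound.)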
template <int dim>
void MaxwellProblem<dim>::assemble_system ()
{
QGauss<dim> quadrature_formula(2);
FEValues<dim> fe_values (fe, quadrature_formula,
update_values | update_gradients |
update_quadrature_points | update_JxW_values);
const size_t dofs_per_cell = fe.dofs_per_cell;
const size_t n_q_points = quadrature_formula.size();
FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
Vector<double> cell_rhs(dofs_per_cell);
std::vector<size_t> local_dof_indices(dofs_per_cell);
std::vector< Point<dim> > epr_rhs(n_q_points);
std::vector< Point<dim> > epi_rhs(n_q_points);
const FEValuesExtractors::Vector Er(0);
const FEValuesExtractors::Vector Ei(dim);
size_t i, j, q_point;
double omega, sigma, d_sigma;
omega = 2 * numbers::PI * f;
auto cell = dof_handler.begin_active();
for (; cell != dof_handler.end(); ++cell)
{
fe_values.reinit (cell);
cell_matrix = 0;
cell_rhs = 0;
sigma = 1.0 / rho[cell->material_id() - 1];
d_sigma = sigma - 1.0 / bg_rho[cell->material_id() - 1];
// Call some functions to calculate the right hand side epr_rhs,
// epi_rhs here.
for (i = 0; i < dofs_per_cell; ++i) {
auto base_index_i = fe.system_to_base_index(i);
for (j = 0; j < dofs_per_cell; ++j) {
auto base_index_j = fe.system_to_base_index(j);
if (base_index_i.first.first == base_index_j.first.first) {
if (base_index_i.first.first == 0) {
for (q_point = 0; q_point < n_q_points; ++q_point) {
cell_matrix(i, j) += fe_values[Er].curl(i, q_point) *
fe_values[Er].curl(j, q_point) * fe_values.JxW(q_point);
}
} else {
for (q_point = 0; q_point < n_q_points; ++q_point) {
cell_matrix(i, j) += fe_values[Ei].curl(i, q_point) *
fe_values[Ei].curl(j, q_point) * fe_values.JxW(q_point);
}
}
} else {
if (base_index_i.first.first == 0) {
for (q_point = 0; q_point < n_q_points; ++q_point) {
cell_matrix(i, j) += omega * mu * sigma *
fe_values[Er].value(i, q_point) *
fe_values[Ei].value(j, q_point) * fe_values.JxW(q_point);
}
} else {
for (q_point = 0; q_point < n_q_points; ++q_point) {
cell_matrix(i, j) += -1.0 * omega * mu * sigma *
fe_values[Ei].value(i, q_point) *
fe_values[Er].value(j, q_point) * fe_values.JxW(q_point);
}
}
}
for (q_point = 0; q_point < n_q_points; ++q_point) {
if (base_index_i.first.first == 0) {
cell_rhs(i) += -1.0 * omega * mu * d_sigma * fe_values[Er].value(i, q_point) *
epi_rhs[q_point] * fe_values.JxW(q_point);
} else {
cell_rhs(i) += omega * mu * d_sigma * fe_values[Ei].value(i, q_point) *
epr_rhs[q_point] * fe_values.JxW(q_point);
}
}
}
}
cell->get_dof_indices (local_dof_indices);
constraints.distribute_local_to_global (cell_matrix, local_dof_indices, system_matrix);
constraints.distribute_local_to_global (cell_rhs, local_dof_indices, system_rhs);
}
}
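(For readers trying to follow the assembly: if I read the extractor logic correctly, each cell contributes the forms below, where v_r, v_i are the real/imaginary test functions, \Delta\sigma = \sigma - \sigma_{bg}, and (E^p_r, E^p_i) is the precomputed primary field stored in epr_rhs/epi_rhs. This is my reading of the code, not the author's statement:)

\begin{aligned}
(\nabla\times v_r,\,\nabla\times E_r) + \omega\mu\sigma\,(v_r,\,E_i) &= -\omega\mu\,\Delta\sigma\,(v_r,\,E^p_i) \\
(\nabla\times v_i,\,\nabla\times E_i) - \omega\mu\sigma\,(v_i,\,E_r) &= \phantom{-}\omega\mu\,\Delta\sigma\,(v_i,\,E^p_r)
\end{aligned}

(Note the skew-symmetric coupling between the real and imaginary blocks: the assembled matrix is nonsymmetric, which is consistent with the later suggestion to try GMRES rather than CG.)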
template <int dim>
void MaxwellProblem<dim>::solve ()
{
SparseDirectUMFPACK A_direct;
A_direct.initialize(system_matrix);
A_direct.vmult (solution, system_rhs);
constraints.distribute (solution);
}
template <int dim>
void MaxwellProblem<dim>::postprocess ()
{
Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
KellyErrorEstimator<dim>::estimate (dof_handler, QGauss<dim - 1>(2),
typename FunctionMap<dim>::type(), solution, estimated_error_per_cell);
GridRefinement::refine_and_coarsen_fixed_number (triangulation,
estimated_error_per_cell,
0.3, 0.03);
triangulation.execute_coarsening_and_refinement ();
}

Can someone provide some hints?
Of course the matrix will have zero entries. We are dealing with sparse matrices, after all. Or do you mean the local matrix?
PETSC ERROR: Error in external library!
PETSC ERROR: Error reported by MUMPS in numerical factorization phase: Cannot allocate required memory 382825844 megabytes...
What preconditioners did you try with the iterative methods like GMRES? ILU or AMG come to mind.
For the direct solver, you ran out of memory (UMFPACK status -1 is UMFPACK_ERROR_out_of_memory in umfpack.h). Try a smaller problem.
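A minimal sketch of the iterative route in deal.II, reusing the names from the code above (the tolerance and restart length are assumptions to be tuned, and plain ILU may converge slowly for curl-curl systems; this is a starting point, not a tuned solver):

#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/solver_gmres.h>
#include <deal.II/lac/sparse_ilu.h>

template <int dim>
void MaxwellProblem<dim>::solve ()
{
// Iterative alternative to SparseDirectUMFPACK: GMRES with an ILU(0)
// preconditioner. The iteration limit, relative tolerance, and restart
// length below are guesses, not tested values.
SolverControl solver_control (10000, 1e-8 * system_rhs.l2_norm());
SolverGMRES<Vector<double> > solver (solver_control,
SolverGMRES<Vector<double> >::AdditionalData (100));
SparseILU<double> preconditioner;
preconditioner.initialize (system_matrix); // default ILU(0) fill-in
solver.solve (system_matrix, solution, system_rhs, preconditioner);
constraints.distribute (solution);
}

If memory allows, SparseDirectUMFPACK on a coarser mesh remains the easiest way to verify that the assembly itself is correct before tuning an iterative solver.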