int Ifpack_Hypre::ApplyInverse(const Epetra_MultiVector& X, Epetra_MultiVector& Y) const{
  if(IsComputed() == false){
    IFPACK_CHK_ERR(-1);
  }

  // These are hypre requirements
  // hypre needs A, X, and Y to have the same contiguous distribution
  // NOTE: Maps are only considered to be contiguous if they were generated using a
  // particular constructor.  Otherwise, LinearMap() will not detect whether they are
  // actually contiguous.
  if(!X.Map().LinearMap() || !Y.Map().LinearMap()) {
    std::cerr << "ERROR: X and Y must have contiguous maps.\n";
    IFPACK_CHK_ERR(-1);
  }
  if(!X.Map().PointSameAs(*MySimpleMap_) || !Y.Map().PointSameAs(*MySimpleMap_)) {
    std::cerr << "ERROR: X, Y, and A must have the same distribution.\n";
    IFPACK_CHK_ERR(-1);
  }
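To illustrate the NOTE in that comment, here is a small standalone check I put together (a sketch, not taken from Ifpack or from my actual code): the same contiguous layout should report LinearMap() == true only when the map is built with Epetra's linear constructor, and not when it is built from an explicit list of global IDs.

#include <Epetra_MpiComm.h>
#include <Epetra_Map.h>
#include <mpi.h>
#include <vector>
#include <iostream>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  Epetra_MpiComm comm(MPI_COMM_WORLD);

  const int n_global = 100;

  // Constructor that distributes the GIDs linearly itself.
  Epetra_Map linear_map(n_global, 0, comm);

  // The same GIDs handed in as an explicit list: the numbering is still
  // contiguous, but (as the NOTE above says) this is not detected.
  std::vector<int> my_gids(linear_map.NumMyElements());
  linear_map.MyGlobalElements(my_gids.data());
  Epetra_Map listed_map(n_global, linear_map.NumMyElements(),
                        my_gids.data(), 0, comm);

  std::cout << "linear_map.LinearMap() = " << linear_map.LinearMap() << "\n"
            << "listed_map.LinearMap() = " << listed_map.LinearMap() << "\n";

  MPI_Finalize();
  return 0;
}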
#include <deal.II/grid/tria.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/dofs/dof_accessor.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/function.h>
#include <deal.II/numerics/vector_tools.h>
#include <deal.II/numerics/matrix_tools.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
//#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/numerics/data_out.h>

#include <fstream>
#include <iostream>

///////////////////////////////////////////////////
#include <deal.II/base/mpi.h>
#include <deal.II/lac/generic_linear_algebra.h>
#include <deal.II/base/utilities.h>
#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/index_set.h>
#include <deal.II/lac/sparsity_tools.h>
//#include <deal.II/distributed/tria.h>
#include <deal.II/distributed/grid_refinement.h>
//#include <deal.II/base/timer.h>
//#include <deal.II/grid/grid_tools.h>
#include <deal.II/dofs/dof_renumbering.h>
#include <deal.II/lac/trilinos_solver.h>
//#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/trilinos_vector_base.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
namespace LA = dealii::LinearAlgebraTrilinos;
using namespace dealii;
class AmgElliptic
{
public:
  AmgElliptic ();

  void run ();
  void set_uniform_grid (unsigned int n_cells, double length);

  void setup_system ();
  void assemble_system ();
  void solve ();
  void output_results () const;

  Triangulation<2>   triangulation;
  FE_Q<2>            fe;
  DoFHandler<2>      dof_handler;

  SparsityPattern    sparsity_pattern;

  MPI_Comm           mpi_communicator;

  const unsigned int n_mpi_processes;
  const unsigned int this_mpi_process;

  ConditionalOStream pcout;
  // TimerOutput     computing_timer;

  LA::MPI::SparseMatrix system_matrix;
  LA::MPI::Vector       system_rhs;
  LA::MPI::Vector       solution;
};
AmgElliptic::AmgElliptic ()
  :
  mpi_communicator (MPI_COMM_WORLD),
  n_mpi_processes (Utilities::MPI::n_mpi_processes(mpi_communicator)),
  this_mpi_process (Utilities::MPI::this_mpi_process(mpi_communicator)),
  pcout (std::cout, (this_mpi_process == 0)),
  fe (1),
  dof_handler (triangulation)
{
  set_uniform_grid(10, 1.0);
}
void AmgElliptic::set_uniform_grid (unsigned int n_cells, double length)
{
  dealii::Point<2> lower_left_pt(0.0, 0.0), upper_right_pt(length, length);
  std::vector<unsigned int> repetitions(2);
  repetitions[0] = n_cells;
  repetitions[1] = n_cells;

  dealii::GridGenerator::subdivided_hyper_rectangle(triangulation,
                                                    repetitions,
                                                    lower_left_pt,
                                                    upper_right_pt);
}
void AmgElliptic::setup_system ()
{
  GridTools::partition_triangulation (n_mpi_processes, triangulation);

  dof_handler.distribute_dofs (fe);
  DoFRenumbering::subdomain_wise (dof_handler);

  const std::vector<IndexSet> locally_owned_dofs_per_proc =
    DoFTools::locally_owned_dofs_per_subdomain(dof_handler);
  const IndexSet locally_owned_dofs = locally_owned_dofs_per_proc[this_mpi_process];

  DynamicSparsityPattern dsp(dof_handler.n_dofs());
  DoFTools::make_sparsity_pattern (dof_handler, dsp);
  sparsity_pattern.copy_from(dsp);

  system_matrix.reinit (locally_owned_dofs, locally_owned_dofs, dsp, mpi_communicator);

  //
  // Here I check if is_ascending_and_one_to_one(mpi_communicator) will be true,
  // and when I run, the result printed is 1
  //
  std::cout << "is_ascending_and_one_to_one = "
            << locally_owned_dofs.is_ascending_and_one_to_one(mpi_communicator)
            << "\n\n";

  solution.reinit(locally_owned_dofs, mpi_communicator);
  system_rhs.reinit(locally_owned_dofs, mpi_communicator);

  //
  // Here I check if LinearMap_ for these vectors is true. When I run, this prints 0
  //
  std::cout << "solution.trilinos_vector().Map().LinearMap() = "
            << solution.trilinos_vector().Map().LinearMap()
            << "\n\n";
  //
  // Here I try something else and make a new vector with a copy constructor to see
  // if this will have LinearMap_ set to 1
  //
  Epetra_MultiVector e_soln_vector(solution.trilinos_vector());
  Epetra_MultiVector e_rhs_vector(system_rhs.trilinos_vector());

  std::cout << "e_soln_vector.Map().LinearMap() = "
            << e_soln_vector.Map().LinearMap()
            << "\n";
}
void AmgElliptic::run ()
{
  setup_system ();
}
int main (int argc, char **argv)
{
  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
  deallog.depth_console (2);

  AmgElliptic test_simulation;
  test_simulation.run();

  return 0;
}
I solved my problem. In the code I included above, I was using the Vector class from the dealii::LinearAlgebraTrilinos::MPI namespace. When I switch to the Vector class from the dealii::LinearAlgebra::EpetraWrappers namespace (and add the trilinos_epetra_vector.h include file), the Epetra vector has LinearMap set to true.
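For reference, here is a stripped-down sketch of that switch (my assumptions: a contiguous locally owned IndexSet split evenly across processes; the FE/DoF setup from my program above is left out):

#include <deal.II/base/mpi.h>
#include <deal.II/base/index_set.h>
#include <deal.II/lac/trilinos_epetra_vector.h>
#include <iostream>

int main(int argc, char **argv)
{
  dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  const unsigned int n_procs = dealii::Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
  const unsigned int my_rank = dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  const unsigned int n_dofs  = 121; // e.g. FE_Q(1) on the 10x10 grid above

  // Contiguous, non-overlapping ownership ranges, one block per process.
  dealii::IndexSet locally_owned(n_dofs);
  locally_owned.add_range((n_dofs * my_rank) / n_procs,
                          (n_dofs * (my_rank + 1)) / n_procs);
  locally_owned.compress();

  // The EpetraWrappers vector instead of LinearAlgebraTrilinos::MPI::Vector.
  dealii::LinearAlgebra::EpetraWrappers::Vector solution;
  solution.reinit(locally_owned, MPI_COMM_WORLD);

  // With this vector type the underlying map reports a linear (contiguous) layout.
  std::cout << "solution.trilinos_vector().Map().LinearMap() = "
            << solution.trilinos_vector().Map().LinearMap() << "\n";

  return 0;
}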
I haven't written either of these classes, so I can't say for sure, but it sounds like an inadvertent difference that we should fix eventually.
The key issue for my application is whether the make_trilinos_map method of the IndexSet class is called with overlapping equal to true or false. At the moment, when make_trilinos_map is called from the reinit method for Vector in the TrilinosWrappers::MPI namespace, overlapping is always set to true.
I believe that, at least with my simple grid and running on only one processor, overlapping can be set to false. My question is: does the following code change make any sense? In my case, has_ghosts evaluates to false, and so when I make this code change, I get linear Epetra vectors. I'll need to think about how, or if, I could make this work with Ifpack and Hypre when there are ghost elements.
Here is the current reinit:

void Vector::reinit (const IndexSet &parallel_partitioner,
                     const MPI_Comm &communicator,
                     const bool      /*omit_zeroing_entries*/)
{
  nonlocal_vector.reset();

  Epetra_Map map = parallel_partitioner.make_trilinos_map (communicator, true);

  vector.reset (new Epetra_FEVector(map));
And here is the change I am asking about:

void Vector::reinit (const IndexSet &parallel_partitioner,
                     const MPI_Comm &communicator,
                     const bool      /*omit_zeroing_entries*/)
{
  nonlocal_vector.reset();

  has_ghosts = vector->Map().UniqueGIDs()==false;
  Epetra_Map map = parallel_partitioner.make_trilinos_map (communicator, has_ghosts);

  vector.reset (new Epetra_FEVector(map));
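As a quick standalone check of what the overlapping flag does to the resulting map, I also tried the following sketch (my own test program, not deal.II code; if I read IndexSet::make_trilinos_map correctly, overlapping == false on an ascending, one-to-one index set lets it use Epetra's linear constructor, while overlapping == true builds the map from a list of GIDs, which matches the LinearMap() == 0 I saw above):

#include <deal.II/base/mpi.h>
#include <deal.II/base/index_set.h>
#include <Epetra_Map.h>
#include <iostream>

int main(int argc, char **argv)
{
  dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  const unsigned int n_procs = dealii::Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
  const unsigned int my_rank = dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  const unsigned int n_dofs  = 121;

  // Contiguous, non-overlapping ownership, as in my setup_system() above.
  dealii::IndexSet owned(n_dofs);
  owned.add_range((n_dofs * my_rank) / n_procs,
                  (n_dofs * (my_rank + 1)) / n_procs);
  owned.compress();

  const Epetra_Map map_false = owned.make_trilinos_map(MPI_COMM_WORLD, false);
  const Epetra_Map map_true  = owned.make_trilinos_map(MPI_COMM_WORLD, true);

  std::cout << "overlapping=false: LinearMap() = " << map_false.LinearMap() << "\n"
            << "overlapping=true:  LinearMap() = " << map_true.LinearMap()  << "\n";

  return 0;
}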
[...]