I assume it has something to do with the synchronization.
The computation of the material forces is done similarly to the assembly of the system_rhs vector:
Vector<double> cell_cf(dofs_per_cell); // cell configurational forces

typename DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active(),
                                               endc = dof_handler.end();
for (; cell != endc; ++cell)
  {
    if (cell->is_locally_owned())
      {
        fe_values.reinit(cell);
        cell_cf = 0;
        ...
        // Configurational force computation ...
        ...
        cell->get_dof_indices(local_dof_indices);
        constraints.distribute_local_to_global(cell_cf, local_dof_indices, configurational_forces);
      }
  }

configurational_forces.compress(VectorOperation::add);
For a single-core computation the result is validated and correct.
What could possibly be wrong when going parallel?
Best regards,
Seyed Ali Mohseni
DataOut<dim> data_out;
data_out.attach_dof_handler(dof_handler);

// ========================[ CONFIGURATIONAL FORCES ]==========================
std::vector<std::string> configurational_forces_magnitude(dim, "config_forces");
std::vector<DataComponentInterpretation::DataComponentInterpretation> configurational_forces_interpretation(dim, DataComponentInterpretation::component_is_part_of_vector);
data_out.add_data_vector(configurational_forces, configurational_forces_magnitude, DataOut<dim>::type_dof_data, configurational_forces_interpretation);

// ================================[ WRITE VTU ]===============================
// Construct data structures
data_out.build_patches();

// Write output
const std::string filename = ("solution-" + Utilities::int_to_string(timestep_no, 4) + "."
                              + Utilities::int_to_string(triangulation.locally_owned_subdomain(), 4));
std::ofstream output(("output/" + filename + ".vtu").c_str()); // file name string as output stream
data_out.write_vtu(output); // write .vtu output file

// ================================[ WRITE PVTU ]==============================
// Write the "master record" (names of the various files that combined represent
// the graphical data for the entire domain)
if (Utilities::MPI::this_mpi_process(mpi_com) == 0) // only the first processor runs this task
  {
    std::vector<std::string> filenames;
    for (unsigned int i = 0; i < Utilities::MPI::n_mpi_processes(mpi_com); ++i)
      filenames.push_back("solution-" + Utilities::int_to_string(timestep_no, 4) + "."
                          + Utilities::int_to_string(i, 4) + ".vtu");

    std::ofstream master_output(("output/solution-" + Utilities::int_to_string(timestep_no, 4) + ".pvtu").c_str());
    data_out.write_pvtu_record(master_output, filenames);
  }
LA::MPI::Vector configurational_forces;
configurational_forces.reinit(locally_owned_dofs, mpi_com);
----------------------------------------------------
Exception on processing:

--------------------------------------------------------
An error occurred in line <131> of file </home/seyedali/programming/c++/projects/dealii/source/lac/petsc_vector_base.cc> in function
    dealii::PETScWrappers::internal::VectorReference::operator double() const
The violated condition was:
    (index >= static_cast<size_type>(begin)) && (index < static_cast<size_type>(end))
Additional information:
    You tried to access element 18 of a distributed vector, but only elements 0 through 17 are stored locally and can be accessed.
--------------------------------------------------------

Aborting!
----------------------------------------------------
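This exception is what happens when DataOut reads a DoF that the local process does not store: on each locally owned cell some DoFs are owned by neighboring processes, and a vector reinitialized with only locally_owned_dofs holds no such ghost entries. A minimal sketch of the two PETSc vector layouts involved (assuming the mpi_com communicator and the IndexSets used above; "field" is just a placeholder name):

// Owned-only layout: writable (can be assembled into), but stores no ghost entries
LA::MPI::Vector owned_only;
owned_only.reinit(locally_owned_dofs, mpi_com);

// Ghosted layout: read-only, but also stores the ghost entries that DataOut
// needs on locally owned cells that touch DoFs owned by other processes
LA::MPI::Vector ghosted;
ghosted.reinit(locally_owned_dofs, locally_relevant_dofs, mpi_com);

// Copying the owned vector into the ghosted one exchanges the ghost values,
// after which the ghosted vector can safely be handed to DataOut
ghosted = owned_only;
data_out.add_data_vector(ghosted, "field");

The assignment from the owned to the ghosted vector is what triggers the ghost-value exchange, which is exactly the pattern suggested below.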
locally_relevant_solution.reinit (locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
data_out.add_data_vector (locally_relevant_solution, "u");
locally_relevant_solution = locally_owned_solution;
LA::MPI::Vector configurational_forces;
LA::MPI::Vector local_configurational_forces;

configurational_forces.reinit(locally_owned_dofs, locally_relevant_dofs, mpi_com);

Vector<double> cell_cf(dofs_per_cell); // cell configurational forces

typename DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active(),
                                               endc = dof_handler.end();
for (; cell != endc; ++cell)
  {
    if (cell->is_locally_owned())
      {
        fe_values.reinit(cell);
        cell_cf = 0;
        ...
        // Configurational force computation ...
        ...
        cell->get_dof_indices(local_dof_indices);
        constraints.distribute_local_to_global(cell_cf, local_dof_indices, local_configurational_forces);
      }
  }

local_configurational_forces.compress(VectorOperation::add);
// ========================[ CONFIGURATIONAL FORCES ]==========================
configurational_forces = local_configurational_forces; // <-- here I receive an error!
std::vector<std::string> configurational_forces_magnitude(dim, "config_forces");
std::vector<DataComponentInterpretation::DataComponentInterpretation> configurational_forces_interpretation(dim, DataComponentInterpretation::component_is_part_of_vector);
data_out.add_data_vector(configurational_forces, configurational_forces_magnitude, DataOut<dim>::type_dof_data, configurational_forces_interpretation);
make all
Scanning dependencies of target solid_mechanics
[ 50%] Building CXX object CMakeFiles/solid_mechanics.dir/solid_mechanics.cc.o
/home/seyedali/fe_models/deal.II/solid_mechanics/solid_mechanics.cc(950): error: no operator "=" matches these operands
            operand types are: const dealii::LinearAlgebraPETSc::MPI::Vector = const dealii::LinearAlgebraPETSc::MPI::Vector
        configurational_forces = local_configurational_forces;
                               ^
          detected during:
            instantiation of "void SolidMechanics<dim>::do_initial_timestep() [with dim=2]" at line 1081
            instantiation of "void SolidMechanics<dim>::run() [with dim=2]" at line 1113

compilation aborted for /home/seyedali/fe_models/deal.II/solid_mechanics/solid_mechanics.cc (code 2)
make[2]: *** [CMakeFiles/solid_mechanics.dir/solid_mechanics.cc.o] Error 2
CMakeFiles/solid_mechanics.dir/build.make:62: recipe for target 'CMakeFiles/solid_mechanics.dir/solid_mechanics.cc.o' failed
CMakeFiles/Makefile2:195: recipe for target 'CMakeFiles/solid_mechanics.dir/all' failed
make[1]: *** [CMakeFiles/solid_mechanics.dir/all] Error 2
Makefile:83: recipe for target 'all' failed
make: *** [all] Error 2
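One possible cause (an assumption, not something confirmed in the thread): both operands in the error are const, which is what happens if the assignment at line 950 sits inside a const member function such as output_results() const. In that case no assignment operator is applicable. A minimal sketch of moving the owned-to-ghosted copy into the non-const function that assembles the forces; compute_configurational_forces() is a hypothetical name, the vectors are the members from the snippets above:

template <int dim>
void SolidMechanics<dim>::compute_configurational_forces() // non-const member function
{
  // ... assembly loop as shown above ...
  local_configurational_forces.compress(VectorOperation::add);

  // owned -> ghosted copy; the ghost values are exchanged here
  configurational_forces = local_configurational_forces;
}

template <int dim>
void SolidMechanics<dim>::output_results() const
{
  // only read the (ghosted) configurational_forces vector here,
  // e.g. hand it to DataOut as in the output section above
}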
On 04 Feb 2017, at 23:09, 'Seyed Ali Mohseni' via deal.II User Group <dea...@googlegroups.com> wrote:

output_results()
pcout(std::cout, (Utilities::MPI::this_mpi_process(mpi_com) == 0)),
Maybe this helps: How do you compute "reaction forces" within deal.II?
Any ideas what could cause such behavior? I am really not enough of an expert yet to dive deeply into the deal.II parallel structures.