double
HyperbolicLevelIntegrator::advanceLevel(
const std::shared_ptr<hier::PatchLevel>& level,
const std::shared_ptr<hier::PatchHierarchy>& hierarchy,
const double current_time,
const double new_time,
const bool first_step,
const bool last_step,
const bool regrid_advance)
{
printf("start HyperbolicLevelIntegrator::advanceLevel.\n");
TBOX_ASSERT(level);
TBOX_ASSERT(hierarchy);
TBOX_ASSERT(current_time <= new_time);
TBOX_ASSERT_OBJDIM_EQUALITY2(*level, *hierarchy);
// HLI_RECORD_STATS is defined in HyperbolicLevelIntegrator.h
#ifdef HLI_RECORD_STATS
recordStatistics(*level, current_time);
#endif
if ( d_barrier_advance_level_sections ) level->getBoxLevel()->getMPI().Barrier();
t_advance_level->start();
const int level_number = level->getLevelNumber();
const double dt = new_time - current_time;
/*
* (1) Allocate data needed for advancing level.
* (2) Generate temporary communication schedule to fill ghost
* cells, if needed.
* (3) Fill ghost cell data.
* (4) Process flux storage before the advance.
*/
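/*
* Context note (standard SAMRAI usage, stated here as an assumption about
* how the variables were registered): scratch data carry ghost cells for
* the stage-wise flux computation, while current and new hold ghost-free
* time-level data; d_rk3 is an extra temporary context used below to keep
* the start-of-step solution for the Runge-Kutta stage combinations.
*/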
level->setTime(new_time, d_new_time_dep_data);
level->setTime(current_time, d_saved_var_scratch_data);
level->setTime(current_time, d_rk3_temp_data);
// Allocate new-time, scratch, and RK temporary storage on all patches of this level.
level->allocatePatchData(d_new_time_dep_data, new_time);
level->allocatePatchData(d_saved_var_scratch_data, current_time);
level->allocatePatchData(d_rk3_temp_data, current_time);
// Store the current solution in d_rk3 and seed the scratch space with the same data.
copyTimeDependentData(level, d_current, d_rk3);
copyTimeDependentData(level, d_current, d_scratch);
/*
* Loop through Runge-Kutta steps
*/
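/*
* The stage updates are assumed (not enforced here) to follow the usual
* Shu-Osher SSP Runge-Kutta form
*
*   u^(k+1) = alpha_1[k]*u^n + alpha_2[k]*u^(k) + beta[k]*dt*L(u^(k)),
*
* with u^n held in the d_rk3 context and u^(k) in d_scratch; the
* coefficients d_alpha_1, d_alpha_2, d_beta are simply passed through to
* the user-supplied conservativeDifferenceOnPatch(), which defines the
* actual update.
*/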
for (int rkstep = 0; rkstep < d_order; ++rkstep) {
t_advance_level_pre_integrate->start();
std::shared_ptr<xfer::RefineSchedule> fill_schedule;
if (!level->inHierarchy()) {
t_error_bdry_fill_create->start();
const hier::OverlapConnectorAlgorithm oca;
const int coarser_ln = level->getNextCoarserHierarchyLevelNumber();
if (coarser_ln < 0) {
// Don't use coarser level in boundary fill.
if (d_number_time_data_levels == 3) {
fill_schedule =
d_bdry_fill_advance_old->createSchedule(level,
d_patch_strategy);
} else {
fill_schedule =
d_bdry_fill_advance->createSchedule(level,
d_patch_strategy);
}
} else {
// Use coarser level in boundary fill.
if (d_number_time_data_levels == 3) {
fill_schedule =
d_bdry_fill_advance_old->createSchedule(level,
coarser_ln,
hierarchy,
d_patch_strategy);
} else {
fill_schedule =
d_bdry_fill_advance->createSchedule(level,
coarser_ln,
hierarchy,
d_patch_strategy);
}
}
t_error_bdry_fill_create->stop();
} else {
fill_schedule = d_bdry_sched_advance[level_number];
}
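/*
* A level that is not yet in the hierarchy (e.g. a temporary level built
* during regridding or error estimation) has no cached communication
* schedule, so one is created on the fly above; in-hierarchy levels reuse
* the schedules cached in d_bdry_sched_advance.
*/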
printf("1111 Finish fill_schedule.\n");
d_patch_strategy->setDataContext(d_scratch);
if (regrid_advance) {
t_error_bdry_fill_comm->start();
} else {
t_advance_bdry_fill_comm->start();
}
printf("1111 Finish t_advance_bdry_fill_comm->start().\n");
fill_schedule->fillData(current_time); // fills ghost cells; this triggers the patch strategy's setPhysicalBoundaryConditions().
if (regrid_advance) {
t_error_bdry_fill_comm->stop();
} else {
t_advance_bdry_fill_comm->stop();
}
printf("1111 Finish t_advance_bdry_fill_comm->stop().\n");
d_patch_strategy->clearDataContext();
fill_schedule.reset();
printf("1111 fill_schedule.reset().\n");
if ( d_barrier_advance_level_sections ) level->getBoxLevel()->getMPI().Barrier();
t_advance_level_pre_integrate->stop();
t_advance_level_integrate->start();
preprocessFluxData(level,
current_time,
new_time,
regrid_advance,
first_step,
last_step);
printf("1111 preprocessFluxData.\n");
/*
* (5) Call user-routine to pre-process state data, if needed.
* (6) Advance solution on all level patches (scratch storage).
* (7) Copy new solution from scratch to new storage.
* (8) Call user-routine to post-process state data, if needed.
*/
t_patch_num_kernel->start();
d_patch_strategy->preprocessAdvanceLevelState(level,
current_time,
dt,
first_step,
last_step,
regrid_advance);
t_patch_num_kernel->stop();
printf("5555 preprocessAdvanceLevelState.\n");
if ( d_barrier_advance_level_sections ) level->getBoxLevel()->getMPI().Barrier();
t_advance_level_patch_loop->start();
// setDataContext: d_data_context = d_scratch
d_patch_strategy->setDataContext(d_scratch);
// setDataContextNoGhost: d_data_context_no_ghosts = d_current
d_patch_strategy->setDataContextNoGhost(d_current);
for (hier::PatchLevel::iterator ip(level->begin());
ip != level->end(); ++ip) {
const std::shared_ptr<hier::Patch>& patch = *ip;
d_patch_strategy->printPatchData(*patch, current_time);
printf("before allocatePatchData!\n");
//patch->allocatePatchData(d_temp_var_scratch_data, current_time);
t_patch_num_kernel->start();
d_patch_strategy->computeFluxesOnPatch(*patch,
current_time,
dt);
t_patch_num_kernel->stop();
printf("6666 computeFluxesOnPatch.\n");
bool at_syncronization = false;
t_patch_num_kernel->start();
d_patch_strategy->conservativeDifferenceOnPatch(*patch,
current_time,
dt,
at_synchronization,
d_alpha_1[rkstep],
d_alpha_2[rkstep],
d_beta[rkstep]);
t_patch_num_kernel->stop();
printf("6666 conservativeDifferenceOnPatch.\n");
//patch->deallocatePatchData(d_temp_var_scratch_data);
}
d_patch_strategy->clearDataContext();
if ( d_barrier_advance_level_sections ) level->getBoxLevel()->getMPI().Barrier();
t_advance_level_patch_loop->stop();
level->setTime(new_time, d_saved_var_scratch_data);
level->setTime(new_time, d_flux_var_data);
copyTimeDependentData(level, d_scratch, d_new);
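// The stage result is copied from scratch into new storage every stage,
// so after the final stage d_new is expected to hold the fully advanced
// solution u^{n+1}.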
t_patch_num_kernel->start();
d_patch_strategy->postprocessAdvanceLevelState(level,
current_time,
dt,
first_step,
last_step,
regrid_advance);
t_patch_num_kernel->stop();
if ( d_barrier_advance_level_sections ) level->getBoxLevel()->getMPI().Barrier();
t_advance_level_integrate->stop();
}//end of RK loop
t_advance_level_post_integrate->start();
printf("9999.\n");
/*
* (9) If the level advance is not for regridding, we compute the next timestep:
*
* (a) If the dt computation is lagged (i.e., we use pre-advance data
* to compute timestep), we reset scratch space on patch interiors
* if needed. Then, we set the strategy context to current or scratch
* depending on whether ghost values are used to compute dt.
* (b) If the dt computation is not lagged (i.e., we use advanced data
* to compute timestep), we refill scratch space, including ghost
* data with new solution values if needed. Then, we set the strategy
* context to new or scratch depending on whether ghost values are
* used to compute dt.
* (c) Then, we loop over patches and compute the dt on each patch.
*/
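/*
* dt_next starts at the largest representable double and is reduced to the
* per-rank minimum over local patches here; the global MPI_MIN reduction
* and the CFL scaling are applied after the post-integrate phase below.
*/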
double dt_next = tbox::MathUtilities<double>::getMax();
if (!regrid_advance) {
t_advance_level_compute_dt->start();
if (d_lag_dt_computation) {
if (d_use_ghosts_for_dt) {
d_patch_strategy->setDataContext(d_scratch);
copyTimeDependentData(level, d_current, d_scratch);
} else {
d_patch_strategy->setDataContext(d_current);
}
} else {
if (d_use_ghosts_for_dt) {
if (!d_bdry_sched_advance_new[level_number]) {
TBOX_ERROR(
d_object_name << ": "
<< "Attempt to fill new ghost data for timestep "
<< "computation, but schedule not defined." << std::endl);
}
d_patch_strategy->setDataContext(d_scratch);
t_new_advance_bdry_fill_comm->start();
d_bdry_sched_advance_new[level_number]->fillData(new_time);
t_new_advance_bdry_fill_comm->stop();
} else {
d_patch_strategy->setDataContext(d_new);
}
}
for (hier::PatchLevel::iterator ip(level->begin());
ip != level->end(); ++ip) {
const std::shared_ptr<hier::Patch>& patch = *ip;
patch->allocatePatchData(d_temp_var_scratch_data, new_time);
// "false" argument indicates "initial_time" is false.
t_patch_num_kernel->start();
double patch_dt =
d_patch_strategy->computeStableDtOnPatch(*patch,
false,
new_time);
t_patch_num_kernel->stop();
dt_next = tbox::MathUtilities<double>::Min(dt_next, patch_dt);
patch->deallocatePatchData(d_temp_var_scratch_data);
}
d_patch_strategy->clearDataContext();
t_advance_level_compute_dt->stop();
} // !regrid_advance
level->deallocatePatchData(d_saved_var_scratch_data);
level->deallocatePatchData(d_rk3_temp_data);
postprocessFluxData(level,
regrid_advance,
first_step,
last_step);
if ( d_barrier_advance_level_sections ) level->getBoxLevel()->getMPI().Barrier();
t_advance_level_post_integrate->stop();
t_advance_level_sync->start();
if (d_distinguish_mpi_reduction_costs) {
hierarchy->getMPI().Barrier();
t_advance_level_sync->stop();
t_mpi_reductions->start();
}
double next_dt = dt_next;
const tbox::SAMRAI_MPI& mpi(hierarchy->getMPI());
if (mpi.getSize() > 1) {
mpi.AllReduce(&next_dt, 1, MPI_MIN);
}
next_dt *= d_cfl;
if (d_distinguish_mpi_reduction_costs) {
t_mpi_reductions->stop();
} else {
t_advance_level_sync->stop();
}
if ( d_barrier_advance_level_sections ) level->getBoxLevel()->getMPI().Barrier();
t_advance_level->stop();
printf("end HyperbolicLevelIntegrator::advanceLevel.\n");
return next_dt;
}
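/*
* Example (hypothetical driver, not part of this class): a caller would
* typically feed the returned timestep into the next advance, e.g.
*
*   double t = start_time;
*   double dt = initial_dt;
*   for (int step = 0; step < max_steps; ++step) {
*      const double t_new = t + dt;
*      dt = integrator->advanceLevel(hierarchy->getPatchLevel(0),
*                                    hierarchy, t, t_new,
*                                    step == 0,   // first_step
*                                    false,       // last_step
*                                    false);      // regrid_advance
*      t = t_new;
*   }
*
* Names such as integrator, hierarchy, start_time, initial_dt and
* max_steps are placeholders; a real driver (e.g. TimeRefinementIntegrator)
* also handles regridding and multi-level synchronization around this call.
*/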