https://bitbucket.org/enzo/enzo-dev/commits/00cdea5b78fb/
Changeset: 00cdea5b78fb
Branch: week-of-code
User: Philipp Grete
Date: 2016-03-17 13:54:49+00:00
Summary: Fresh MHDSGS framework
Affected #: 13 files
diff -r 48dfbd3d8632 -r 00cdea5b78fb src/enzo/Grid.h
--- a/src/enzo/Grid.h
+++ b/src/enzo/Grid.h
@@ -2392,6 +2392,34 @@
int FTStochasticForcing(int FieldDim); // WS
+ // pgrete: Jacobians to be used in SGS model
+ float *JacVel[MAX_DIMENSION][MAX_DIMENSION];
+ float *JacB[MAX_DIMENSION][MAX_DIMENSION];
+
+
+ float *FilteredFields[7]; // filtered fields: rho, xyz-vel, Bxyz
+
+ // the scale-similarity model needs mixed filtered quantities
+ float *FltrhoUU[6];
+ float *FltBB[6];
+ float *FltUB[3];
+
+ int SGSUtil_ComputeJacobian(float *Jac[][MAX_DIMENSION],float* field1,float* field2,float* field3);
+ int SGSUtil_ComputeMixedFilteredQuantities();
+ int SGSUtil_FilterFields();
+ int SGSAddEMFTerms(float **dU);
+ int SGSAddMomentumTerms(float **dU);
+ void SGSAddEMFERS2J2Term(float **EMF);
+ void SGSAddEMFERS2M2StarTerm(float **EMF);
+ void SGSAddEMFNLemfComprTerm(float **EMF);
+ void SGSAddTauNLuTerm(float **Tau);
+ void SGSAddTauNLuNormedEnS2StarTerm(float **Tau);
+ void SGSAddTauNLbTerm(float **Tau);
+ void SGSAddTauEVEnS2StarTerm(float **Tau);
+ void SGSAddTauSSuTerm(float **Tau);
+ void SGSAddTauSSbTerm(float **Tau);
+ void SGSAddEMFSSTerm(float **EMF);
+
/* Comoving coordinate expansion terms. */
int ComovingExpansionTerms();
diff -r 48dfbd3d8632 -r 00cdea5b78fb src/enzo/Grid_SGSAddEMFTerms.C
--- /dev/null
+++ b/src/enzo/Grid_SGSAddEMFTerms.C
@@ -0,0 +1,397 @@
+#include "preincludes.h"
+#include "macros_and_parameters.h"
+#include "typedefs.h"
+#include "global_data.h"
+#include "Fluxes.h"
+#include "GridList.h"
+#include "ExternalBoundary.h"
+#include "Grid.h"
+/* pure (unscaled) full (compressible) nonlinear model
+ * EMF = 1/12 * Delta^2 * eps_ijk * (u_j,l * B_k,l - (ln rho),l u_j,l B_k)
+ * see eq TODO of TODO for details
+ */
+void grid::SGSAddEMFNLemfComprTerm(float **EMF) {
+  if (debug)
+    printf("[%"ISYM"] grid::SGSAddEMFNLemfComprTerm start\n",MyProcessorNumber);
+
+  int DensNum, GENum, TENum, Vel1Num, Vel2Num, Vel3Num;
+  int B1Num, B2Num, B3Num, PhiNum;
+  this->IdentifyPhysicalQuantities(DensNum, GENum, Vel1Num, Vel2Num, Vel3Num,
+      TENum, B1Num, B2Num, B3Num, PhiNum);
+
+  // Use the explicitly filtered fields when an explicit filter scale is
+  // active; otherwise the grid-scale fields act as the (implicitly
+  // filtered) input.
+  float *rho, *Bx, *By, *Bz;
+  if (SGSFilterWidth > 1.) {
+    rho = FilteredFields[0];
+    Bx = FilteredFields[4];
+    By = FilteredFields[5];
+    Bz = FilteredFields[6];
+  } else {
+    rho = BaryonField[DensNum];
+    Bx = BaryonField[B1Num];
+    By = BaryonField[B2Num];
+    Bz = BaryonField[B3Num];
+  }
+
+  int size = 1;
+  int StartIndex[MAX_DIMENSION];
+  int EndIndex[MAX_DIMENSION];
+
+  for (int dim = 0; dim < MAX_DIMENSION; dim++) {
+    size *= GridDimension[dim];
+
+    /* we need the EMF in the first ghost zone as well
+     * as we'll take another derivative later on */
+    StartIndex[dim] = GridStartIndex[dim] - 1;
+    EndIndex[dim] = GridEndIndex[dim] + 1;
+  }
+
+
+  // model prefactor: 1/12 * C * Delta^2, with Delta the filter width in
+  // units of the (geometric-mean) cell width
+  float CDeltaSqr = 1./12. * SGScoeffNLemfCompr * pow(SGSFilterWidth,2.) *
+    pow(CellWidth[0][0]*CellWidth[1][0]*CellWidth[2][0],2./3.);
+
+  int igrid, ip1, im1, jp1, jm1, kp1, km1;
+  // central-difference prefactors 1/(2 dx) etc. for the ln(rho) gradient
+  float facX = 1. / (2. * CellWidth[0][0]);
+  float facY = 1. / (2. * CellWidth[1][0]);
+  float facZ = 1. / (2. * CellWidth[2][0]);
+  // partial derivatives of ln(rho)
+  float lnRhod[MAX_DIMENSION];
+
+  for (int k = StartIndex[2]; k <= EndIndex[2]; k++)
+    for (int j = StartIndex[1]; j <= EndIndex[1]; j++)
+      for (int i = StartIndex[0]; i <= EndIndex[0]; i++) {
+
+      // linear index of (i,j,k) and its six face neighbors
+      igrid = i + (j+k*GridDimension[1])*GridDimension[0];
+      ip1 = i+1 + (j+k*GridDimension[1])*GridDimension[0];
+      im1 = i-1 + (j+k*GridDimension[1])*GridDimension[0];
+      jp1 = i + (j+1+k*GridDimension[1])*GridDimension[0];
+      jm1 = i + (j-1+k*GridDimension[1])*GridDimension[0];
+      kp1 = i + (j+(k+1)*GridDimension[1])*GridDimension[0];
+      km1 = i + (j+(k-1)*GridDimension[1])*GridDimension[0];
+
+      // d(ln rho)/dx_l = (d rho/dx_l)/rho by the chain rule
+      lnRhod[X] = (rho[ip1] - rho[im1]) * facX / rho[igrid];
+      lnRhod[Y] = (rho[jp1] - rho[jm1]) * facY / rho[igrid];
+      lnRhod[Z] = (rho[kp1] - rho[km1]) * facZ / rho[igrid];
+
+      // the sum over l realizes the contraction over the repeated index in
+      // eps_ijk * (u_j,l B_k,l - (ln rho),l u_j,l B_k); each EMF component
+      // is one value of the free index i of the Levi-Civita symbol
+      for (int l = 0; l < MAX_DIMENSION; l++) {
+        EMF[X][igrid] += CDeltaSqr * (
+          JacVel[Y][l][igrid] * JacB[Z][l][igrid]
+          - JacVel[Z][l][igrid] * JacB[Y][l][igrid]
+          - lnRhod[l] * JacVel[Y][l][igrid] * Bz[igrid]
+          + lnRhod[l] * JacVel[Z][l][igrid] * By[igrid]);
+        EMF[Y][igrid] += CDeltaSqr * (
+          JacVel[Z][l][igrid] * JacB[X][l][igrid]
+          - JacVel[X][l][igrid] * JacB[Z][l][igrid]
+          - lnRhod[l] * JacVel[Z][l][igrid] * Bx[igrid]
+          + lnRhod[l] * JacVel[X][l][igrid] * Bz[igrid]);
+        EMF[Z][igrid] += CDeltaSqr * (
+          JacVel[X][l][igrid] * JacB[Y][l][igrid]
+          - JacVel[Y][l][igrid] * JacB[X][l][igrid]
+          - lnRhod[l] * JacVel[X][l][igrid] * By[igrid]
+          + lnRhod[l] * JacVel[Y][l][igrid] * Bx[igrid]);
+      }
+
+      }
+
+}
+
+/* eddy resistivity model scaled by Smagorinsky energies
+ * EMF = -C * Delta^2 * sqrt(|S|^2 + |J|^2/rho) * J
+ * see eq TODO of TODO for details
+ */
+void grid::SGSAddEMFERS2J2Term(float **EMF) {
+  if (debug)
+    printf("[%"ISYM"] grid::SGSAddEMFERS2J2Term start\n",MyProcessorNumber);
+
+  int DensNum, GENum, TENum, Vel1Num, Vel2Num, Vel3Num;
+  int B1Num, B2Num, B3Num, PhiNum;
+  this->IdentifyPhysicalQuantities(DensNum, GENum, Vel1Num, Vel2Num, Vel3Num,
+      TENum, B1Num, B2Num, B3Num, PhiNum);
+
+  // explicitly filtered density if a filter scale is active,
+  // grid-scale density otherwise
+  float* rho;
+  if (SGSFilterWidth > 1.) {
+    rho = FilteredFields[0];
+  } else {
+    rho = BaryonField[DensNum];
+  }
+
+  int size = 1;
+  int StartIndex[MAX_DIMENSION];
+  int EndIndex[MAX_DIMENSION];
+
+  for (int dim = 0; dim < MAX_DIMENSION; dim++) {
+    size *= GridDimension[dim];
+
+    /* we need the EMF in the first ghost zone as well
+     * as we'll take another derivative later on */
+    StartIndex[dim] = GridStartIndex[dim] - 1;
+    EndIndex[dim] = GridEndIndex[dim] + 1;
+  }
+
+
+  // -C * Delta^2 (the eddy-resistivity prefactor, including the sign)
+  float MinusCDeltaSqr = -SGScoeffERS2J2 * pow(SGSFilterWidth,2.) *
+    pow(CellWidth[0][0]*CellWidth[1][0]*CellWidth[2][0],2./3.);
+
+  int igrid;
+  float sqrtS2plusJ2overRho;
+
+  for (int k = StartIndex[2]; k <= EndIndex[2]; k++)
+    for (int j = StartIndex[1]; j <= EndIndex[1]; j++)
+      for (int i = StartIndex[0]; i <= EndIndex[0]; i++) {
+
+      igrid = i + (j+k*GridDimension[1])*GridDimension[0];
+
+      // sqrt(|S|^2 + |J|^2/rho): |S|^2 built from the symmetrized velocity
+      // Jacobian (rate-of-strain), |J|^2 from curl(B) via the antisymmetric
+      // part of the magnetic Jacobian
+      sqrtS2plusJ2overRho = pow(
+        2.*(pow(JacVel[X][X][igrid],2.) +
+            pow(JacVel[Y][Y][igrid],2.) +
+            pow(JacVel[Z][Z][igrid],2.)
+            )
+        + pow(JacVel[X][Y][igrid] + JacVel[Y][X][igrid],2.)
+        + pow(JacVel[Y][Z][igrid] + JacVel[Z][Y][igrid],2.)
+        + pow(JacVel[X][Z][igrid] + JacVel[Z][X][igrid],2.)
+        + (pow(JacB[Z][Y][igrid] - JacB[Y][Z][igrid],2.) +
+           pow(JacB[X][Z][igrid] - JacB[Z][X][igrid],2.) +
+           pow(JacB[Y][X][igrid] - JacB[X][Y][igrid],2.)
+           )/rho[igrid],1./2.);
+
+      // EMF += -C Delta^2 sqrt(...) * J, with J = curl(B) component-wise
+      EMF[X][igrid] += MinusCDeltaSqr * sqrtS2plusJ2overRho *
+        (JacB[Z][Y][igrid] - JacB[Y][Z][igrid]);
+      EMF[Y][igrid] += MinusCDeltaSqr * sqrtS2plusJ2overRho *
+        (JacB[X][Z][igrid] - JacB[Z][X][igrid]);
+      EMF[Z][igrid] += MinusCDeltaSqr * sqrtS2plusJ2overRho *
+        (JacB[Y][X][igrid] - JacB[X][Y][igrid]);
+
+      }
+
+}
+
+/* eddy resistivity model scaled by realiz. energies
+ * EMF = -C * Delta^2 * sqrt(|S*|^2 + |M|^2/rho) * J
+ * see eq TODO of TODO for details
+ */
+void grid::SGSAddEMFERS2M2StarTerm(float **EMF) {
+  if (debug)
+    printf("[%"ISYM"] grid::SGSAddEMFERS2M2StarTerm start\n",MyProcessorNumber);
+
+  int DensNum, GENum, TENum, Vel1Num, Vel2Num, Vel3Num;
+  int B1Num, B2Num, B3Num, PhiNum;
+  this->IdentifyPhysicalQuantities(DensNum, GENum, Vel1Num, Vel2Num, Vel3Num,
+      TENum, B1Num, B2Num, B3Num, PhiNum);
+
+  // explicitly filtered density if a filter scale is active,
+  // grid-scale density otherwise
+  float* rho;
+  if (SGSFilterWidth > 1.) {
+    rho = FilteredFields[0];
+  } else {
+    rho = BaryonField[DensNum];
+  }
+
+  int size = 1;
+  int StartIndex[MAX_DIMENSION];
+  int EndIndex[MAX_DIMENSION];
+
+  for (int dim = 0; dim < MAX_DIMENSION; dim++) {
+    size *= GridDimension[dim];
+
+    /* we need the EMF in the first ghost zone as well
+     * as we'll take another derivative later on */
+    StartIndex[dim] = GridStartIndex[dim] - 1;
+    EndIndex[dim] = GridEndIndex[dim] + 1;
+  }
+
+
+  // Delta^2 based on the geometric-mean cell width; hoisted out of the
+  // cell loop (it was previously recomputed per cell in the divB check)
+  float DeltaSqr = pow(CellWidth[0][0]*CellWidth[1][0]*CellWidth[2][0],2./3.);
+
+  // -C * Delta^2 (eddy-resistivity prefactor, including the sign)
+  float MinusCDeltaSqr = -SGScoeffERS2M2Star * pow(SGSFilterWidth,2.) * DeltaSqr;
+
+  int igrid;
+
+  /* magic with S |S| S*... could potentially handled by
+   * external function, should reduce CPU time, but increase memory usage
+   */
+  float traceSthird, traceMthird;
+  float sqrtS2StarplusM2overRho;
+  /* just for fun: how accurate is Dedner
+   * we count the number of cells where divB is dynamically important
+   * divB * Delta / |B| > 1.
+   */
+  int divBerror = 0;
+
+  for (int k = StartIndex[2]; k <= EndIndex[2]; k++)
+    for (int j = StartIndex[1]; j <= EndIndex[1]; j++)
+      for (int i = StartIndex[0]; i <= EndIndex[0]; i++) {
+
+      igrid = i + (j+k*GridDimension[1])*GridDimension[0];
+
+      // traces of the velocity and magnetic Jacobians (div u, div B) / 3,
+      // used to deviatorize the symmetrized Jacobians below
+      traceSthird = (JacVel[X][X][igrid] + JacVel[Y][Y][igrid] + JacVel[Z][Z][igrid])/3.;
+      traceMthird = (JacB[X][X][igrid] + JacB[Y][Y][igrid] + JacB[Z][Z][igrid])/3.;
+
+      // NOTE(review): this weights divB by Delta^2, not Delta as the
+      // comment above states -- confirm intended normalization
+      if (debug && (traceMthird*DeltaSqr*3./pow(
+              BaryonField[B1Num][igrid]*BaryonField[B1Num][igrid] +
+              BaryonField[B2Num][igrid]*BaryonField[B2Num][igrid] +
+              BaryonField[B3Num][igrid]*BaryonField[B3Num][igrid],1./2.) > 1.)) {
+        divBerror++;
+      }
+
+
+      // sqrt(|S*|^2 + |M*|^2/rho) from the trace-free symmetrized Jacobians
+      sqrtS2StarplusM2overRho = pow(
+        2.*(pow(JacVel[X][X][igrid]-traceSthird,2.) +
+            pow(JacVel[Y][Y][igrid]-traceSthird,2.) +
+            pow(JacVel[Z][Z][igrid]-traceSthird,2.)
+            )
+        + pow(JacVel[X][Y][igrid] + JacVel[Y][X][igrid],2.)
+        + pow(JacVel[Y][Z][igrid] + JacVel[Z][Y][igrid],2.)
+        + pow(JacVel[X][Z][igrid] + JacVel[Z][X][igrid],2.)
+        + (2.*(pow(JacB[X][X][igrid]-traceMthird,2.) +
+               pow(JacB[Y][Y][igrid]-traceMthird,2.) +
+               pow(JacB[Z][Z][igrid]-traceMthird,2.)
+               )
+           + pow(JacB[X][Y][igrid] + JacB[Y][X][igrid],2.)
+           + pow(JacB[Y][Z][igrid] + JacB[Z][Y][igrid],2.)
+           + pow(JacB[X][Z][igrid] + JacB[Z][X][igrid],2.)
+           )/rho[igrid],1./2.);
+
+      // EMF += -C Delta^2 sqrt(...) * J, with J = curl(B) component-wise
+      EMF[X][igrid] += MinusCDeltaSqr * sqrtS2StarplusM2overRho *
+        (JacB[Z][Y][igrid] - JacB[Y][Z][igrid]);
+      EMF[Y][igrid] += MinusCDeltaSqr * sqrtS2StarplusM2overRho *
+        (JacB[X][Z][igrid] - JacB[Z][X][igrid]);
+      EMF[Z][igrid] += MinusCDeltaSqr * sqrtS2StarplusM2overRho *
+        (JacB[Y][X][igrid] - JacB[X][Y][igrid]);
+
+      }
+  /* "\%" is an undefined escape sequence in C -- a literal percent sign in
+   * a printf format must be written "%%".  The ratio is also scaled by 100
+   * now so the printed number actually is the percentage the label claims. */
+  if (debug)
+    printf("[%"ISYM"] grid::SGSAddEMFERS2M2StarTerm divB error total: %"ISYM" |%%: %"FSYM"\n",
+        MyProcessorNumber,divBerror,
+        100. * (float) divBerror / (float)((EndIndex[0] + 1 - StartIndex[0])*(EndIndex[1] + 1 - StartIndex[1])*(EndIndex[2] + 1 - StartIndex[2])));
+
+}
+
+/* scale-similarity model
+ * EMF = flt(u x B) - flt(u) x flt(B)
+ * see eq TODO of TODO for details
+ */
+void grid::SGSAddEMFSSTerm(float **EMF) {
+  if (debug)
+    printf("[%"ISYM"] grid::SGSAddEMFSSTerm start\n",MyProcessorNumber);
+
+  /* include one ghost zone on each side: the caller takes another
+   * derivative of the EMF afterwards */
+  int lo[MAX_DIMENSION];
+  int hi[MAX_DIMENSION];
+
+  for (int dim = 0; dim < MAX_DIMENSION; dim++) {
+    lo[dim] = GridStartIndex[dim] - 1;
+    hi[dim] = GridEndIndex[dim] + 1;
+  }
+
+  // named aliases for the filtered primitives: flt(u) and flt(B)
+  float *ux = FilteredFields[1];
+  float *uy = FilteredFields[2];
+  float *uz = FilteredFields[3];
+  float *bx = FilteredFields[4];
+  float *by = FilteredFields[5];
+  float *bz = FilteredFields[6];
+
+  for (int kk = lo[2]; kk <= hi[2]; kk++)
+    for (int jj = lo[1]; jj <= hi[1]; jj++)
+      for (int ii = lo[0]; ii <= hi[0]; ii++) {
+
+        const int c = ii + (jj + kk*GridDimension[1])*GridDimension[0];
+
+        /* EMF += C * flt(u x B) - flt(u) x flt(B), component-wise
+         * (only the mixed filtered term carries the model coefficient) */
+        EMF[X][c] += SGScoeffSSemf * FltUB[X][c] - (uy[c]*bz[c] - uz[c]*by[c]);
+        EMF[Y][c] += SGScoeffSSemf * FltUB[Y][c] - (uz[c]*bx[c] - ux[c]*bz[c]);
+        EMF[Z][c] += SGScoeffSSemf * FltUB[Z][c] - (ux[c]*by[c] - uy[c]*bx[c]);
+
+      }
+
+}
+
+int grid::SGSAddEMFTerms(float **dU) {
+  // only the owning processor operates on this grid
+  if (ProcessorNumber != MyProcessorNumber) {
+    return SUCCESS;
+  }
+
+  // skip the very first (initialization) step
+  if (Time == 0.)
+    return SUCCESS;
+
+  if (debug)
+    printf("[%"ISYM"] grid::SGSAddEMFTerms start\n",MyProcessorNumber);
+
+  int DensNum, GENum, TENum, Vel1Num, Vel2Num, Vel3Num;
+  int B1Num, B2Num, B3Num, PhiNum;
+  this->IdentifyPhysicalQuantities(DensNum, GENum, Vel1Num, Vel2Num, Vel3Num,
+      TENum, B1Num, B2Num, B3Num, PhiNum);
+
+  int size = 1;
+  float *EMF[MAX_DIMENSION];
+
+  for (int dim = 0; dim < MAX_DIMENSION; dim++) {
+    size *= GridDimension[dim];
+  }
+
+  // scratch EMF components, zero-initialized; the individual closures
+  // below accumulate into them
+  for (int dim = 0; dim < MAX_DIMENSION; dim++) {
+    EMF[dim] = new float[size];
+    for (int i = 0; i < size; i++)
+      EMF[dim][i] = 0.;
+  }
+
+
+  // each model term is only evaluated if its coefficient is set
+  if (SGScoeffERS2J2 != 0.)
+    SGSAddEMFERS2J2Term(EMF);
+
+  if (SGScoeffERS2M2Star != 0.)
+    SGSAddEMFERS2M2StarTerm(EMF);
+
+  if (SGScoeffNLemfCompr != 0.)
+    SGSAddEMFNLemfComprTerm(EMF);
+
+  if (SGScoeffSSemf != 0.)
+    SGSAddEMFSSTerm(EMF);
+
+  int n = 0;
+  int igrid, ip1, im1, jp1, jm1, kp1, km1;
+  /* initialized so the debug printf after the loop reads defined values
+   * even if the active zone is empty (previously uninitialized reads) */
+  float BxIncr = 0., ByIncr = 0., BzIncr = 0., EtotIncr = 0.;
+
+  // central-difference prefactors for the curl of the EMF
+  float facX = 1. / (2. * CellWidth[0][0]);
+  float facY = 1. / (2. * CellWidth[1][0]);
+  float facZ = 1. / (2. * CellWidth[2][0]);
+
+  // dB/dt = curl(EMF); n indexes dU over active cells only, igrid the
+  // full grid including ghost zones
+  for (int k = GridStartIndex[2]; k <= GridEndIndex[2]; k++)
+    for (int j = GridStartIndex[1]; j <= GridEndIndex[1]; j++)
+      for (int i = GridStartIndex[0]; i <= GridEndIndex[0]; i++, n++) {
+
+      igrid = i + (j+k*GridDimension[1])*GridDimension[0];
+      ip1 = i+1 + (j+k*GridDimension[1])*GridDimension[0];
+      im1 = i-1 + (j+k*GridDimension[1])*GridDimension[0];
+      jp1 = i + (j+1+k*GridDimension[1])*GridDimension[0];
+      jm1 = i + (j-1+k*GridDimension[1])*GridDimension[0];
+      kp1 = i + (j+(k+1)*GridDimension[1])*GridDimension[0];
+      km1 = i + (j+(k-1)*GridDimension[1])*GridDimension[0];
+
+
+      // total-energy increment: B.dB + dB^2/2 for each component
+      BxIncr = dtFixed * ((EMF[2][jp1]-EMF[2][jm1])*facY - (EMF[1][kp1]-EMF[1][km1])*facZ);
+      EtotIncr = BaryonField[B1Num][igrid] * BxIncr + 0.5 * BxIncr * BxIncr;
+
+      ByIncr = dtFixed * ((EMF[0][kp1]-EMF[0][km1])*facZ - (EMF[2][ip1]-EMF[2][im1])*facX);
+      EtotIncr += BaryonField[B2Num][igrid] * ByIncr + 0.5 * ByIncr * ByIncr;
+
+      BzIncr = dtFixed * ((EMF[1][ip1]-EMF[1][im1])*facX - (EMF[0][jp1]-EMF[0][jm1])*facY);
+      EtotIncr += BaryonField[B3Num][igrid] * BzIncr + 0.5 * BzIncr * BzIncr;
+
+      dU[iBx][n] += BxIncr;
+      dU[iBy][n] += ByIncr;
+      dU[iBz][n] += BzIncr;
+      dU[iEtot][n] += EtotIncr;
+      }
+
+  if (debug)
+    printf("[%"ISYM"] grid::SGSAddEMFTerms end, last incr: %"FSYM" %"FSYM" %"FSYM" %"FSYM"\n",
+        MyProcessorNumber,BxIncr,ByIncr,BzIncr,EtotIncr);
+
+  for (int dim = 0; dim < MAX_DIMENSION; dim++) {
+    delete [] EMF[dim];
+  }
+
+  return SUCCESS;
+}
diff -r 48dfbd3d8632 -r 00cdea5b78fb src/enzo/Grid_SGSAddMomentumTerms.C
--- /dev/null
+++ b/src/enzo/Grid_SGSAddMomentumTerms.C
@@ -0,0 +1,491 @@
+#include "preincludes.h"
+#include "macros_and_parameters.h"
+#include "typedefs.h"
+#include "global_data.h"
+#include "Fluxes.h"
+#include "GridList.h"
+#include "ExternalBoundary.h"
+#include "Grid.h"
+
+/* pure (unscaled) nonlinear model for TauU (full)
+ * TauU = 1/12 * Delta^2 rho u_i,k u_j,k
+ * see eq TODO of TODO for details
+ */
+void grid::SGSAddTauNLuTerm(float **Tau) {
+  if (debug)
+    printf("[%"ISYM"] grid::SGSAddTauNLuTerm start\n",MyProcessorNumber);
+
+  int DensNum, GENum, TENum, Vel1Num, Vel2Num, Vel3Num;
+  int B1Num, B2Num, B3Num, PhiNum;
+  this->IdentifyPhysicalQuantities(DensNum, GENum, Vel1Num, Vel2Num, Vel3Num,
+      TENum, B1Num, B2Num, B3Num, PhiNum);
+
+  // explicitly filtered density if a filter scale is active,
+  // grid-scale density otherwise
+  float* rho;
+  if (SGSFilterWidth > 1.) {
+    rho = FilteredFields[0];
+  } else {
+    rho = BaryonField[DensNum];
+  }
+
+  int size = 1;
+  int StartIndex[MAX_DIMENSION];
+  int EndIndex[MAX_DIMENSION];
+
+  for (int dim = 0; dim < MAX_DIMENSION; dim++) {
+    size *= GridDimension[dim];
+
+    /* we need Tau in the first ghost zone as well
+     * as we'll take another derivative later on */
+    StartIndex[dim] = GridStartIndex[dim] - 1;
+    EndIndex[dim] = GridEndIndex[dim] + 1;
+  }
+
+
+  // model prefactor: 1/12 * C * Delta^2
+  float CDeltaSqr = 1./12. * SGScoeffNLu * pow(SGSFilterWidth,2.) *
+    pow(CellWidth[0][0]*CellWidth[1][0]*CellWidth[2][0],2./3.);
+
+  int igrid;
+
+  for (int k = StartIndex[2]; k <= EndIndex[2]; k++)
+    for (int j = StartIndex[1]; j <= EndIndex[1]; j++)
+      for (int i = StartIndex[0]; i <= EndIndex[0]; i++) {
+
+      igrid = i + (j+k*GridDimension[1])*GridDimension[0];
+
+      /* the sum over l contracts the repeated index in u_i,l u_j,l; only
+       * the six independent components of the symmetric tensor are stored */
+      for (int l = 0; l < MAX_DIMENSION; l++) {
+        Tau[XX][igrid] += CDeltaSqr * rho[igrid] * JacVel[X][l][igrid] * JacVel[X][l][igrid];
+        Tau[YY][igrid] += CDeltaSqr * rho[igrid] * JacVel[Y][l][igrid] * JacVel[Y][l][igrid];
+        Tau[ZZ][igrid] += CDeltaSqr * rho[igrid] * JacVel[Z][l][igrid] * JacVel[Z][l][igrid];
+        Tau[XY][igrid] += CDeltaSqr * rho[igrid] * JacVel[X][l][igrid] * JacVel[Y][l][igrid];
+        Tau[YZ][igrid] += CDeltaSqr * rho[igrid] * JacVel[Y][l][igrid] * JacVel[Z][l][igrid];
+        Tau[XZ][igrid] += CDeltaSqr * rho[igrid] * JacVel[X][l][igrid] * JacVel[Z][l][igrid];
+      }
+
+      }
+
+}
+
+/* nonlinear model for TauU (full) and scaled by realiz. energy
+ * TauU = 2 C Delta^2 rho |S*|^2 (u_i,k u_j,k)/(u_l,s u_l,s)
+ * see eq TODO of TODO for details
+ */
+void grid::SGSAddTauNLuNormedEnS2StarTerm(float **Tau) {
+  if (debug)
+    printf("[%"ISYM"] grid::SGSAddTauNLuNormedEnS2StarTerm start\n",MyProcessorNumber);
+
+  int DensNum, GENum, TENum, Vel1Num, Vel2Num, Vel3Num;
+  int B1Num, B2Num, B3Num, PhiNum;
+  this->IdentifyPhysicalQuantities(DensNum, GENum, Vel1Num, Vel2Num, Vel3Num,
+      TENum, B1Num, B2Num, B3Num, PhiNum);
+
+  // explicitly filtered density if a filter scale is active,
+  // grid-scale density otherwise
+  float* rho;
+  if (SGSFilterWidth > 1.) {
+    rho = FilteredFields[0];
+  } else {
+    rho = BaryonField[DensNum];
+  }
+
+  int size = 1;
+  int StartIndex[MAX_DIMENSION];
+  int EndIndex[MAX_DIMENSION];
+
+  for (int dim = 0; dim < MAX_DIMENSION; dim++) {
+    size *= GridDimension[dim];
+
+    /* we need Tau in the first ghost zone as well
+     * as we'll take another derivative later on */
+    StartIndex[dim] = GridStartIndex[dim] - 1;
+    EndIndex[dim] = GridEndIndex[dim] + 1;
+  }
+
+
+  // model prefactor: 2 * C * Delta^2
+  float TwoCDeltaSqr = 2. * SGScoeffNLuNormedEnS2Star * pow(SGSFilterWidth,2.) *
+    pow(CellWidth[0][0]*CellWidth[1][0]*CellWidth[2][0],2./3.);
+  float traceSthird;
+  float SStarSqr;
+  float JacNorm;
+  float prefactor;
+
+  int igrid;
+
+  for (int k = StartIndex[2]; k <= EndIndex[2]; k++)
+    for (int j = StartIndex[1]; j <= EndIndex[1]; j++)
+      for (int i = StartIndex[0]; i <= EndIndex[0]; i++) {
+
+      igrid = i + (j+k*GridDimension[1])*GridDimension[0];
+
+      // (div u)/3, used to deviatorize S below
+      traceSthird = (JacVel[X][X][igrid] + JacVel[Y][Y][igrid] + JacVel[Z][Z][igrid])/3.;
+
+      // |S*|^2 from the trace-free symmetrized velocity Jacobian
+      SStarSqr = (
+        2.*(pow(JacVel[X][X][igrid]-traceSthird,2.) +
+            pow(JacVel[Y][Y][igrid]-traceSthird,2.) +
+            pow(JacVel[Z][Z][igrid]-traceSthird,2.)
+            )
+        + pow(JacVel[X][Y][igrid] + JacVel[Y][X][igrid],2.)
+        + pow(JacVel[Y][Z][igrid] + JacVel[Z][Y][igrid],2.)
+        + pow(JacVel[X][Z][igrid] + JacVel[Z][X][igrid],2.));
+
+      // Frobenius norm squared of the full velocity Jacobian, u_l,s u_l,s
+      JacNorm = 0.;
+      for (int l = 0; l < MAX_DIMENSION; l++)
+        for (int s = 0; s < MAX_DIMENSION; s++)
+          JacNorm += JacVel[l][s][igrid] * JacVel[l][s][igrid];
+
+      /* in a region of exactly uniform velocity both SStarSqr and JacNorm
+       * vanish; previously the 0/0 below produced NaNs that propagated
+       * into Tau, so the (vanishing) contribution is skipped explicitly */
+      if (JacNorm == 0.)
+        continue;
+
+      prefactor = TwoCDeltaSqr * rho[igrid] * SStarSqr / JacNorm;
+
+      // contraction over l of (u_i,l u_j,l), normalized by |Jac u|^2
+      for (int l = 0; l < MAX_DIMENSION; l++) {
+        Tau[XX][igrid] += prefactor * JacVel[X][l][igrid] * JacVel[X][l][igrid];
+        Tau[YY][igrid] += prefactor * JacVel[Y][l][igrid] * JacVel[Y][l][igrid];
+        Tau[ZZ][igrid] += prefactor * JacVel[Z][l][igrid] * JacVel[Z][l][igrid];
+        Tau[XY][igrid] += prefactor * JacVel[X][l][igrid] * JacVel[Y][l][igrid];
+        Tau[YZ][igrid] += prefactor * JacVel[Y][l][igrid] * JacVel[Z][l][igrid];
+        Tau[XZ][igrid] += prefactor * JacVel[X][l][igrid] * JacVel[Z][l][igrid];
+      }
+
+      }
+
+}
+
+/* pure (unscaled) nonlinear model for TauB (full)
+ * TauB = 1/12 * Delta^2 B_i,k B_j,k
+ * see eq TODO of TODO for details
+ */
+void grid::SGSAddTauNLbTerm(float **Tau) {
+  if (debug)
+    printf("[%"ISYM"] grid::SGSAddTauNLbTerm start\n",MyProcessorNumber);
+
+  int size = 1;
+  int StartIndex[MAX_DIMENSION];
+  int EndIndex[MAX_DIMENSION];
+
+  for (int dim = 0; dim < MAX_DIMENSION; dim++) {
+    size *= GridDimension[dim];
+
+    /* we need Tau in the first ghost zone as well
+     * as we'll take another derivative later on */
+    StartIndex[dim] = GridStartIndex[dim] - 1;
+    EndIndex[dim] = GridEndIndex[dim] + 1;
+  }
+
+
+  // model prefactor: 1/12 * C * Delta^2
+  float CDeltaSqr = 1./12. * SGScoeffNLb * pow(SGSFilterWidth,2.) *
+    pow(CellWidth[0][0]*CellWidth[1][0]*CellWidth[2][0],2./3.);
+
+  int igrid;
+  float turbMagPres;
+
+  for (int k = StartIndex[2]; k <= EndIndex[2]; k++)
+    for (int j = StartIndex[1]; j <= EndIndex[1]; j++)
+      for (int i = StartIndex[0]; i <= EndIndex[0]; i++) {
+
+      igrid = i + (j+k*GridDimension[1])*GridDimension[0];
+
+      /* the magnetic contribution enters with opposite sign (Maxwell
+       * stress); turbMagPres accumulates |Jac B|^2 over the same loop so
+       * the isotropic part can be restored on the diagonal afterwards */
+      turbMagPres = 0.;
+      for (int l = 0; l < MAX_DIMENSION; l++) {
+        turbMagPres += JacB[X][l][igrid] * JacB[X][l][igrid]
+          + JacB[Y][l][igrid] * JacB[Y][l][igrid]
+          + JacB[Z][l][igrid] * JacB[Z][l][igrid];
+
+        Tau[XX][igrid] -= CDeltaSqr * JacB[X][l][igrid] * JacB[X][l][igrid];
+        Tau[YY][igrid] -= CDeltaSqr * JacB[Y][l][igrid] * JacB[Y][l][igrid];
+        Tau[ZZ][igrid] -= CDeltaSqr * JacB[Z][l][igrid] * JacB[Z][l][igrid];
+        Tau[XY][igrid] -= CDeltaSqr * JacB[X][l][igrid] * JacB[Y][l][igrid];
+        Tau[YZ][igrid] -= CDeltaSqr * JacB[Y][l][igrid] * JacB[Z][l][igrid];
+        Tau[XZ][igrid] -= CDeltaSqr * JacB[X][l][igrid] * JacB[Z][l][igrid];
+      }
+
+      // turbulent magnetic-pressure term on the diagonal: +|Jac B|^2/2 delta_ij
+      Tau[XX][igrid] += CDeltaSqr * turbMagPres/2.;
+      Tau[YY][igrid] += CDeltaSqr * turbMagPres/2.;
+      Tau[ZZ][igrid] += CDeltaSqr * turbMagPres/2.;
+
+      }
+
+}
+
+
+/* eddy viscosity model for full tau scaled by realiz. energies
+ * Tau = -2 C_1 Delta^2 rho |S*| S* + 2/3 C_2 delta_ij Delta^2 rho |S*|^2
+ * see eq TODO of TODO for details
+ */
+void grid::SGSAddTauEVEnS2StarTerm(float **Tau) {
+  if (debug)
+    printf("[%"ISYM"] grid::SGSAddTauEVEnS2StarTerm start\n",MyProcessorNumber);
+
+  int DensNum, GENum, TENum, Vel1Num, Vel2Num, Vel3Num;
+  int B1Num, B2Num, B3Num, PhiNum;
+  this->IdentifyPhysicalQuantities(DensNum, GENum, Vel1Num, Vel2Num, Vel3Num,
+      TENum, B1Num, B2Num, B3Num, PhiNum);
+
+  // explicitly filtered density if a filter scale is active,
+  // grid-scale density otherwise
+  float* rho;
+  if (SGSFilterWidth > 1.) {
+    rho = FilteredFields[0];
+  } else {
+    rho = BaryonField[DensNum];
+  }
+
+  int size = 1;
+  int StartIndex[MAX_DIMENSION];
+  int EndIndex[MAX_DIMENSION];
+
+  for (int dim = 0; dim < MAX_DIMENSION; dim++) {
+    size *= GridDimension[dim];
+
+    /* we need Tau in the first ghost zone as well
+     * as we'll take another derivative later on */
+    StartIndex[dim] = GridStartIndex[dim] - 1;
+    EndIndex[dim] = GridEndIndex[dim] + 1;
+  }
+
+
+  // -2 C_1 Delta^2 (eddy-viscosity part) and 2/3 C_2 Delta^2 (trace part)
+  float Minus2C1DeltaSqr = -2. * SGScoeffEVStarEnS2Star * pow(SGSFilterWidth,2.) *
+    pow(CellWidth[0][0]*CellWidth[1][0]*CellWidth[2][0],2./3.);
+  float TwoThirdC2DeltaSqr = 2./3. * SGScoeffEnS2StarTrace * pow(SGSFilterWidth,2.) *
+    pow(CellWidth[0][0]*CellWidth[1][0]*CellWidth[2][0],2./3.);
+
+  int igrid;
+
+  /* magic with S |S| S*... could potentially handled by
+   * external function, should reduce CPU time, but increase memory usage
+   */
+  float traceSthird;
+  float SStarSqr;
+  float normSStar;
+
+  for (int k = StartIndex[2]; k <= EndIndex[2]; k++)
+    for (int j = StartIndex[1]; j <= EndIndex[1]; j++)
+      for (int i = StartIndex[0]; i <= EndIndex[0]; i++) {
+
+      igrid = i + (j+k*GridDimension[1])*GridDimension[0];
+
+      // (div u)/3, used to deviatorize S below
+      traceSthird = (JacVel[X][X][igrid] + JacVel[Y][Y][igrid] + JacVel[Z][Z][igrid])/3.;
+
+      // |S*|^2 from the trace-free symmetrized velocity Jacobian
+      SStarSqr = (
+        2.*(pow(JacVel[X][X][igrid]-traceSthird,2.) +
+            pow(JacVel[Y][Y][igrid]-traceSthird,2.) +
+            pow(JacVel[Z][Z][igrid]-traceSthird,2.)
+            )
+        + pow(JacVel[X][Y][igrid] + JacVel[Y][X][igrid],2.)
+        + pow(JacVel[Y][Z][igrid] + JacVel[Z][Y][igrid],2.)
+        + pow(JacVel[X][Z][igrid] + JacVel[Z][X][igrid],2.));
+
+      // |S*| hoisted: previously pow(SStarSqr,1./2.) was evaluated six
+      // times per cell
+      normSStar = sqrt(SStarSqr);
+
+      // diagonal: -2 C_1 Delta^2 rho |S*| S*_ii + 2/3 C_2 Delta^2 rho |S*|^2
+      Tau[XX][igrid] += Minus2C1DeltaSqr * rho[igrid] * normSStar * (
+        JacVel[X][X][igrid] - traceSthird) + TwoThirdC2DeltaSqr * rho[igrid] * SStarSqr;
+      Tau[YY][igrid] += Minus2C1DeltaSqr * rho[igrid] * normSStar * (
+        JacVel[Y][Y][igrid] - traceSthird) + TwoThirdC2DeltaSqr * rho[igrid] * SStarSqr;
+      Tau[ZZ][igrid] += Minus2C1DeltaSqr * rho[igrid] * normSStar * (
+        JacVel[Z][Z][igrid] - traceSthird) + TwoThirdC2DeltaSqr * rho[igrid] * SStarSqr;
+
+      /* off-diagonal: -2 C_1 Delta^2 rho |S*| S*_ij; Tau[XZ] replaces the
+       * former Tau[ZX] for consistency with the sibling Tau functions
+       * (symmetric 6-component storage, transposed indices alias) */
+      Tau[XY][igrid] += Minus2C1DeltaSqr * rho[igrid] * normSStar * (
+        JacVel[X][Y][igrid] + JacVel[Y][X][igrid])/2.;
+      Tau[YZ][igrid] += Minus2C1DeltaSqr * rho[igrid] * normSStar * (
+        JacVel[Y][Z][igrid] + JacVel[Z][Y][igrid])/2.;
+      Tau[XZ][igrid] += Minus2C1DeltaSqr * rho[igrid] * normSStar * (
+        JacVel[Z][X][igrid] + JacVel[X][Z][igrid])/2.;
+
+      }
+}
+
+/* scale-similarity model for TauU
+ * TauU = flt(rho) * (flt(u_i u_j) - flt(u_i) * flt(u_j))
+ * see eq TODO of TODO for details
+ */
+void grid::SGSAddTauSSuTerm(float **Tau) {
+  if (debug)
+    printf("[%"ISYM"] grid::SGSAddTauSSuTerm start\n",MyProcessorNumber);
+
+  int size = 1;
+  int StartIndex[MAX_DIMENSION];
+  int EndIndex[MAX_DIMENSION];
+
+  for (int dim = 0; dim < MAX_DIMENSION; dim++) {
+    size *= GridDimension[dim];
+
+    /* we need Tau in the first ghost zone as well
+     * as we'll take another derivative later on */
+    StartIndex[dim] = GridStartIndex[dim] - 1;
+    EndIndex[dim] = GridEndIndex[dim] + 1;
+  }
+
+
+  int igrid;
+
+  /* FilteredFields layout (set by SGSUtil_FilterFields):
+   * [0] = flt(rho), [1..3] = flt(u), [4..6] = flt(B);
+   * FltrhoUU holds flt(rho u_i u_j) for the six symmetric components */
+  for (int k = StartIndex[2]; k <= EndIndex[2]; k++)
+    for (int j = StartIndex[1]; j <= EndIndex[1]; j++)
+      for (int i = StartIndex[0]; i <= EndIndex[0]; i++) {
+
+      igrid = i + (j+k*GridDimension[1])*GridDimension[0];
+
+      // TauU_ij += C * (flt(rho u_i u_j) - flt(rho) flt(u_i) flt(u_j))
+      Tau[XX][igrid] += SGScoeffSSu * (FltrhoUU[XX][igrid] -
+        FilteredFields[0][igrid] * FilteredFields[1][igrid] * FilteredFields[1][igrid]);
+      Tau[YY][igrid] += SGScoeffSSu * (FltrhoUU[YY][igrid] -
+        FilteredFields[0][igrid] * FilteredFields[2][igrid] * FilteredFields[2][igrid]);
+      Tau[ZZ][igrid] += SGScoeffSSu * (FltrhoUU[ZZ][igrid] -
+        FilteredFields[0][igrid] * FilteredFields[3][igrid] * FilteredFields[3][igrid]);
+      Tau[XY][igrid] += SGScoeffSSu * (FltrhoUU[XY][igrid] -
+        FilteredFields[0][igrid] * FilteredFields[1][igrid] * FilteredFields[2][igrid]);
+      Tau[YZ][igrid] += SGScoeffSSu * (FltrhoUU[YZ][igrid] -
+        FilteredFields[0][igrid] * FilteredFields[2][igrid] * FilteredFields[3][igrid]);
+      Tau[XZ][igrid] += SGScoeffSSu * (FltrhoUU[XZ][igrid] -
+        FilteredFields[0][igrid] * FilteredFields[1][igrid] * FilteredFields[3][igrid]);
+
+      }
+
+}
+
+/* scale-similarity model for TauB
+ * TauU = (flt(B_i B_j) - flt(B_i) * flt(B_j))
+ * see eq TODO of TODO for details
+ */
+void grid::SGSAddTauSSbTerm(float **Tau) {
+  if (debug)
+    printf("[%"ISYM"] grid::SGSAddTauSSbTerm start\n",MyProcessorNumber);
+
+  /* include one ghost zone on each side: the caller takes another
+   * derivative of Tau afterwards */
+  int lo[MAX_DIMENSION];
+  int hi[MAX_DIMENSION];
+
+  for (int dim = 0; dim < MAX_DIMENSION; dim++) {
+    lo[dim] = GridStartIndex[dim] - 1;
+    hi[dim] = GridEndIndex[dim] + 1;
+  }
+
+  // named aliases for the filtered magnetic field components flt(B)
+  float *bx = FilteredFields[4];
+  float *by = FilteredFields[5];
+  float *bz = FilteredFields[6];
+
+  for (int kk = lo[2]; kk <= hi[2]; kk++)
+    for (int jj = lo[1]; jj <= hi[1]; jj++)
+      for (int ii = lo[0]; ii <= hi[0]; ii++) {
+
+        const int c = ii + (jj + kk*GridDimension[1])*GridDimension[0];
+
+        // TauB_ij += C * (flt(B_i B_j) - flt(B_i) flt(B_j))
+        Tau[XX][c] += SGScoeffSSb * (FltBB[XX][c] - bx[c]*bx[c]);
+        Tau[YY][c] += SGScoeffSSb * (FltBB[YY][c] - by[c]*by[c]);
+        Tau[ZZ][c] += SGScoeffSSb * (FltBB[ZZ][c] - bz[c]*bz[c]);
+        Tau[XY][c] += SGScoeffSSb * (FltBB[XY][c] - bx[c]*by[c]);
+        Tau[YZ][c] += SGScoeffSSb * (FltBB[YZ][c] - by[c]*bz[c]);
+        Tau[XZ][c] += SGScoeffSSb * (FltBB[XZ][c] - bx[c]*bz[c]);
+
+      }
+
+}
+
+int grid::SGSAddMomentumTerms(float **dU) {
+  // only the owning processor operates on this grid
+  if (ProcessorNumber != MyProcessorNumber) {
+    return SUCCESS;
+  }
+
+  // skip the very first (initialization) step
+  if (Time == 0.)
+    return SUCCESS;
+
+  if (debug)
+    printf("[%"ISYM"] grid::SGSAddMomentumTerms start\n",MyProcessorNumber);
+
+  int DensNum, GENum, TENum, Vel1Num, Vel2Num, Vel3Num;
+  int B1Num, B2Num, B3Num, PhiNum;
+  this->IdentifyPhysicalQuantities(DensNum, GENum, Vel1Num, Vel2Num, Vel3Num,
+      TENum, B1Num, B2Num, B3Num, PhiNum);
+
+  // explicitly filtered density if a filter scale is active,
+  // grid-scale density otherwise (used in the energy increment below)
+  float* rho;
+  if (SGSFilterWidth > 1.) {
+    rho = FilteredFields[0];
+  } else {
+    rho = BaryonField[DensNum];
+  }
+
+  int size = 1;
+  float *Tau[6];
+
+  for (int dim = 0; dim < MAX_DIMENSION; dim++) {
+    size *= GridDimension[dim];
+  }
+
+  // scratch symmetric SGS stress tensor (6 components), zero-initialized;
+  // the individual closures below accumulate into it
+  for (int dim = 0; dim < 6; dim++) {
+    Tau[dim] = new float[size];
+    for (int i = 0; i < size; i++)
+      Tau[dim][i] = 0.;
+  }
+
+
+  // each model term is only evaluated if its coefficient is set
+  if (SGScoeffNLu != 0.)
+    SGSAddTauNLuTerm(Tau);
+
+  if (SGScoeffNLb != 0.)
+    SGSAddTauNLbTerm(Tau);
+
+  if ((SGScoeffEVStarEnS2Star != 0.) || (SGScoeffEnS2StarTrace != 0.))
+    SGSAddTauEVEnS2StarTerm(Tau);
+
+  if (SGScoeffNLuNormedEnS2Star != 0.)
+    SGSAddTauNLuNormedEnS2StarTerm(Tau);
+
+  if (SGScoeffSSu != 0.)
+    SGSAddTauSSuTerm(Tau);
+
+  if (SGScoeffSSb != 0.)
+    SGSAddTauSSbTerm(Tau);
+
+
+  int n = 0;
+  int igrid, ip1, im1, jp1, jm1, kp1, km1;
+  /* initialized so the debug printf after the loop reads defined values
+   * even if the active zone is empty (previously uninitialized reads) */
+  float MomxIncr = 0., MomyIncr = 0., MomzIncr = 0., EtotIncr = 0.;
+
+  // central-difference prefactors for the divergence of Tau
+  float facX = 1. / (2. * CellWidth[0][0]);
+  float facY = 1. / (2. * CellWidth[1][0]);
+  float facZ = 1. / (2. * CellWidth[2][0]);
+
+  // d(rho u)/dt = -div(Tau); n indexes dU over active cells only, igrid
+  // the full grid including ghost zones
+  for (int k = GridStartIndex[2]; k <= GridEndIndex[2]; k++)
+    for (int j = GridStartIndex[1]; j <= GridEndIndex[1]; j++)
+      for (int i = GridStartIndex[0]; i <= GridEndIndex[0]; i++, n++) {
+
+      igrid = i + (j+k*GridDimension[1])*GridDimension[0];
+      ip1 = i+1 + (j+k*GridDimension[1])*GridDimension[0];
+      im1 = i-1 + (j+k*GridDimension[1])*GridDimension[0];
+      jp1 = i + (j+1+k*GridDimension[1])*GridDimension[0];
+      jm1 = i + (j-1+k*GridDimension[1])*GridDimension[0];
+      kp1 = i + (j+(k+1)*GridDimension[1])*GridDimension[0];
+      km1 = i + (j+(k-1)*GridDimension[1])*GridDimension[0];
+
+
+      // energy increment per component: u.dp + dp^2/(2 rho)
+      MomxIncr = - dtFixed * (
+        (Tau[XX][ip1] - Tau[XX][im1])*facX +
+        (Tau[XY][jp1] - Tau[XY][jm1])*facY +
+        (Tau[XZ][kp1] - Tau[XZ][km1])*facZ);
+      EtotIncr = BaryonField[Vel1Num][igrid] * MomxIncr + 0.5 / rho[igrid] * MomxIncr * MomxIncr;
+
+      MomyIncr = - dtFixed * (
+        (Tau[YX][ip1] - Tau[YX][im1])*facX +
+        (Tau[YY][jp1] - Tau[YY][jm1])*facY +
+        (Tau[YZ][kp1] - Tau[YZ][km1])*facZ);
+      EtotIncr += BaryonField[Vel2Num][igrid] * MomyIncr + 0.5 / rho[igrid] * MomyIncr * MomyIncr;
+
+      MomzIncr = - dtFixed * (
+        (Tau[ZX][ip1] - Tau[ZX][im1])*facX +
+        (Tau[ZY][jp1] - Tau[ZY][jm1])*facY +
+        (Tau[ZZ][kp1] - Tau[ZZ][km1])*facZ);
+      EtotIncr += BaryonField[Vel3Num][igrid] * MomzIncr + 0.5 / rho[igrid] * MomzIncr * MomzIncr;
+
+      dU[ivx][n] += MomxIncr;
+      dU[ivy][n] += MomyIncr;
+      dU[ivz][n] += MomzIncr;
+      dU[iEtot][n] += EtotIncr;
+      }
+
+  if (debug)
+    printf("[%"ISYM"] grid::SGSAddMomentumTerms end, last incr: %"FSYM" %"FSYM" %"FSYM" %"FSYM"\n",
+        MyProcessorNumber,MomxIncr,MomyIncr,MomzIncr,EtotIncr);
+
+  for (int dim = 0; dim < 6; dim++) {
+    delete [] Tau[dim];
+  }
+
+  return SUCCESS;
+}
diff -r 48dfbd3d8632 -r 00cdea5b78fb src/enzo/Grid_SGSUtilities.C
--- /dev/null
+++ b/src/enzo/Grid_SGSUtilities.C
@@ -0,0 +1,266 @@
+#include "preincludes.h"
+#include <stdlib.h>
+#include "macros_and_parameters.h"
+#include "typedefs.h"
+#include "global_data.h"
+#include "Fluxes.h"
+#include "GridList.h"
+#include "ExternalBoundary.h"
+#include "Grid.h"
+
+
+int grid::SGSUtil_FilterFields() {
+ if (ProcessorNumber != MyProcessorNumber) {
+ return SUCCESS;
+ }
+
+ int DensNum, GENum, TENum, Vel1Num, Vel2Num, Vel3Num,
+ B1Num, B2Num, B3Num, PhiNum;
+ this->IdentifyPhysicalQuantities(DensNum, GENum, Vel1Num, Vel2Num, Vel3Num,
+ TENum, B1Num, B2Num, B3Num, PhiNum);
+
+ int size = 1;
+ int StartIndex[MAX_DIMENSION];
+ int EndIndex[MAX_DIMENSION];
+
+ for (int dim = 0; dim < MAX_DIMENSION; dim++) {
+ size *= GridDimension[dim];
+
+ /* we need the filtered fields in the second ghost zone as well
+ * as we need derivatives in first ghost zone */
+ StartIndex[dim] = GridStartIndex[dim] - 2;
+ EndIndex[dim] = GridEndIndex[dim] + 2;
+ }
+
+ for (int m = 0; m < 7; m++)
+ if (FilteredFields[m] == NULL) {
+ FilteredFields[m] = new float[size];
+ for (int o = 0; o < size; o++)
+ FilteredFields[m][o] = 0.;
+ }
+
+ int N = SGSFilterStencil/2;
+ int igrid, ifilter;
+ float totalWeight;
+
+ /* this is !highly! inefficient, just making sure it's working */
+ for (int k = StartIndex[2]; k <= EndIndex[2]; k++)
+ for (int j = StartIndex[1]; j <= EndIndex[1]; j++)
+ for (int i = StartIndex[0]; i <= EndIndex[0]; i++) {
+
+ igrid = i + (j+k*GridDimension[1])*GridDimension[0];
+
+ for (int l = 0; l < 7; l++)
+ FilteredFields[l][igrid] = 0.;
+
+ for (int l = -N; l <= N; l++)
+ for (int m = -N; m <= N; m++)
+ for (int n = -N; n <= N; n++) {
+
+
+ ifilter = i + l + (j + m + (k+n)*GridDimension[1])*GridDimension[0];
+ totalWeight = SGSFilterWeights[ABS(l)] * SGSFilterWeights[ABS(m)] * SGSFilterWeights[ABS(n)];
+
+ // rho
+ FilteredFields[0][igrid] += totalWeight * BaryonField[DensNum][ifilter];
+
+ // prepare mass weighted velocity fields
+ FilteredFields[1][igrid] += totalWeight * BaryonField[DensNum][ifilter]*BaryonField[Vel1Num][ifilter];
+ FilteredFields[2][igrid] += totalWeight * BaryonField[DensNum][ifilter]*BaryonField[Vel2Num][ifilter];
+ FilteredFields[3][igrid] += totalWeight * BaryonField[DensNum][ifilter]*BaryonField[Vel3Num][ifilter];
+
+ // magnetic fields
+ FilteredFields[4][igrid] += totalWeight * BaryonField[B1Num][ifilter];
+ FilteredFields[5][igrid] += totalWeight * BaryonField[B2Num][ifilter];
+ FilteredFields[6][igrid] += totalWeight * BaryonField[B3Num][ifilter];
+ }
+
+ // now that the density is filtered, we can finalize mass-weighted filtering
+ FilteredFields[1][igrid] /= FilteredFields[0][igrid];
+ FilteredFields[2][igrid] /= FilteredFields[0][igrid];
+ FilteredFields[3][igrid] /= FilteredFields[0][igrid];
+ }
+
+
+ return SUCCESS;
+}
+
+int grid::SGSUtil_ComputeJacobian(float *Jac[][MAX_DIMENSION],float *field1,float* field2,float* field3) {
+ if (ProcessorNumber != MyProcessorNumber) {
+ return SUCCESS;
+ }
+
+ int size = 1;
+ int StartIndex[MAX_DIMENSION];
+ int EndIndex[MAX_DIMENSION];
+
+ for (int dim = 0; dim < MAX_DIMENSION; dim++) {
+ size *= GridDimension[dim];
+
+ /* we need the Jacobians in the first ghost zone as well
+ * as we'll take another derivative later on */
+ StartIndex[dim] = GridStartIndex[dim] - 1;
+ EndIndex[dim] = GridEndIndex[dim] + 1;
+ }
+
+ for (int m = 0; m < MAX_DIMENSION; m++)
+ for (int n = 0; n < MAX_DIMENSION; n++) {
+ if (Jac[m][n] == NULL) {
+ Jac[m][n] = new float[size];
+ for (int o = 0; o < size; o++)
+ Jac[m][n][o] = 0.;
+ }
+ }
+
+
+ int igrid, ip1, im1, jp1, jm1, kp1, km1;
+ float facX = 1. / (2. * CellWidth[0][0]);
+ float facY = 1. / (2. * CellWidth[1][0]);
+ float facZ = 1. / (2. * CellWidth[2][0]);
+
+ for (int k = StartIndex[2]; k <= EndIndex[2]; k++)
+ for (int j = StartIndex[1]; j <= EndIndex[1]; j++)
+ for (int i = StartIndex[0]; i <= EndIndex[0]; i++) {
+
+ igrid = i + (j+k*GridDimension[1])*GridDimension[0];
+ ip1 = i+1 + (j+k*GridDimension[1])*GridDimension[0];
+ im1 = i-1 + (j+k*GridDimension[1])*GridDimension[0];
+ jp1 = i + (j+1+k*GridDimension[1])*GridDimension[0];
+ jm1 = i + (j-1+k*GridDimension[1])*GridDimension[0];
+ kp1 = i + (j+(k+1)*GridDimension[1])*GridDimension[0];
+ km1 = i + (j+(k-1)*GridDimension[1])*GridDimension[0];
+
+ // xdx
+ Jac[X][X][igrid] = (field1[ip1] - field1[im1]) * facX;
+ // xdy
+ Jac[X][Y][igrid] = (field1[jp1] - field1[jm1]) * facY;
+ // xdz
+ Jac[X][Z][igrid] = (field1[kp1] - field1[km1]) * facZ;
+
+ // ydx
+ Jac[Y][X][igrid] = (field2[ip1] - field2[im1]) * facX;
+ // ydy
+ Jac[Y][Y][igrid] = (field2[jp1] - field2[jm1]) * facY;
+ // ydz
+ Jac[Y][Z][igrid] = (field2[kp1] - field2[km1]) * facZ;
+
+ // zdx
+ Jac[Z][X][igrid] = (field3[ip1] - field3[im1]) * facX;
+ // zdy
+ Jac[Z][Y][igrid] = (field3[jp1] - field3[jm1]) * facY;
+ // zdz
+ Jac[Z][Z][igrid] = (field3[kp1] - field3[km1]) * facZ;
+
+ }
+
+
+ return SUCCESS;
+}
+
+
+int grid::SGSUtil_ComputeMixedFilteredQuantities() {
+
+ if (ProcessorNumber != MyProcessorNumber) {
+ return SUCCESS;
+ }
+
+ int DensNum, GENum, TENum, Vel1Num, Vel2Num, Vel3Num,
+ B1Num, B2Num, B3Num, PhiNum;
+ this->IdentifyPhysicalQuantities(DensNum, GENum, Vel1Num, Vel2Num, Vel3Num,
+ TENum, B1Num, B2Num, B3Num, PhiNum);
+
+ int size = 1;
+ int StartIndex[MAX_DIMENSION];
+ int EndIndex[MAX_DIMENSION];
+
+ for (int dim = 0; dim < MAX_DIMENSION; dim++) {
+ size *= GridDimension[dim];
+
+ /* we need the mixed terms in the first ghost zone as well
+ * as we'll take another derivative later on */
+ StartIndex[dim] = GridStartIndex[dim] - 1;
+ EndIndex[dim] = GridEndIndex[dim] + 1;
+ }
+
+ for (int m = 0; m < 6; m++) {
+ if (FltrhoUU[m] == NULL) {
+ FltrhoUU[m] = new float[size];
+ for (int o = 0; o < size; o++)
+ FltrhoUU[m][o] = 0.;
+ }
+ if (FltBB[m] == NULL) {
+ FltBB[m] = new float[size];
+ for (int o = 0; o < size; o++)
+ FltBB[m][o] = 0.;
+ }
+ }
+ for (int m = 0; m < 3; m++) {
+ if (FltUB[m] == NULL) {
+ FltUB[m] = new float[size];
+ for (int o = 0; o < size; o++)
+ FltUB[m][o] = 0.;
+ }
+ }
+
+ int N = SGSFilterStencil/2;
+ int igrid, ifilter;
+ float totalWeight;
+
+ /* this is !highly! inefficient, just making sure it's working */
+ for (int k = StartIndex[2]; k <= EndIndex[2]; k++)
+ for (int j = StartIndex[1]; j <= EndIndex[1]; j++)
+ for (int i = StartIndex[0]; i <= EndIndex[0]; i++) {
+
+ igrid = i + (j+k*GridDimension[1])*GridDimension[0];
+
+ for (int l = 0; l < 6; l++) {
+ FltrhoUU[l][igrid] = 0.;
+ FltBB[l][igrid] = 0.;
+ }
+ for (int l = 0; l < 3; l++)
+ FltUB[l][igrid] = 0.;
+
+ for (int l = -N; l <= N; l++)
+ for (int m = -N; m <= N; m++)
+ for (int n = -N; n <= N; n++) {
+
+
+ ifilter = i + l + (j + m + (k+n)*GridDimension[1])*GridDimension[0];
+ totalWeight = SGSFilterWeights[ABS(l)] * SGSFilterWeights[ABS(m)] * SGSFilterWeights[ABS(n)];
+
+ FltrhoUU[XX][igrid] += totalWeight * BaryonField[DensNum][ifilter] *
+ BaryonField[Vel1Num][ifilter] * BaryonField[Vel1Num][ifilter];
+ FltrhoUU[YY][igrid] += totalWeight * BaryonField[DensNum][ifilter] *
+ BaryonField[Vel2Num][ifilter] * BaryonField[Vel2Num][ifilter];
+ FltrhoUU[ZZ][igrid] += totalWeight * BaryonField[DensNum][ifilter] *
+ BaryonField[Vel3Num][ifilter] * BaryonField[Vel3Num][ifilter];
+ FltrhoUU[XY][igrid] += totalWeight * BaryonField[DensNum][ifilter] *
+ BaryonField[Vel1Num][ifilter] * BaryonField[Vel2Num][ifilter];
+ FltrhoUU[YZ][igrid] += totalWeight * BaryonField[DensNum][ifilter] *
+ BaryonField[Vel2Num][ifilter] * BaryonField[Vel3Num][ifilter];
+ FltrhoUU[XZ][igrid] += totalWeight * BaryonField[DensNum][ifilter] *
+ BaryonField[Vel1Num][ifilter] * BaryonField[Vel3Num][ifilter];
+
+ FltBB[XX][igrid] += totalWeight * BaryonField[B1Num][ifilter] * BaryonField[B1Num][ifilter];
+ FltBB[YY][igrid] += totalWeight * BaryonField[B2Num][ifilter] * BaryonField[B2Num][ifilter];
+ FltBB[ZZ][igrid] += totalWeight * BaryonField[B3Num][ifilter] * BaryonField[B3Num][ifilter];
+ FltBB[XY][igrid] += totalWeight * BaryonField[B1Num][ifilter] * BaryonField[B2Num][ifilter];
+ FltBB[YZ][igrid] += totalWeight * BaryonField[B2Num][ifilter] * BaryonField[B3Num][ifilter];
+ FltBB[XZ][igrid] += totalWeight * BaryonField[B1Num][ifilter] * BaryonField[B3Num][ifilter];
+
+ FltUB[X][igrid] += totalWeight * (
+ BaryonField[Vel2Num][ifilter] * BaryonField[B3Num][ifilter] -
+ BaryonField[Vel3Num][ifilter] * BaryonField[B2Num][ifilter]);
+ FltUB[Y][igrid] += totalWeight * (
+ BaryonField[Vel3Num][ifilter] * BaryonField[B1Num][ifilter] -
+ BaryonField[Vel1Num][ifilter] * BaryonField[B3Num][ifilter]);
+ FltUB[Z][igrid] += totalWeight * (
+ BaryonField[Vel1Num][ifilter] * BaryonField[B2Num][ifilter] -
+ BaryonField[Vel2Num][ifilter] * BaryonField[B1Num][ifilter]);
+
+ }
+
+ }
+
+ return SUCCESS;
+}
diff -r 48dfbd3d8632 -r 00cdea5b78fb src/enzo/Grid_constructor.C
--- a/src/enzo/Grid_constructor.C
+++ b/src/enzo/Grid_constructor.C
@@ -66,7 +66,23 @@
}
PhaseFctInitEven = NULL; // WS
PhaseFctInitOdd = NULL; // WS
-
+
+ for (i = 0; i < MAX_DIMENSION; i++)
+ for (j = 0; j < MAX_DIMENSION; j++) {
+ JacVel[i][j] = NULL;
+ JacB[i][j] = NULL;
+ }
+
+ for (i = 0; i < 7; i++)
+ FilteredFields[i] = NULL;
+
+ for (i = 0; i < 6; i++) {
+ FltrhoUU[i] = NULL;
+ FltBB[i] = NULL;
+ }
+ for (i = 0; i < 3; i++)
+ FltUB[i] = NULL;
+
ParticleAcceleration[MAX_DIMENSION] = NULL;
/* clear MAX_NUMBER_OF_BARYON_FIELDS vectors & [][MAX_DIMENSION] matricies */
diff -r 48dfbd3d8632 -r 00cdea5b78fb src/enzo/Grid_destructor.C
--- a/src/enzo/Grid_destructor.C
+++ b/src/enzo/Grid_destructor.C
@@ -36,7 +36,7 @@
grid::~grid()
{
- int i;
+ int i,j;
/* Error check. */
@@ -63,6 +63,41 @@
if (PhaseFctInitEven != NULL) delete[] PhaseFctInitEven;
if (PhaseFctInitOdd != NULL) delete[] PhaseFctInitOdd;
+ for (i = 0; i < MAX_DIMENSION; i++)
+ for (j = 0; j < MAX_DIMENSION; j++) {
+ if (JacVel[i][j] != NULL) {
+ delete [] JacVel[i][j];
+ JacVel[i][j] = NULL;
+ }
+
+ if (JacB[i][j] != NULL) {
+ delete [] JacB[i][j];
+ JacB[i][j] = NULL;
+ }
+ }
+
+ for (i = 0; i < 7; i++) {
+ if (FilteredFields[i] != NULL)
+ delete [] FilteredFields[i];
+ FilteredFields[i] = NULL;
+ }
+
+ for (i = 0; i < 6; i++) {
+ if (FltrhoUU[i] != NULL)
+ delete [] FltrhoUU[i];
+ FltrhoUU[i] = NULL;
+
+ if (FltBB[i] != NULL)
+ delete [] FltBB[i];
+ FltBB[i] = NULL;
+ }
+
+ for (i = 0; i < 3; i++) {
+ if (FltUB[i] != NULL)
+ delete [] FltUB[i];
+ FltUB[i] = NULL;
+ }
+
delete ParticleAcceleration[MAX_DIMENSION];
for (i = 0; i < MAX_NUMBER_OF_BARYON_FIELDS; i++) {
diff -r 48dfbd3d8632 -r 00cdea5b78fb src/enzo/Make.config.objects
--- a/src/enzo/Make.config.objects
+++ b/src/enzo/Make.config.objects
@@ -963,7 +963,10 @@
Grid_FTStochasticForcing.o \
DrivenFlowInitialize.o \
Grid_DrivenFlowInitializeGrid.o \
- Grid_Phases.o
+ Grid_Phases.o \
+ Grid_SGSUtilities.o \
+ Grid_SGSAddEMFTerms.o \
+ Grid_SGSAddMomentumTerms.o
#-----------------------------------------------------------------------
diff -r 48dfbd3d8632 -r 00cdea5b78fb src/enzo/ReadParameterFile.C
--- a/src/enzo/ReadParameterFile.C
+++ b/src/enzo/ReadParameterFile.C
@@ -531,6 +531,23 @@
DrivenFlowVelocity, DrivenFlowVelocity+1, DrivenFlowVelocity+2);
ret += sscanf(line, "DrivenFlowAutoCorrl = %"FSYM"%"FSYM"%"FSYM,
DrivenFlowAutoCorrl, DrivenFlowAutoCorrl+1, DrivenFlowAutoCorrl+2);
+
+ ret += sscanf(line, "UseSGSModel = %"ISYM, &UseSGSModel);
+ ret += sscanf(line, "SGSFilterStencil = %"ISYM, &SGSFilterStencil);
+ ret += sscanf(line, "SGSFilterWidth = %"FSYM, &SGSFilterWidth);
+ ret += sscanf(line, "SGSFilterWeights = %"FSYM"%"FSYM"%"FSYM"%"FSYM,
+ &SGSFilterWeights[0],&SGSFilterWeights[1],&SGSFilterWeights[2],&SGSFilterWeights[3]);
+ ret += sscanf(line, "SGScoeffERS2J2 = %"FSYM, &SGScoeffERS2J2);
+ ret += sscanf(line, "SGScoeffERS2M2Star = %"FSYM, &SGScoeffERS2M2Star);
+ ret += sscanf(line, "SGScoeffEVStarEnS2Star = %"FSYM, &SGScoeffEVStarEnS2Star);
+ ret += sscanf(line, "SGScoeffEnS2StarTrace = %"FSYM, &SGScoeffEnS2StarTrace);
+ ret += sscanf(line, "SGScoeffNLemfCompr = %"FSYM, &SGScoeffNLemfCompr);
+ ret += sscanf(line, "SGScoeffNLu = %"FSYM, &SGScoeffNLu);
+ ret += sscanf(line, "SGScoeffNLuNormedEnS2Star = %"FSYM, &SGScoeffNLuNormedEnS2Star);
+ ret += sscanf(line, "SGScoeffNLb = %"FSYM, &SGScoeffNLb);
+ ret += sscanf(line, "SGScoeffSSu = %"FSYM, &SGScoeffSSu);
+ ret += sscanf(line, "SGScoeffSSb = %"FSYM, &SGScoeffSSb);
+ ret += sscanf(line, "SGScoeffSSemf = %"FSYM, &SGScoeffSSemf);
#ifdef USE_GRACKLE
/* Grackle chemistry parameters */
@@ -1382,6 +1399,29 @@
DrivenFlowSeed);
}
+ /* In order to use filtered fields we need additional ghost zones */
+
+ if (SGSFilterStencil/2 + 2 > NumberOfGhostZones)
+ ENZO_FAIL("SGS filtering needs additional ghost zones!\n");
+
+ if (SGScoeffERS2J2 != 0. ||
+ SGScoeffERS2M2Star != 0. ||
+ SGScoeffEVStarEnS2Star != 0. ||
+ SGScoeffEnS2StarTrace != 0. ||
+ SGScoeffNLemfCompr != 0. ||
+ SGScoeffNLu != 0. ||
+ SGScoeffNLuNormedEnS2Star != 0. ||
+ SGScoeffNLb != 0.)
+
+ SGSNeedJacobians = 1;
+
+ if (SGScoeffSSu != 0. ||
+ SGScoeffSSb != 0. ||
+ SGScoeffSSemf != 0.)
+
+ SGSNeedMixedFilteredQuantities = 1;
+
+
/* Now we know which hydro solver we're using, we can assign the
default Riemann solver and flux reconstruction methods. These
diff -r 48dfbd3d8632 -r 00cdea5b78fb src/enzo/SetDefaultGlobalValues.C
--- a/src/enzo/SetDefaultGlobalValues.C
+++ b/src/enzo/SetDefaultGlobalValues.C
@@ -402,6 +402,22 @@
DrivenFlowDomainLength[dim] = 0.0;
}
+ UseSGSModel = 0; // off
+ SGSFilterStencil = 0;
+ SGSNeedJacobians = 0;
+ SGSNeedMixedFilteredQuantities = 0;
+ SGSFilterWidth = 0.; // off
+ for (i = 0; i < 4; i++)
+ SGSFilterWeights[i] = 0.;
+ SGScoeffERS2J2 = 0.0; // off
+ SGScoeffERS2M2Star = 0.0; // off
+ SGScoeffEVStarEnS2Star = 0.0; // off
+ SGScoeffEnS2StarTrace = 0.0; // off
+ SGScoeffNLemfCompr = 0.0; // off
+ SGScoeffNLu = 0.0; // off
+ SGScoeffNLuNormedEnS2Star = 0.0; // off
+ SGScoeffNLb = 0.0; // off
+
RadiativeCooling = FALSE; // off
RadiativeCoolingModel = 1; //1=cool_rates.in table lookup
//3=Koyama&Inutsuka 2002
diff -r 48dfbd3d8632 -r 00cdea5b78fb src/enzo/WriteParameterFile.C
--- a/src/enzo/WriteParameterFile.C
+++ b/src/enzo/WriteParameterFile.C
@@ -490,6 +490,19 @@
fprintf(fptr, "DrivenFlowProfile = %"ISYM"\n", DrivenFlowProfile);
if (DrivenFlowProfile)
Forcing.WriteParameters(fptr);
+ fprintf(fptr, "UseSGSModel = %"ISYM"\n", UseSGSModel);
+ fprintf(fptr, "SGSFilterWidth = %"FSYM"\n", SGSFilterWidth);
+ fprintf(fptr, "SGSFilterStencil = %"ISYM"\n", SGSFilterStencil);
+ fprintf(fptr, "SGSFilterWeights = %"FSYM" %"FSYM" %"FSYM" %"FSYM"\n",
+ SGSFilterWeights[0],SGSFilterWeights[1],SGSFilterWeights[2],SGSFilterWeights[3]);
+ fprintf(fptr, "SGScoeffERS2J2 = %"FSYM"\n", SGScoeffERS2J2);
+ fprintf(fptr, "SGScoeffERS2M2Star = %"FSYM"\n", SGScoeffERS2M2Star);
+ fprintf(fptr, "SGScoeffEVStarEnS2Star = %"FSYM"\n", SGScoeffEVStarEnS2Star);
+ fprintf(fptr, "SGScoeffEnS2StarTrace = %"FSYM"\n", SGScoeffEnS2StarTrace);
+ fprintf(fptr, "SGScoeffNLemfCompr = %"FSYM"\n", SGScoeffNLemfCompr);
+ fprintf(fptr, "SGScoeffNLu = %"FSYM"\n", SGScoeffNLu);
+ fprintf(fptr, "SGScoeffNLuNormedEnS2Star = %"FSYM"\n", SGScoeffNLuNormedEnS2Star);
+ fprintf(fptr, "SGScoeffNLb = %"FSYM"\n", SGScoeffNLb);
#ifdef USE_GRACKLE
/* Grackle chemistry parameters */
fprintf(fptr, "use_grackle = %d\n", grackle_data.use_grackle);
diff -r 48dfbd3d8632 -r 00cdea5b78fb src/enzo/global_data.h
--- a/src/enzo/global_data.h
+++ b/src/enzo/global_data.h
@@ -397,6 +397,25 @@
EXTERN float DrivenFlowVelocity[MAX_DIMENSION];
EXTERN float DrivenFlowDomainLength[MAX_DIMENSION];
+/* Subgrid-scale model variables */
+EXTERN int UseSGSModel;
+EXTERN int SGSFilterStencil;
+EXTERN int SGSNeedJacobians;
+EXTERN int SGSNeedMixedFilteredQuantities;
+EXTERN float SGSFilterWidth;
+EXTERN float SGSFilterWeights[4];
+EXTERN float SGScoeffERS2J2;
+EXTERN float SGScoeffERS2M2Star;
+EXTERN float SGScoeffEVStarEnS2Star;
+EXTERN float SGScoeffEnS2StarTrace;
+EXTERN float SGScoeffNLemfCompr;
+EXTERN float SGScoeffNLu;
+EXTERN float SGScoeffNLuNormedEnS2Star;
+EXTERN float SGScoeffNLb;
+EXTERN float SGScoeffSSu;
+EXTERN float SGScoeffSSb;
+EXTERN float SGScoeffSSemf;
+
/* Multi-species rate equation flag and associated data. */
EXTERN int MultiSpecies;
diff -r 48dfbd3d8632 -r 00cdea5b78fb src/enzo/hydro_rk/Grid_MHDSourceTerms.C
--- a/src/enzo/hydro_rk/Grid_MHDSourceTerms.C
+++ b/src/enzo/hydro_rk/Grid_MHDSourceTerms.C
@@ -331,6 +331,62 @@
}
}
}
+
+ if (UseSGSModel) {
+ if (SGSFilterWidth > 1.) {
+ if (this->SGSUtil_FilterFields() == FAIL) {
+ fprintf(stderr, "grid::MHDSourceTerms: Error in SGSUtil_FilterFields.\n");
+ return FAIL;
+ }
+
+
+ if (SGSNeedJacobians) {
+ if (this->SGSUtil_ComputeJacobian(JacVel,FilteredFields[1],FilteredFields[2],FilteredFields[3]) == FAIL) {
+ fprintf(stderr, "grid::MHDSourceTerms: Error in SGSUtil_ComputeJacobian(Vel).\n");
+ return FAIL;
+ }
+
+ if (this->SGSUtil_ComputeJacobian(JacB,FilteredFields[4],FilteredFields[5],FilteredFields[6]) == FAIL) {
+ fprintf(stderr, "grid::MHDSourceTerms: Error in SGSUtil_ComputeJacobian(B).\n");
+ return FAIL;
+ }
+ }
+
+ if (SGSNeedMixedFilteredQuantities) {
+ if (this->SGSUtil_ComputeMixedFilteredQuantities() == FAIL) {
+ fprintf(stderr, "grid::MHDSourceTerms: Error in SGSUtil_ComputeMixedFilteredQuantities().\n");
+ return FAIL;
+ }
+
+ }
+
+ } else {
+
+ /* we don't need a special check for SGSNeedJacobians here as all models apart
+ * from the scale-similarity model need Jacobians and the scale-similarity model
+ * always has SGSFilterWidth > 1.
+ */
+ if (this->SGSUtil_ComputeJacobian(JacVel,BaryonField[Vel1Num],BaryonField[Vel2Num],BaryonField[Vel3Num]) == FAIL) {
+ fprintf(stderr, "grid::MHDSourceTerms: Error in SGSUtil_ComputeJacobian(Vel).\n");
+ return FAIL;
+ }
+
+ if (this->SGSUtil_ComputeJacobian(JacB,BaryonField[B1Num],BaryonField[B2Num],BaryonField[B3Num]) == FAIL) {
+ fprintf(stderr, "grid::MHDSourceTerms: Error in SGSUtil_ComputeJacobian(B).\n");
+ return FAIL;
+ }
+ }
+
+ if (this->SGSAddMomentumTerms(dU) == FAIL) {
+ fprintf(stderr, "grid::MHDSourceTerms: Error in SGSAddMomentumTerms(dU).\n");
+ return FAIL;
+ }
+
+ if (this->SGSAddEMFTerms(dU) == FAIL) {
+ fprintf(stderr, "grid::MHDSourceTerms: Error in SGSAddEMFTerms(dU).\n");
+ return FAIL;
+ }
+ }
/* Add centrifugal force for the shearing box */
diff -r 48dfbd3d8632 -r 00cdea5b78fb src/enzo/typedefs.h
--- a/src/enzo/typedefs.h
+++ b/src/enzo/typedefs.h
@@ -194,6 +194,22 @@
Parabolic = 2,
Band = 3;
+const enum_type
+/* indices used for vectors/Jacobians in SGS model */
+ X = 0,
+ Y = 1,
+ Z = 2,
+/* indices used for symmetric tensors */
+ XX = 0,
+ YY = 1,
+ ZZ = 2,
+ XY = 3,
+ YZ = 4,
+ XZ = 5,
+ YX = 3,
+ ZY = 4,
+ ZX = 5;
+
/* These are the different types of fluid boundary conditions. */
const boundary_type
https://bitbucket.org/enzo/enzo-dev/commits/7c420b8c801c/
Changeset: 7c420b8c801c
Branch: week-of-code
User: pgrete
Date: 2016-07-14 10:01:38+00:00
Summary: Merged enzo/enzo-dev into week-of-code
Affected #: 560 files
diff -r 00cdea5b78fb -r 7c420b8c801c .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -1,6 +1,9 @@
bbf0a2ffbd22c4fbecf946c9c96e6c4fac5cbdae woc_pre_fld_merge
48b4e9d9d6b90f703e48e621b488136be2a0e9cf woc_fld_merge
b86d8ba026d6a0ec30f15d8134add1e55fae2958 Wise10_GalaxyBirth
+c54b5698992759890ea799291b2653dbfa4f57cb enzo-2.1.0
+dc0d21ccb50a439003a531170e901bb718658463 enzo-2.1.1
+8f256686b9380b7f69492e995fac53ee0a88d98e enzo-2.0-release
2d90aa38e06f00a531db45a43225cde1faf093f2 enzo-2.2
a444b00827d3d4351dceea8197f1e446b9ada0da enzogold0001
bb3f1f2f445f7eb6792fe5144f5a0f1d0470a78e enzogold0002
@@ -9,3 +12,5 @@
0000000000000000000000000000000000000000 enzo-2.3
0000000000000000000000000000000000000000 enzo-2.3
afb6ed40470bb505ab36a52c1bb1dd2944e3d9b9 enzo-2.3
+d073462b1884d9653fc6dc2511a8bcdbb281993f enzo-2.4
+2984068d220f8fce3470447e9c26be383ba48c9f enzo-2.5
diff -r 00cdea5b78fb -r 7c420b8c801c doc/manual/source/parameters/hydro.rst
--- a/doc/manual/source/parameters/hydro.rst
+++ b/doc/manual/source/parameters/hydro.rst
@@ -175,6 +175,11 @@
``PPMSteepeningParameter`` (external)
A PPM modification designed to sharpen contact discontinuities. It
is either on (1) or off (0). Default: 0
+``SmallRho`` (external)
+ Minimum value for density in code units. This is enforced in euler.F
+ when using the PPM solver (``HydroMethod`` = 0) or in
+ hydro_rk/EvolveLevel_RK.C when ``HydroMethod`` is 3 or 4. Not enforced
+ in other hydrodynamics methods. Default: 1e-30
``ZEUSQuadraticArtificialViscosity`` (external)
This is the quadratic artificial viscosity parameter C2 of Stone &
Norman, and corresponds (roughly) to the number of zones over which
@@ -324,8 +329,6 @@
This parameter is used to add resistivity and thereby update magnetic fields in some set-ups; see ComputeResistivity in hydro_rk/Grid_AddResistivity.C. Default: 0
``UsePhysicalUnit`` (external)
For some test problems (mostly in hydro_rk), the relevant parameters could be defined in physical CGS units. Default: 0
-``SmallRho`` (external)
- Minimum value for density in hydro_rk/EvolveLevel_RK.C. Default: 1e-30 (note that the default value assumes UsePhysicalUnit = 1)
``SmallT`` (external)
Minimum value for temperature in hydro_rk/EvolveLevel_RK.C. Default: 1e-10 (note that the default value assumes UsePhysicalUnit = 1)
``SmallP``
diff -r 00cdea5b78fb -r 7c420b8c801c doc/manual/source/parameters/radiation.rst
--- a/doc/manual/source/parameters/radiation.rst
+++ b/doc/manual/source/parameters/radiation.rst
@@ -55,8 +55,8 @@
``RadiationFieldRedshift`` (external)
This parameter specifies the redshift at which the radiation field
is calculated. If a UV radiation background is used in a
- non-cosmological simulation, this needs to be defined. Default:
- (undefined)
+ non-cosmological simulation, this needs to be defined. Negative
+ redshifts are permitted. Default: (undefined)
``RadiationRedshiftOn`` (external)
The redshift at which the UV
background turns on. Default: 7.0.
diff -r 00cdea5b78fb -r 7c420b8c801c doc/manual/source/user_guide/EnzoTestSuite.rst
--- a/doc/manual/source/user_guide/EnzoTestSuite.rst
+++ b/doc/manual/source/user_guide/EnzoTestSuite.rst
@@ -72,8 +72,10 @@
::
- $ cd <enzo_root>/src/enzo
- $ make default
+ $ cd <enzo_root>
+ $ ./configure
+ $ cd ./src/enzo
+ $ make load-config-allphysics
$ make clean
$ make
@@ -81,17 +83,31 @@
since the enzo.exe will be symbolically linked from the src/enzo directory
into each test problem directory before tests are run.
+This build configuration requires that the Hypre and Grackle libraries are
+installed and visible in your compiler's search paths. If you do not have these
+libraries available, then you can set:
+
+::
+
+ $ make grackle-no
+ $ make hypre-no
+
+.. note::
+
+ If Enzo is compiled without support for the grackle and hypre libraries, tests
+ of Enzo modules that depend on these libraries will likely fail.
+
2. **Get the correct yt version** The enzo tests are generated and compared
-using the yt analysis suite. You must be using yt 2.6.3 in order for the test
-suite to work. The test suite has not yet been updated to work with yt 3.0 and
-newer releases. If you do not yet have yt, visit http://yt-project.org/#getyt
-for installation instructions. If you already have yt and yt is in your path,
-make sure you're using yt 2.6.3 by running the following commands:
+using the yt analysis suite. You must be using yt 3.3.0 or newer in order for
+the test suite to work. If you do not yet have yt, visit
+http://yt-project.org/#getyt for installation instructions. If you already have
+yt and yt is in your path, make sure you are using the latest verion of yt by
+running the following commands:
::
$ cd /path/to/yt_mercurial_repository
- $ hg update yt-2.x
+ $ hg update yt
$ python setup.py develop
3. **Generate answers to test with.** Run the test suite with these flags within
diff -r 00cdea5b78fb -r 7c420b8c801c run/Cosmology/SphericalInfall/SphericalInfall.enzo
--- a/run/Cosmology/SphericalInfall/SphericalInfall.enzo
+++ b/run/Cosmology/SphericalInfall/SphericalInfall.enzo
@@ -53,7 +53,7 @@
#
StaticHierarchy = 0 // dynamic hierarchy
MaximumRefinementLevel = 3 // use up to 3 levels
-RefineBy = 3 // refinement factor
+RefineBy = 2 // refinement factor
CellFlaggingMethod = 2 // baryon overdensity
MinimumOverDensityForRefinement = 2.0 // times the initial overdensity
MinimumEfficiency = 0.3 // fraction efficiency
diff -r 00cdea5b78fb -r 7c420b8c801c run/GravitySolver/GravityTest/GravityTest__test_gravity.py
--- a/run/GravitySolver/GravityTest/GravityTest__test_gravity.py
+++ b/run/GravitySolver/GravityTest/GravityTest__test_gravity.py
@@ -16,8 +16,8 @@
_attrs = ()
def __init__(self):
- self.pf = None
-
+ self.ds = None
+
def run(self):
Data = np.loadtxt("TestGravityCheckResults.out")
radius = Data[:,0]
diff -r 00cdea5b78fb -r 7c420b8c801c run/Hydro/Hydro-1D/FreeExpansion/FreeExpansion__test_free_expansion.py
--- a/run/Hydro/Hydro-1D/FreeExpansion/FreeExpansion__test_free_expansion.py
+++ b/run/Hydro/Hydro-1D/FreeExpansion/FreeExpansion__test_free_expansion.py
@@ -6,6 +6,8 @@
from yt.frontends.enzo.answer_testing_support import \
requires_outputlog
+import numpy as na
+
_pf_name = os.path.basename(os.path.dirname(__file__)) + ".enzo"
_dir_name = os.path.dirname(__file__)
@@ -14,12 +16,12 @@
_attrs = ()
def __init__(self, pf):
- self.pf = pf
+ self.ds = pf
def run(self):
- ray = self.pf.h.ray([0.0,0.5,0.5], [1.0,0.5,0.5])
+ ray = self.ds.ray([0.0,0.5,0.5], [1.0,0.5,0.5])
ray_length = np.sqrt(((ray.end_point - ray.start_point)**2).sum())
- ipos = na.argwhere(ray['VelocityMagnitude'] == 0.0)
+ ipos = na.argwhere(ray[('gas', 'velocity_magnitude')] == 0.0)
if len(ipos) > 0:
ipos = ipos.min()
else:
diff -r 00cdea5b78fb -r 7c420b8c801c run/Hydro/Hydro-2D/AMRShockPool2D/AMRShockPool2D.enzo
--- a/run/Hydro/Hydro-2D/AMRShockPool2D/AMRShockPool2D.enzo
+++ b/run/Hydro/Hydro-2D/AMRShockPool2D/AMRShockPool2D.enzo
@@ -32,7 +32,7 @@
#
StaticHierarchy = 0 // dynamic hierarchy
MaximumRefinementLevel = 1 // 2 levels total
-RefineBy = 3 // refinement factor
+RefineBy = 2 // refinement factor
CellFlaggingMethod = 3 // use shock criteria for refinement
MinimumEfficiency = 0.8 // good value for 2d
#
diff -r 00cdea5b78fb -r 7c420b8c801c run/Hydro/Hydro-2D/FreeExpansionAMR/FreeExpansionAMR__test_free_expansion.py
--- a/run/Hydro/Hydro-2D/FreeExpansionAMR/FreeExpansionAMR__test_free_expansion.py
+++ b/run/Hydro/Hydro-2D/FreeExpansionAMR/FreeExpansionAMR__test_free_expansion.py
@@ -5,6 +5,7 @@
sim_dir_load
from yt.frontends.enzo.answer_testing_support import \
requires_outputlog
+import numpy as na
_pf_name = os.path.basename(os.path.dirname(__file__)) + ".enzo"
_dir_name = os.path.dirname(__file__)
@@ -14,12 +15,12 @@
_attrs = ()
def __init__(self, pf):
- self.pf = pf
+ self.ds = pf
def run(self):
- ray = self.pf.h.ray([0.0,0.0,0.5], [1.0,1.0,0.5])
+ ray = self.ds.ray([0.0,0.0,0.5], [1.0,1.0,0.5])
ray_length = np.sqrt(((ray.end_point - ray.start_point)**2).sum())
- ipos = na.argwhere(ray['VelocityMagnitude'] == 0.0)
+ ipos = na.argwhere(ray[('gas', 'velocity_magnitude')] == 0.0)
if len(ipos) > 0:
ipos = ipos.min()
else:
diff -r 00cdea5b78fb -r 7c420b8c801c run/Hydro/Hydro-2D/NohProblem2D/NohProblem2D__test_noh2d.py
--- a/run/Hydro/Hydro-2D/NohProblem2D/NohProblem2D__test_noh2d.py
+++ b/run/Hydro/Hydro-2D/NohProblem2D/NohProblem2D__test_noh2d.py
@@ -7,6 +7,8 @@
from yt.frontends.enzo.answer_testing_support import \
requires_outputlog
+import numpy as na
+
_pf_name = os.path.basename(os.path.dirname(__file__)) + ".enzo"
_dir_name = os.path.dirname(__file__)
@@ -15,11 +17,11 @@
_attrs = ()
def __init__(self, pf):
- self.pf = pf
+ self.ds = pf
def run(self):
- # self.pf already exists
- sl = self.pf.h.slice(2, 0.5)
+ # self.ds already exists
+ sl = self.ds.slice(2, 0.5)
frb = FixedResolutionBuffer(sl, (0.0, 1.0, 0.0, 1.0),
(400, 400), antialias=False)
dens = frb["Density"]
@@ -34,12 +36,12 @@
_attrs = ()
def __init__(self, pf):
- self.pf = pf
+ self.ds = pf
def run(self):
- # self.pf already exists
- dd = self.pf.h.all_data()
- t = self.pf['InitialTime']
+ # self.ds already exists
+ dd = self.ds.all_data()
+ t = self.ds.parameters['InitialTime']
x = dd['x']
y = dd['y']
r = na.sqrt((x**2 + y**2))
@@ -56,8 +58,8 @@
assert_allclose(new_result, old_result, rtol=10**-tolerance, atol=0)
def plot(self):
- dd = self.pf.h.all_data()
- t = self.pf['InitialTime']
+ dd = self.ds.h.all_data()
+ t = self.ds.parameters['InitialTime']
x = dd['x']
y = dd['y']
r = na.sqrt((x**2 + y**2))
@@ -76,10 +78,10 @@
pl.xlim(0.0,na.sqrt(2.0))
pl.xlabel('r')
pl.ylabel('Density')
- pl.savefig('%s_density.png' % self.pf)
+ pl.savefig('%s_density.png' % self.ds)
pl.clf()
- return ['%s_density.png' % self.pf]
+ return ['%s_density.png' % self.ds]
@requires_outputlog(_dir_name, _pf_name)
def test_noh2d():
diff -r 00cdea5b78fb -r 7c420b8c801c run/Hydro/Hydro-2D/NohProblem2DAMR/NohProblem2DAMR__test_noh2damr.py
--- a/run/Hydro/Hydro-2D/NohProblem2DAMR/NohProblem2DAMR__test_noh2damr.py
+++ b/run/Hydro/Hydro-2D/NohProblem2DAMR/NohProblem2DAMR__test_noh2damr.py
@@ -7,6 +7,8 @@
from yt.frontends.enzo.answer_testing_support import \
requires_outputlog
+import numpy as na
+
_pf_name = os.path.basename(os.path.dirname(__file__)) + ".enzo"
_dir_name = os.path.dirname(__file__)
@@ -15,11 +17,10 @@
_attrs = ()
def __init__(self, pf):
- self.pf = pf
+ self.ds = pf
def run(self):
- # self.pf already exists
- sl = self.pf.h.slice(2, 0.5)
+ sl = self.ds.slice(2, 0.5)
frb = FixedResolutionBuffer(sl, (0.0, 1.0, 0.0, 1.0),
(400, 400), antialias=False)
dens = frb["Density"]
@@ -34,12 +35,11 @@
_attrs = ()
def __init__(self, pf):
- self.pf = pf
+ self.ds = pf
def run(self):
- # self.pf already exists
- dd = self.pf.h.all_data()
- t = self.pf['InitialTime']
+ dd = self.ds.all_data()
+ t = self.ds.parameters['InitialTime']
x = dd['x']
y = dd['y']
r = na.sqrt((x**2 + y**2))
@@ -56,8 +56,8 @@
assert_allclose(new_result, old_result, rtol=10**-tolerance, atol=0)
def plot(self):
- dd = self.pf.h.all_data()
- t = self.pf['InitialTime']
+ dd = self.ds.all_data()
+ t = self.ds.parameters['InitialTime']
x = dd['x']
y = dd['y']
r = na.sqrt((x**2 + y**2))
@@ -76,10 +76,10 @@
pl.xlim(0.0,na.sqrt(2.0))
pl.xlabel('r')
pl.ylabel('Density')
- pl.savefig('%s_density.png' % self.pf)
+ pl.savefig('%s_density.png' % self.ds)
pl.clf()
- return ['%s_density.png' % self.pf]
+ return ['%s_density.png' % self.ds]
# There's not much to plot, so we just return an empty list.
@requires_outputlog(_dir_name, _pf_name)
diff -r 00cdea5b78fb -r 7c420b8c801c run/Hydro/Hydro-3D/NohProblem3D/NohProblem3D__test_noh3d.py
--- a/run/Hydro/Hydro-3D/NohProblem3D/NohProblem3D__test_noh3d.py
+++ b/run/Hydro/Hydro-3D/NohProblem3D/NohProblem3D__test_noh3d.py
@@ -15,11 +15,10 @@
_attrs = ()
def __init__(self, pf):
- self.pf = pf
+ self.ds = pf
def run(self):
- # self.pf already exists
- sl = self.pf.h.slice(2, 0.5)
+ sl = self.ds.slice(2, 0.5)
frb = FixedResolutionBuffer(sl, (0.0, 1.0, 0.0, 1.0),
(100, 100), antialias=False)
dens = frb["Density"]
@@ -34,12 +33,11 @@
_attrs = ()
def __init__(self, pf):
- self.pf = pf
+ self.ds = pf
def run(self):
- # self.pf already exists
- dd = self.pf.h.all_data()
- t = self.pf['InitialTime']
+ dd = self.ds.all_data()
+ t = self.ds.parameters['InitialTime']
x = dd['x']
y = dd['y']
z = dd['z']
@@ -57,8 +55,8 @@
assert_allclose(new_result, old_result, rtol=10**-tolerance, atol=0)
def plot(self):
- dd = self.pf.h.all_data()
- t = self.pf['InitialTime']
+ dd = self.ds.all_data()
+ t = self.ds.parameters['InitialTime']
x = dd['x']
y = dd['y']
z = dd['z']
@@ -78,10 +76,10 @@
pl.xlim(0.0,na.sqrt(3.0))
pl.xlabel('r')
pl.ylabel('Density')
- pl.savefig('%s_density.png' % self.pf)
+ pl.savefig('%s_density.png' % self.ds)
pl.clf()
- return ['%s_density.png' % self.pf]
+ return ['%s_density.png' % self.ds]
@requires_outputlog(_dir_name, _pf_name)
def test_noh3d():
diff -r 00cdea5b78fb -r 7c420b8c801c run/Hydro/Hydro-3D/NohProblem3DAMR/NohProblem3DAMR__test_noh3damr.py
--- a/run/Hydro/Hydro-3D/NohProblem3DAMR/NohProblem3DAMR__test_noh3damr.py
+++ b/run/Hydro/Hydro-3D/NohProblem3DAMR/NohProblem3DAMR__test_noh3damr.py
@@ -15,11 +15,10 @@
_attrs = ()
def __init__(self, pf):
- self.pf = pf
+ self.ds = pf
def run(self):
- # self.pf already exists
- sl = self.pf.h.slice(2, 0.5)
+ sl = self.ds.slice(2, 0.5)
frb = FixedResolutionBuffer(sl, (0.0, 1.0, 0.0, 1.0),
(400, 400), antialias=False)
dens = frb["Density"]
@@ -34,12 +33,11 @@
_attrs = ()
def __init__(self, pf):
- self.pf = pf
+ self.ds = pf
def run(self):
- # self.pf already exists
- dd = self.pf.h.all_data()
- t = self.pf['InitialTime']
+ dd = self.ds.all_data()
+ t = self.ds.parameters['InitialTime']
x = dd['x']
y = dd['y']
z = dd['z']
@@ -57,8 +55,8 @@
assert_allclose(new_result, old_result, rtol=10**-tolerance, atol=0)
def plot(self):
- dd = self.pf.h.all_data()
- t = self.pf['InitialTime']
+ dd = self.ds.all_data()
+ t = self.ds.parameters['InitialTime']
x = dd['x']
y = dd['y']
z = dd['z']
@@ -79,10 +77,10 @@
pl.xlim(0.0,na.sqrt(3.0))
pl.xlabel('r')
pl.ylabel('Density')
- pl.savefig('%s_density.png' % self.pf)
+ pl.savefig('%s_density.png' % self.ds)
pl.clf()
- return ['%s_density.png' % self.pf]
+ return ['%s_density.png' % self.ds]
@requires_outputlog(_dir_name, _pf_name)
def test_noh3damr():
diff -r 00cdea5b78fb -r 7c420b8c801c run/Hydro/Hydro-3D/RotatingCylinder/RotatingCylinder__test_rotating_cylinder.py
--- a/run/Hydro/Hydro-3D/RotatingCylinder/RotatingCylinder__test_rotating_cylinder.py
+++ b/run/Hydro/Hydro-3D/RotatingCylinder/RotatingCylinder__test_rotating_cylinder.py
@@ -14,10 +14,10 @@
_attrs = ()
def __init__(self, pf):
- self.pf = pf
+ self.ds = pf
def run(self):
- ad = self.pf.h.all_data()
+ ad = self.ds.all_data()
return na.array(ad.quantities['TotalQuantity'](["AngularMomentumX",
"AngularMomentumY",
"AngularMomentumZ"]))
diff -r 00cdea5b78fb -r 7c420b8c801c run/MHD/2D/MHD2DRotorTest/MHD2DRotorTest__test_rotor.py
--- a/run/MHD/2D/MHD2DRotorTest/MHD2DRotorTest__test_rotor.py
+++ b/run/MHD/2D/MHD2DRotorTest/MHD2DRotorTest__test_rotor.py
@@ -8,18 +8,18 @@
_pf_name = os.path.basename(os.path.dirname(__file__)) + ".enzo"
_dir_name = os.path.dirname(__file__)
-_fields = ('Density', 'Bx','Pressure')
+_fields = ('Density', 'Bx', 'pressure')
class TestRotorImage(AnswerTestingTest):
_type_name = "mhd_rotor_image"
_attrs = ("field", )
def __init__(self, pf, field):
- self.pf = pf
+ self.ds = pf
self.field = field
def run(self):
- sl = self.pf.h.slice(2, 0.5)
+ sl = self.ds.slice(2, 0.5)
frb = FixedResolutionBuffer(sl, (0.0, 1.0, 0.0, 1.0), (200,200))
dd = frb[self.field]
return np.array([dd.mean(), dd.std(), dd.min(), dd.max()])
diff -r 00cdea5b78fb -r 7c420b8c801c run/MHD/2D/MHDCTOrszagTang/MHDCTOrszagTang.enzo
--- a/run/MHD/2D/MHDCTOrszagTang/MHDCTOrszagTang.enzo
+++ b/run/MHD/2D/MHDCTOrszagTang/MHDCTOrszagTang.enzo
@@ -13,7 +13,7 @@
MHD_CT_Method = 1 //0 = none, 1 = Balsara, 2 = Poisson, 3=RJ (use 1)
HydroMethod = 6
CourantSafetyNumber = 0.5
-
+BAnyl = 1
# Hydro control
diff -r 00cdea5b78fb -r 7c420b8c801c run/MHD/2D/MHDCTOrszagTang/MHDCTOrszagTang__test_DivB_CT.py
--- a/run/MHD/2D/MHDCTOrszagTang/MHDCTOrszagTang__test_DivB_CT.py
+++ b/run/MHD/2D/MHDCTOrszagTang/MHDCTOrszagTang__test_DivB_CT.py
@@ -13,7 +13,7 @@
standard_small_simulation
_base_fields = ("Density",
- "Pressure",
+ "pressure",
"x-velocity",
"y-velocity",
"Bx",
@@ -41,26 +41,10 @@
if not os.path.exists(filename):
raise EnzoTestOutputFileNonExistent(filename)
-# Defines a new derived field for divergence of B
-def _DivB_CT(field,data):
- """ Doesn't seem to work right now..."""
- bx = data['BxF']
- by = data['ByF']
- bz = data['BzF']
- #print d.shape
- nx,ny,nz=bx.shape
- #print "ManualDivB: bx shape", bx.shape
- nx-=1 #because it came from bx, which is one too large
- return ((bx[1:nx+1,:,:]-bx[0:nx,:,:])/data['dx']+
- (by[:,1:ny+1,:]-by[:,0:ny,:])/data['dy']+
- (bz[:,:,1:nz+1]-bz[:,:,0:nz])/data['dz'])
-
# Tests that Div B = 0
@requires_outputlog(_dir_name, _pf_name)
def test_DivB_CT():
""" Make sure that Divergence of B is zero everywhere. """
- add_field('DivB_CT', function = _DivB_CT,take_log=False,
- validators=[ValidateGridType()])
sim = sim_dir_load("MHDCTOrszagTang.enzo",
path="./MHD/2D/MHDCTOrszagTang",
find_outputs=True)
@@ -68,6 +52,6 @@
yield VerifySimulationSameTest(sim)
# Only test the last output.
pf = sim[-1]
- max_value = pf.h.find_max('DivB_CT')
+ max_value = pf.find_max('DivB')
max_value = float(max_value[0])
assert (max_value < 1e-10)
diff -r 00cdea5b78fb -r 7c420b8c801c run/MHD/2D/MHDCTOrszagTangAMR/MHDCTOrszagTangAMR.enzo
--- a/run/MHD/2D/MHDCTOrszagTangAMR/MHDCTOrszagTangAMR.enzo
+++ b/run/MHD/2D/MHDCTOrszagTangAMR/MHDCTOrszagTangAMR.enzo
@@ -34,6 +34,7 @@
MHD_CT_Method = 1 //0 = none, 1 = Balsara, 2 = Poisson, 3=RJ (use 1)
HydroMethod = 6
CourantSafetyNumber = 0.5
+BAnyl = 1
# Hydro control
diff -r 00cdea5b78fb -r 7c420b8c801c run/MHD/2D/MHDCTOrszagTangAMR/MHDCTOrszagTangAMR__test_DivB_CT.py
--- a/run/MHD/2D/MHDCTOrszagTangAMR/MHDCTOrszagTangAMR__test_DivB_CT.py
+++ b/run/MHD/2D/MHDCTOrszagTangAMR/MHDCTOrszagTangAMR__test_DivB_CT.py
@@ -13,7 +13,7 @@
standard_small_simulation
_base_fields = ("Density",
- "Pressure",
+ "pressure",
"x-velocity",
"y-velocity",
"Bx",
@@ -38,25 +38,9 @@
if not os.path.exists(filename):
raise EnzoTestOutputFileNonExistent(filename)
-# Defines a new derived field for divergence of B
-def _DivB_CT(field,data):
- """ Doesn't seem to work right now..."""
- bx = data['BxF']
- by = data['ByF']
- bz = data['BzF']
- #print d.shape
- nx,ny,nz=bx.shape
- #print "ManualDivB: bx shape", bx.shape
- nx-=1 #because it came from bx, which is one too large
- return ((bx[1:nx+1,:,:]-bx[0:nx,:,:])/data['dx']+
- (by[:,1:ny+1,:]-by[:,0:ny,:])/data['dy']+
- (bz[:,:,1:nz+1]-bz[:,:,0:nz])/data['dz'])
-
@requires_outputlog(os.path.dirname(__file__), "MHDCTOrszagTangAMR.enzo") # Verifies that OutputLog exists
def test_DivB_CT():
""" Make sure that Divergence of B is zero everywhere. """
- add_field('DivB_CT', function = _DivB_CT,take_log=False,
- validators=[ValidateGridType()])
sim = sim_dir_load("MHDCTOrszagTangAMR.enzo",
path="./MHD/2D/MHDCTOrszagTangAMR",
find_outputs=True)
@@ -64,6 +48,6 @@
yield VerifySimulationSameTest(sim)
# Only test the last output.
pf = sim[-1]
- max_value = pf.h.find_max('DivB_CT')
+ max_value = pf.find_max('DivB')
max_value = float(max_value[0])
assert (max_value < 1e-10)
diff -r 00cdea5b78fb -r 7c420b8c801c run/MHD/2D/SedovBlast-MHD-2D-Fryxell/SedovBlast-MHD-2D-Fryxell__test_fryxell.py
--- a/run/MHD/2D/SedovBlast-MHD-2D-Fryxell/SedovBlast-MHD-2D-Fryxell__test_fryxell.py
+++ b/run/MHD/2D/SedovBlast-MHD-2D-Fryxell/SedovBlast-MHD-2D-Fryxell__test_fryxell.py
@@ -8,18 +8,18 @@
_pf_name = os.path.basename(os.path.dirname(__file__)) + ".enzo"
_dir_name = os.path.dirname(__file__)
-_fields = ('Density', 'Pressure')
+_fields = ('Density', 'pressure')
class TestFryxellImage(AnswerTestingTest):
_type_name = "fryxell_image"
_attrs = ("field", )
def __init__(self, pf, field):
- self.pf = pf
+ self.ds = pf
self.field = field
def run(self):
- sl = self.pf.h.slice(2, 0.5)
+ sl = self.ds.slice(2, 0.5)
frb = FixedResolutionBuffer(sl, (0.0, 1.0, 0.0, 1.0), (200,200))
dd = frb[self.field]
return np.array([dd.mean(), dd.std(), dd.min(), dd.max()])
diff -r 00cdea5b78fb -r 7c420b8c801c run/MHD/2D/SedovBlast-MHD-2D-Gardiner/SedovBlast-MHD-2D-Gardiner__test_gardiner.py
--- a/run/MHD/2D/SedovBlast-MHD-2D-Gardiner/SedovBlast-MHD-2D-Gardiner__test_gardiner.py
+++ b/run/MHD/2D/SedovBlast-MHD-2D-Gardiner/SedovBlast-MHD-2D-Gardiner__test_gardiner.py
@@ -8,18 +8,18 @@
_pf_name = os.path.basename(os.path.dirname(__file__)) + ".enzo"
_dir_name = os.path.dirname(__file__)
-_fields = ('Density', 'Pressure', 'Bx', 'By')
+_fields = ('Density', 'pressure', 'Bx', 'By')
class TestGardinerImage(AnswerTestingTest):
_type_name = "gardiner_image"
_attrs = ("field", )
def __init__(self, pf, field):
- self.pf = pf
+ self.ds = pf
self.field = field
def run(self):
- sl = self.pf.h.slice(2, 0.5)
+ sl = self.ds.slice(2, 0.5)
frb = FixedResolutionBuffer(sl, (0.0, 1.0, 0.0, 1.0), (200,200))
dd = frb[self.field]
return np.array([dd.mean(), dd.std(), dd.min(), dd.max()])
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransport/PhotonShadowing/PhotonShadowing__test_photonshadowing.py
--- a/run/RadiationTransport/PhotonShadowing/PhotonShadowing__test_photonshadowing.py
+++ b/run/RadiationTransport/PhotonShadowing/PhotonShadowing__test_photonshadowing.py
@@ -15,11 +15,11 @@
_attrs = ("field", )
def __init__(self, pf, field):
- self.pf = pf
+ self.ds = pf
self.field = field
def run(self):
- sl = self.pf.h.slice(2,0.5)
+ sl = self.ds.slice(2,0.5)
frb = FixedResolutionBuffer(sl, (0,1,0,1), (200,200))
dd = frb[self.field]
return np.array([dd.mean(), dd.std(), dd.min(), dd.max()])
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransport/PhotonTest/PhotonTest__test_photontest.py
--- a/run/RadiationTransport/PhotonTest/PhotonTest__test_photontest.py
+++ b/run/RadiationTransport/PhotonTest/PhotonTest__test_photontest.py
@@ -15,11 +15,11 @@
_attrs = ("field", )
def __init__(self, pf, field):
- self.pf = pf
+ self.ds = pf
self.field = field
def run(self):
- sl = self.pf.h.slice(2,0.5)
+ sl = self.ds.slice(2,0.5)
frb = FixedResolutionBuffer(sl, (0,1,0,1), (200,200))
dd = frb[self.field]
return np.array([dd.mean(), dd.std(), dd.min(), dd.max()])
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransport/PhotonTestAMR/PhotonTestAMR__test_amrphotontest.py
--- a/run/RadiationTransport/PhotonTestAMR/PhotonTestAMR__test_amrphotontest.py
+++ b/run/RadiationTransport/PhotonTestAMR/PhotonTestAMR__test_amrphotontest.py
@@ -8,18 +8,18 @@
_pf_name = os.path.basename(os.path.dirname(__file__)) + ".enzo"
_dir_name = os.path.dirname(__file__)
-_fields = ('Density', 'HI_Fraction', 'HII_Fraction', 'HI_kph')
+_fields = ('Density', 'H_p0_fraction', 'H_p1_fraction', 'HI_kph')
class TestAMRPhotonTest(AnswerTestingTest):
_type_name = "photon_shadowing_image"
_attrs = ("field", )
def __init__(self, pf, field):
- self.pf = pf
+ self.ds = pf
self.field = field
def run(self):
- sl = self.pf.h.slice(2,0.5)
+ sl = self.ds.slice(2,0.5)
frb = FixedResolutionBuffer(sl, (0,1,0,1), (200,200))
dd = frb[self.field]
return np.array([dd.mean(), dd.std(), dd.min(), dd.max()])
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransportFLD/CosmoIonization_q05z10/CosmoIonization_q05z10.enzo
--- /dev/null
+++ b/run/RadiationTransportFLD/CosmoIonization_q05z10/CosmoIonization_q05z10.enzo
@@ -0,0 +1,43 @@
+#
+# Shapiro & Giroux, isothermal, cosmological I-front test
+#
+# Daniel R. Reynolds, reyn...@smu.edu
+#
+#############################
+#
+# problem and general modules
+#
+ProblemType = 415 // CosmoIonizationInitialize init
+RadiativeTransferFLD = 2 // use FLD solver for radiation
+ImplicitProblem = 1 // use gFLDProblem module
+ComovingCoordinates = 1 // Expansion ON
+UseHydro = 0 // no hydro
+Unigrid = 1 // unigrid run
+RadiativeTransferOpticallyThinH2 = 0 // no 1/r^2 LW background
+RadHydroParamfile = CosmoIonization_q05z10.gfld
+#
+# grid and boundary
+#
+TopGridRank = 3
+LeftFaceBoundaryCondition = 1 1 1 // outflow
+RightFaceBoundaryCondition = 1 1 1 // outflow
+TopGridDimensions = 16 16 16
+#
+# runtime and I/O
+#
+StopCycle = 999999
+dtDataDump = 0.25
+ParallelRootGridIO = 1
+#
+# cosmology
+#
+CosmologyOmegaMatterNow = 0.1 // OmegaBaryonNow + OmegaCDMNow
+CosmologyOmegaLambdaNow = 0.0 // 1.0-OmegaMatterNow
+CosmologyHubbleConstantNow = 1.0 // [km/s/Mpc]
+CosmologyComovingBoxSize   = 0.3    // 0.3 Mpc [Mpc/h]
+CosmologyMaxExpansionRate = 0.015 // max delta(a)/a
+CosmologyInitialRedshift = 10.0
+CosmologyFinalRedshift = 1.0
+GravitationalConstant = 1
+
+#############################
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransportFLD/CosmoIonization_q05z10/Econtour_01.pdf
Binary file run/RadiationTransportFLD/CosmoIonization_q05z10/Econtour_01.pdf has changed
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransportFLD/CosmoIonization_q05z10/HIcontour_01.pdf
Binary file run/RadiationTransportFLD/CosmoIonization_q05z10/HIcontour_01.pdf has changed
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransportFLD/CosmoIonization_q05z10/HIcontour_05.pdf
Binary file run/RadiationTransportFLD/CosmoIonization_q05z10/HIcontour_05.pdf has changed
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransportFLD/CosmoIonization_q05z10/HIcontour_10.pdf
Binary file run/RadiationTransportFLD/CosmoIonization_q05z10/HIcontour_10.pdf has changed
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransportFLD/CosmoIonization_q05z10/profiles_05.pdf
Binary file run/RadiationTransportFLD/CosmoIonization_q05z10/profiles_05.pdf has changed
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransportFLD/CosmoIonization_q05z10/profiles_10.pdf
Binary file run/RadiationTransportFLD/CosmoIonization_q05z10/profiles_10.pdf has changed
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransportFLD/CosmoIonization_q05z10/radius.pdf
Binary file run/RadiationTransportFLD/CosmoIonization_q05z10/radius.pdf has changed
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransportFLD/CosmoIonization_q05z10/sg_q05_check.py
--- /dev/null
+++ b/run/RadiationTransportFLD/CosmoIonization_q05z10/sg_q05_check.py
@@ -0,0 +1,217 @@
+# numpy/scipy-based error-checking script for Shapiro & Giroux q=0.5 test
+# Daniel R. Reynolds, reyn...@smu.edu
+
+# imports
+from pylab import *
+import numpy as np
+
+# set the total number of snapshots
+te = 18
+
+# set the solution tolerance
+tol = 0.01
+
+# set some constants
+q0 = 0.05 # deceleration parameter
+Nph = 5.0e48 # ionization source strength [photons/sec]
+alpha2 = 2.52e-13 # recombination rate coefficient
+mp = 1.67262171e-24 # proton mass [g]
+Myr = 3.15576e13 # duration of a Megayear [sec]
+
+# initialize time-history outputs
+# row 1: i-front radius
+# row 2: stromgren sphere radius (rs)
+# row 3: redshift (z)
+# row 4: time (t)
+# row 5: i-front radius (analytical)
+# row 6: i-front velocity (analytical)
+rdata = zeros( (6, te+1), dtype=float);
+
+
+##########
+# define some helpful functions
+def get_params(file):
+ """Returns z0, z, xR, t0, H0, dUnit, tUnit, lUnit from a parameter file"""
+ import shlex
+ f = open(file)
+ for line in f:
+ text = shlex.split(line)
+ if ("CosmologyInitialRedshift" in text):
+ z0 = float(text[len(text)-1])
+ elif ("CosmologyCurrentRedshift" in text):
+ z = float(text[len(text)-1])
+ elif ("InitialTime" in text):
+ t0 = float(text[len(text)-1])
+ elif ("CosmologyHubbleConstantNow" in text):
+ H0 = float(text[len(text)-1])
+ f.close()
+ f = open(file + '.rtmodule' )
+ for line in f:
+ text = shlex.split(line)
+ if ("DensityUnits" in text):
+ dUnit = float(text[len(text)-1])
+ elif ("TimeUnits" in text):
+ tUnit = float(text[len(text)-1])
+ elif ("LengthUnits" in text):
+ lUnit = float(text[len(text)-1])
+ f.close()
+ xR = lUnit
+ t0 *= tUnit
+ H0 *= 100*1e5/3.0857e24 # H0 units given 100km/s/Mpc, convert to 1/s
+ return [z0, z, xR, t0, H0, dUnit, tUnit, lUnit]
+
+
+##########
+def analytical_solution(q0,Nph,aval):
+ """Analytical solution driver, returns rI, vI"""
+ import h5py
+ import numpy as np
+ import scipy.integrate as sp
+ z0, z, xR, t0, H0, dUnit, tUnit, lUnit = get_params('DD0000/data0000')
+ f = h5py.File('DD0000/data0000.cpu0000','r')
+ rho_data = f.get('/Grid00000001/Density')
+ rho = rho_data[0][0][0]*dUnit
+ del(rho_data)
+ mp = 1.67262171e-24
+ # initial nH: no need to scale by a, since a(z0)=1, but we do
+ # need to accommodate for Helium in analytical soln
+# nH0 = rho/mp*0.76
+ nH0 = rho/mp
+
+ # We first set the parameter lamda = chi_{eff} alpha2 cl n_{H,0} t0, where
+ # chi_{eff} = correction for presence of He atoms [1 -- no correction]
+ # alpha2 = Hydrogen recombination coefficient [2.6e-13 -- case B]
+ # cl = the gas clumping factor [1 -- homogeneous medium]
+ # n_{H,0} = initial Hydrogen number density
+ # t0 = initial time
+ alpha2 = 2.52e-13
+ lamda = alpha2*nH0*t0
+
+ # Compute the initial Stromgren radius, rs0 (proper, CGS units)
+ rs0 = (Nph*3.0/4.0/pi/alpha2/nH0/nH0)**(1.0/3.0) # no rescaling since a(z0)=1
+
+ # We have the general formula for y(t):
+ # y(t) = (lamda/xi)exp(-tau(t)) integral_{1}^{a(t)} [da'
+ # exp(t(a'))/sqrt(1-2q0 + 2q0(1+z0)/a')] , where
+ # xi = H0*t0*(1+z0),
+ # H0 = Hubble constant
+ # tau(a) = (lamda/xi)*[F(a)-F(1)]/[3(2q0)^2(1+z0)^2/2],
+ # F(a) = [2(1-2q0) - 2q0(1+z0)/a]*sqrt(1-2q0+2q0(1+z0)/a)
+ #
+ # Here, a' is the variable of integration, not the time-derivative of a.
+ F1 = (2.0*(1.0-2.0*q0) - 2.0*q0*(1.0+z0))*sqrt(1.0-2.0*q0+2.0*q0*(1.0+z0))
+ xi = H0*t0*(1.0+z0)
+
+ # set integration nodes/values (lots)
+ inodes = 1000001
+ if (aval == 1.0):
+ numint = 0.0
+ else:
+ a = linspace(1,aval,inodes)
+ integrand = zeros(inodes, dtype=float)
+ arat = divide(2.0*q0*(1.0+z0), a)
+ sqa = sqrt(add(1.0-2.0*q0, arat))
+ afac = subtract(2*(1-2*q0), arat)
+ arg1 = subtract(afac*sqa, F1)
+ arg2 = exp(multiply((lamda/xi)/(6*q0*q0*(1+z0)*(1+z0)), arg1))
+ integrand = divide(arg2,sqa)
+
+ # perform numerical integral via composite Simpson's rule
+ numint = sp.simps(integrand, a)
+ tauval = (lamda/xi)*((2*(1-2*q0) - 2*q0*(1+z0)/aval)*sqrt(1-2*q0+2*q0*(1+z0)/aval)-F1)/(6*q0*q0*(1+z0)*(1+z0))
+ y = lamda/xi*exp(-tauval)*numint;
+
+ # extract the current Stromgren radius and velocity
+ ythird = sign(y)*abs(y)**(1.0/3.0);
+ rI = ythird/aval # compute ratio rI/rS
+ vI = (lamda/3)*aval/ythird*ythird*(1.0-y/aval**3);
+ return [rI, vI]
+
+
+
+##########
+def load_vals(tdump):
+ """Returns t, z, xR, nH, Eg, xHI, xHII from a given data dump"""
+ import h5py
+ import numpy as np
+ sdump = repr(tdump).zfill(4)
+ pfile = 'DD' + sdump + '/data' + sdump
+ hfile = pfile + '.cpu0000'
+ z0, z, xR, tval, H0, dUnit, tUnit, lUnit = get_params(pfile)
+ f = h5py.File(hfile,'r')
+ Eg = f.get('/Grid00000001/Grey_Radiation_Energy')
+ HI = f.get('/Grid00000001/HI_Density')
+ HII = f.get('/Grid00000001/HII_Density')
+ rho = f.get('/Grid00000001/Density')
+ # add floor values for happy numerics
+ HI = np.add(HI, 1.0e-10)
+ HII = np.add(HII, 1.0e-10)
+ Eg = np.add(Eg, 1.0e-30)
+ HIfrac = np.divide(HI,rho)
+ HIIfrac = np.divide(HII,rho)
+ Eg = np.multiply(Eg,dUnit*lUnit*lUnit/tUnit/tUnit)
+ nH = rho[0][0][0]*dUnit/1.67262171e-24
+ return [tval, z, xR, nH, Eg, HIfrac, HIIfrac]
+
+##########
+
+
+
+
+# loop over snapshots, loading values and times
+for tstep in range(te+1):
+
+ # load relevant information
+ t, z, xR, nH, Eg, xHI, xHII = load_vals(tstep)
+
+ # compute current Stromgren radius
+ rs = (Nph*3.0/4.0/pi/alpha2/nH/nH)**(1.0/3.0)
+
+ # store initial hydrogen number density
+ if (tstep == 0):
+ ti = t
+ zi = z
+ nHi = nH
+ rsi = rs
+
+ # compute volume element
+ nx, ny, nz = Eg.shape
+ dV = xR*xR*xR/nx/ny/nz
+
+ # compute I-front radius (assuming spherical)
+ HIIvolume = sum(xHII)*dV*8.0
+ rloc = (3.0/4.0*HIIvolume/pi)**(1.0/3.0)
+
+ # get analytical solutions for i-front position and velocity
+ a = (1.0+zi)/(1.0+z) # paper's version of a
+ ranal, vanal = analytical_solution(q0,Nph,a)
+
+ # store data
+ rdata[0][tstep] = rloc
+ rdata[1][tstep] = rs
+ rdata[2][tstep] = z
+ rdata[3][tstep] = t
+ rdata[4][tstep] = ranal
+ rdata[5][tstep] = vanal
+
+
+
+# I-front radius/velocity plots vs analytical solutions
+# scaled i-front position
+r_ratio = rdata[0]/rdata[1]
+ranal_ratio = rdata[4]
+
+# i-front position comparison
+r_err = []
+for it in range(0, te+1):
+ r_err.append( (r_ratio[it]-ranal_ratio[it])/(ranal_ratio[it]+r_ratio[it]+0.1) )
+
+# compute the error norm
+err_norm = (np.sum(np.multiply(r_err,r_err))/te)**(0.5)
+if (err_norm < tol):
+ print 'Error of ',err_norm,' is below tolerance ',tol
+ print 'PASS'
+else:
+ print 'Error of ',err_norm,' is above tolerance ',tol
+ print 'FAIL'
+
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransportFLD/CosmoIonization_q05z10/velocity.pdf
Binary file run/RadiationTransportFLD/CosmoIonization_q05z10/velocity.pdf has changed
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransportFLD/CosmoIonization_q05z10_sp/sg_q05_check.py
--- /dev/null
+++ b/run/RadiationTransportFLD/CosmoIonization_q05z10_sp/sg_q05_check.py
@@ -0,0 +1,217 @@
+# numpy/scipy-based error-checking script for Shapiro & Giroux q=0.5 test
+# Daniel R. Reynolds, reyn...@smu.edu
+
+# imports
+from pylab import *
+import numpy as np
+
+# set the total number of snapshots
+te = 18
+
+# set the solution tolerance
+tol = 0.01
+
+# set some constants
+q0 = 0.05 # deceleration parameter
+Nph = 5.0e48 # ionization source strength [photons/sec]
+alpha2 = 2.52e-13 # recombination rate coefficient
+mp = 1.67262171e-24 # proton mass [g]
+Myr = 3.15576e13 # duration of a Megayear [sec]
+
+# initialize time-history outputs
+# row 1: i-front radius
+# row 2: stromgren sphere radius (rs)
+# row 3: redshift (z)
+# row 4: time (t)
+# row 5: i-front radius (analytical)
+# row 6: i-front velocity (analytical)
+rdata = zeros( (6, te+1), dtype=float);
+
+
+##########
+# define some helpful functions
+def get_params(file):
+ """Returns z0, z, xR, t0, H0, dUnit, tUnit, lUnit from a parameter file"""
+ import shlex
+ f = open(file)
+ for line in f:
+ text = shlex.split(line)
+ if ("CosmologyInitialRedshift" in text):
+ z0 = float(text[len(text)-1])
+ elif ("CosmologyCurrentRedshift" in text):
+ z = float(text[len(text)-1])
+ elif ("InitialTime" in text):
+ t0 = float(text[len(text)-1])
+ elif ("CosmologyHubbleConstantNow" in text):
+ H0 = float(text[len(text)-1])
+ f.close()
+ f = open(file + '.rtmodule' )
+ for line in f:
+ text = shlex.split(line)
+ if ("DensityUnits" in text):
+ dUnit = float(text[len(text)-1])
+ elif ("TimeUnits" in text):
+ tUnit = float(text[len(text)-1])
+ elif ("LengthUnits" in text):
+ lUnit = float(text[len(text)-1])
+ f.close()
+ xR = lUnit
+ t0 *= tUnit
+ H0 *= 100*1e5/3.0857e24 # H0 units given 100km/s/Mpc, convert to 1/s
+ return [z0, z, xR, t0, H0, dUnit, tUnit, lUnit]
+
+
+##########
+def analytical_solution(q0,Nph,aval):
+ """Analytical solution driver, returns rI, vI"""
+ import h5py
+ import numpy as np
+ import scipy.integrate as sp
+ z0, z, xR, t0, H0, dUnit, tUnit, lUnit = get_params('DD0000/data0000')
+ f = h5py.File('DD0000/data0000.cpu0000','r')
+ rho_data = f.get('/Grid00000001/Density')
+ rho = rho_data[0][0][0]*dUnit
+ del(rho_data)
+ mp = 1.67262171e-24
+ # initial nH: no need to scale by a, since a(z0)=1, but we do
+ # need to accommodate for Helium in analytical soln
+# nH0 = rho/mp*0.76
+ nH0 = rho/mp
+
+ # We first set the parameter lamda = chi_{eff} alpha2 cl n_{H,0} t0, where
+ # chi_{eff} = correction for presence of He atoms [1 -- no correction]
+ # alpha2 = Hydrogen recombination coefficient [2.6e-13 -- case B]
+ # cl = the gas clumping factor [1 -- homogeneous medium]
+ # n_{H,0} = initial Hydrogen number density
+ # t0 = initial time
+ alpha2 = 2.52e-13
+ lamda = alpha2*nH0*t0
+
+ # Compute the initial Stromgren radius, rs0 (proper, CGS units)
+ rs0 = (Nph*3.0/4.0/pi/alpha2/nH0/nH0)**(1.0/3.0) # no rescaling since a(z0)=1
+
+ # We have the general formula for y(t):
+ # y(t) = (lamda/xi)exp(-tau(t)) integral_{1}^{a(t)} [da'
+ # exp(t(a'))/sqrt(1-2q0 + 2q0(1+z0)/a')] , where
+ # xi = H0*t0*(1+z0),
+ # H0 = Hubble constant
+ # tau(a) = (lamda/xi)*[F(a)-F(1)]/[3(2q0)^2(1+z0)^2/2],
+ # F(a) = [2(1-2q0) - 2q0(1+z0)/a]*sqrt(1-2q0+2q0(1+z0)/a)
+ #
+ # Here, a' is the variable of integration, not the time-derivative of a.
+ F1 = (2.0*(1.0-2.0*q0) - 2.0*q0*(1.0+z0))*sqrt(1.0-2.0*q0+2.0*q0*(1.0+z0))
+ xi = H0*t0*(1.0+z0)
+
+ # set integration nodes/values (lots)
+ inodes = 1000001
+ if (aval == 1.0):
+ numint = 0.0
+ else:
+ a = linspace(1,aval,inodes)
+ integrand = zeros(inodes, dtype=float)
+ arat = divide(2.0*q0*(1.0+z0), a)
+ sqa = sqrt(add(1.0-2.0*q0, arat))
+ afac = subtract(2*(1-2*q0), arat)
+ arg1 = subtract(afac*sqa, F1)
+ arg2 = exp(multiply((lamda/xi)/(6*q0*q0*(1+z0)*(1+z0)), arg1))
+ integrand = divide(arg2,sqa)
+
+ # perform numerical integral via composite Simpson's rule
+ numint = sp.simps(integrand, a)
+ tauval = (lamda/xi)*((2*(1-2*q0) - 2*q0*(1+z0)/aval)*sqrt(1-2*q0+2*q0*(1+z0)/aval)-F1)/(6*q0*q0*(1+z0)*(1+z0))
+ y = lamda/xi*exp(-tauval)*numint;
+
+ # extract the current Stromgren radius and velocity
+ ythird = sign(y)*abs(y)**(1.0/3.0);
+ rI = ythird/aval # compute ratio rI/rS
+ vI = (lamda/3)*aval/ythird*ythird*(1.0-y/aval**3);
+ return [rI, vI]
+
+
+
+##########
+def load_vals(tdump):
+ """Returns t, z, xR, nH, Eg, xHI, xHII from a given data dump"""
+ import h5py
+ import numpy as np
+ sdump = repr(tdump).zfill(4)
+ pfile = 'DD' + sdump + '/data' + sdump
+ hfile = pfile + '.cpu0000'
+ z0, z, xR, tval, H0, dUnit, tUnit, lUnit = get_params(pfile)
+ f = h5py.File(hfile,'r')
+ Eg = f.get('/Grid00000001/Grey_Radiation_Energy')
+ HI = f.get('/Grid00000001/HI_Density')
+ HII = f.get('/Grid00000001/HII_Density')
+ rho = f.get('/Grid00000001/Density')
+ # add floor values for happy numerics
+ HI = np.add(HI, 1.0e-10)
+ HII = np.add(HII, 1.0e-10)
+ Eg = np.add(Eg, 1.0e-30)
+ HIfrac = np.divide(HI,rho)
+ HIIfrac = np.divide(HII,rho)
+ Eg = np.multiply(Eg,dUnit*lUnit*lUnit/tUnit/tUnit)
+ nH = rho[0][0][0]*dUnit/1.67262171e-24
+ return [tval, z, xR, nH, Eg, HIfrac, HIIfrac]
+
+##########
+
+
+
+
+# loop over snapshots, loading values and times
+for tstep in range(te+1):
+
+ # load relevant information
+ t, z, xR, nH, Eg, xHI, xHII = load_vals(tstep)
+
+ # compute current Stromgren radius
+ rs = (Nph*3.0/4.0/pi/alpha2/nH/nH)**(1.0/3.0)
+
+ # store initial hydrogen number density
+ if (tstep == 0):
+ ti = t
+ zi = z
+ nHi = nH
+ rsi = rs
+
+ # compute volume element
+ nx, ny, nz = Eg.shape
+ dV = xR*xR*xR/nx/ny/nz
+
+ # compute I-front radius (assuming spherical)
+ HIIvolume = sum(xHII)*dV*8.0
+ rloc = (3.0/4.0*HIIvolume/pi)**(1.0/3.0)
+
+ # get analytical solutions for i-front position and velocity
+ a = (1.0+zi)/(1.0+z) # paper's version of a
+ ranal, vanal = analytical_solution(q0,Nph,a)
+
+ # store data
+ rdata[0][tstep] = rloc
+ rdata[1][tstep] = rs
+ rdata[2][tstep] = z
+ rdata[3][tstep] = t
+ rdata[4][tstep] = ranal
+ rdata[5][tstep] = vanal
+
+
+
+# I-front radius/velocity plots vs analytical solutions
+# scaled i-front position
+r_ratio = rdata[0]/rdata[1]
+ranal_ratio = rdata[4]
+
+# i-front position comparison
+r_err = []
+for it in range(0, te+1):
+ r_err.append( (r_ratio[it]-ranal_ratio[it])/(ranal_ratio[it]+r_ratio[it]+0.1) )
+
+# compute the error norm
+err_norm = (np.sum(np.multiply(r_err,r_err))/te)**(0.5)
+if (err_norm < tol):
+ print 'Error of ',err_norm,' is below tolerance ',tol
+ print 'PASS'
+else:
+ print 'Error of ',err_norm,' is above tolerance ',tol
+ print 'FAIL'
+
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransportFLD/CosmoIonization_q05z10_sp/sg_q05z10_makeplots.py
--- /dev/null
+++ b/run/RadiationTransportFLD/CosmoIonization_q05z10_sp/sg_q05z10_makeplots.py
@@ -0,0 +1,291 @@
+# matplotlib-based plotting script for Shapiro & Giroux q=0.5 test
+# Daniel R. Reynolds, reyn...@smu.edu
+
+# imports
+from pylab import *
+
+
+# set the total number of snapshots
+te = 21
+
+# set the graphics output type
+pictype = '.png'
+
+# set some constants
+q0 = 0.05 # deceleration parameter
+Nph = 5.0e48 # ionization source strength [photons/sec]
+alpha2 = 2.52e-13 # recombination rate coefficient
+mp = 1.67262171e-24 # proton mass [g]
+Myr = 3.15576e13 # duration of a Megayear [sec]
+
+# initialize time-history outputs
+# row 1: i-front radius
+# row 2: stromgren sphere radius (rs)
+# row 3: redshift (z)
+# row 4: time (t)
+# row 5: i-front radius (analytical)
+# row 6: i-front velocity (analytical)
+rdata = zeros( (6, te+1), dtype=float);
+
+
+##########
+# define some helpful functions
+def get_params(file):
+ """Returns z0, z, xR, t0, H0, dUnit, tUnit, lUnit from a parameter file"""
+ import shlex
+ f = open(file)
+ for line in f:
+ text = shlex.split(line)
+ if ("CosmologyInitialRedshift" in text):
+ z0 = float(text[len(text)-1])
+ elif ("CosmologyCurrentRedshift" in text):
+ z = float(text[len(text)-1])
+ elif ("InitialTime" in text):
+ t0 = float(text[len(text)-1])
+ elif ("CosmologyHubbleConstantNow" in text):
+ H0 = float(text[len(text)-1])
+ f.close()
+ f = open(file + '.rtmodule' )
+ for line in f:
+ text = shlex.split(line)
+ if ("DensityUnits" in text):
+ dUnit = float(text[len(text)-1])
+ elif ("TimeUnits" in text):
+ tUnit = float(text[len(text)-1])
+ elif ("LengthUnits" in text):
+ lUnit = float(text[len(text)-1])
+ f.close()
+ xR = lUnit
+ t0 *= tUnit
+ H0 *= 100*1e5/3.0857e24 # H0 units given 100km/s/Mpc, convert to 1/s
+ return [z0, z, xR, t0, H0, dUnit, tUnit, lUnit]
+
+
+##########
+def analytical_solution(q0,Nph,aval):
+ """Analytical solution driver, returns rI, vI"""
+ import h5py
+ import numpy as np
+ import scipy.integrate as sp
+ z0, z, xR, t0, H0, dUnit, tUnit, lUnit = get_params('DD0000/data0000')
+ f = h5py.File('DD0000/data0000.cpu0000','r')
+ rho_data = f.get('/Grid00000001/Density')
+ rho = rho_data[0][0][0]*dUnit
+ del(rho_data)
+ mp = 1.67262171e-24
+ # initial nH: no need to scale by a, since a(z0)=1, but we do
+ # need to accommodate for Helium in analytical soln
+# nH0 = rho/mp*0.76
+ nH0 = rho/mp
+
+ # We first set the parameter lamda = chi_{eff} alpha2 cl n_{H,0} t0, where
+ # chi_{eff} = correction for presence of He atoms [1 -- no correction]
+ # alpha2 = Hydrogen recombination coefficient [2.6e-13 -- case B]
+ # cl = the gas clumping factor [1 -- homogeneous medium]
+ # n_{H,0} = initial Hydrogen number density
+ # t0 = initial time
+ alpha2 = 2.52e-13
+ lamda = alpha2*nH0*t0
+
+ # Compute the initial Stromgren radius, rs0 (proper, CGS units)
+ rs0 = (Nph*3.0/4.0/pi/alpha2/nH0/nH0)**(1.0/3.0) # no rescaling since a(z0)=1
+
+ # We have the general formula for y(t):
+ # y(t) = (lamda/xi)exp(-tau(t)) integral_{1}^{a(t)} [da'
+ # exp(t(a'))/sqrt(1-2q0 + 2q0(1+z0)/a')] , where
+ # xi = H0*t0*(1+z0),
+ # H0 = Hubble constant
+ # tau(a) = (lamda/xi)*[F(a)-F(1)]/[3(2q0)^2(1+z0)^2/2],
+ # F(a) = [2(1-2q0) - 2q0(1+z0)/a]*sqrt(1-2q0+2q0(1+z0)/a)
+ #
+ # Here, a' is the variable of integration, not the time-derivative of a.
+ F1 = (2.0*(1.0-2.0*q0) - 2.0*q0*(1.0+z0))*sqrt(1.0-2.0*q0+2.0*q0*(1.0+z0))
+ xi = H0*t0*(1.0+z0)
+
+ # set integration nodes/values (lots)
+ inodes = 1000001
+ a = linspace(1,aval,inodes)
+ integrand = zeros(inodes, dtype=float)
+ arat = divide(2.0*q0*(1.0+z0), a)
+ sqa = sqrt(add(1.0-2.0*q0, arat))
+ afac = subtract(2*(1-2*q0), arat)
+ arg1 = subtract(afac*sqa, F1)
+ arg2 = exp(multiply((lamda/xi)/(6*q0*q0*(1+z0)*(1+z0)), arg1))
+ integrand = divide(arg2,sqa)
+
+ # perform numerical integral via composite Simpson's rule
+ numint = sp.simps(integrand, a)
+ tauval = (lamda/xi)*((2*(1-2*q0) - 2*q0*(1+z0)/aval)*sqrt(1-2*q0+2*q0*(1+z0)/aval)-F1)/(6*q0*q0*(1+z0)*(1+z0))
+ y = lamda/xi*exp(-tauval)*numint;
+
+ # extract the current Stromgren radius and velocity
+ ythird = sign(y)*abs(y)**(1.0/3.0);
+ rI = ythird/aval # compute ratio rI/rS
+ vI = (lamda/3)*aval/ythird*ythird*(1.0-y/aval**3);
+ return [rI, vI]
+
+
+
+##########
+def load_vals(tdump):
+ """Returns t, z, xR, nH, Eg, xHI, xHII from a given data dump"""
+ import h5py
+ import numpy as np
+ sdump = repr(tdump).zfill(4)
+ pfile = 'DD' + sdump + '/data' + sdump
+ hfile = pfile + '.cpu0000'
+ z0, z, xR, tval, H0, dUnit, tUnit, lUnit = get_params(pfile)
+ f = h5py.File(hfile,'r')
+ Eg = f.get('/Grid00000001/Grey_Radiation_Energy')
+ HI = f.get('/Grid00000001/HI_Density')
+ HII = f.get('/Grid00000001/HII_Density')
+ rho = f.get('/Grid00000001/Density')
+ # add floor values for happy numerics
+ HI = np.add(HI, 1.0e-10)
+ HII = np.add(HII, 1.0e-10)
+ Eg = np.add(Eg, 1.0e-30)
+ HIfrac = np.divide(HI,rho)
+ HIIfrac = np.divide(HII,rho)
+ Eg = np.multiply(Eg,dUnit*lUnit*lUnit/tUnit/tUnit)
+ nH = rho[0][0][0]*dUnit/1.67262171e-24
+ return [tval, z, xR, nH, Eg, HIfrac, HIIfrac]
+
+##########
+
+
+
+
+# loop over snapshots, loading values and times
+for tstep in range(te+1):
+
+ # load relevant information
+ t, z, xR, nH, Eg, xHI, xHII = load_vals(tstep)
+
+ # compute current Stromgren radius
+ rs = (Nph*3.0/4.0/pi/alpha2/nH/nH)**(1.0/3.0)
+
+ # store initial hydrogen number density
+ if (tstep == 0):
+ ti = t
+ zi = z
+ nHi = nH
+ rsi = rs
+
+ # compute volume element
+ nx, ny, nz = Eg.shape
+ dV = xR*xR*xR/nx/ny/nz
+
+ # compute I-front radius (assuming spherical)
+ HIIvolume = sum(xHII)*dV*8.0
+ rloc = (3.0/4.0*HIIvolume/pi)**(1.0/3.0)
+
+ # get analytical solutions for i-front position and velocity
+ a = (1.0+zi)/(1.0+z) # paper's version of a
+ ranal, vanal = analytical_solution(q0,Nph,a)
+
+ # store data
+ rdata[0][tstep] = rloc
+ rdata[1][tstep] = rs
+ rdata[2][tstep] = z
+ rdata[3][tstep] = t
+ rdata[4][tstep] = ranal
+ rdata[5][tstep] = vanal
+
+ # generate 2D plots at certain times
+ if (tstep == 1) or (tstep == 5) or (tstep == 10):
+
+ # set mesh
+ x = linspace(0.0,1.0,nx)
+ y = linspace(0.0,1.0,ny)
+ X, Y = meshgrid(x,y)
+
+ # xHI slice through z=0
+ figure()
+ sl = log10(xHI[:][:][0])
+ h = imshow(sl, hold=False, extent=(0.0, 1.0, 0.0, 1.0), origin='lower')
+ colorbar(h)
+ title('log HI fraction, z = ' + repr(z))
+ savefig('HIcontour_' + repr(tstep).zfill(2) + pictype)
+
+ # Eg slice through z=0
+ figure()
+ sl = log10(Eg[:][:][0])
+ h = imshow(sl, hold=False, extent=(0.0, 1.0, 0.0, 1.0), origin='lower')
+ colorbar(h)
+ title('log radiation density, z = ' + repr(z))
+ savefig('Econtour_' + repr(tstep).zfill(2) + pictype)
+
+ # spherically-averaged profiles for xHI, xHII
+ Nradii = nx*3/2
+ Hradii = linspace(0.0,sqrt(3.0),Nradii)
+ rad_idx = zeros( (nx,ny,nz), dtype=float)
+ for k in range(nz):
+ zloc = (k+0.5)/nz
+ for j in range(ny):
+ yloc = (j+0.5)/ny
+ for i in range(nx):
+ xloc = (i+0.5)/nx
+ rad_idx[i][j][k] = max(0,floor(sqrt(xloc*xloc + yloc*yloc + zloc*zloc)/sqrt(3.0)*Nradii))
+ Hcount = 1.0e-16*ones(Nradii)
+ HIprof = zeros(Nradii, dtype=float)
+ HIIprof = zeros(Nradii, dtype=float)
+ for k in range(nz):
+ for j in range(ny):
+ for i in range(nx):
+ idx = rad_idx[i][j][k]
+ HIprof[idx] += xHI[i][j][k]
+ HIIprof[idx] += xHII[i][j][k]
+ Hcount[idx] += 1
+ HIprof = log10(HIprof/Hcount)
+ HIIprof = log10(HIIprof/Hcount)
+ figure()
+ plot(Hradii,HIprof,'b-',Hradii,HIIprof,'r--')
+ grid()
+ xlabel('$r/L_{box}$')
+ ylabel('log(xHI), log(xHII)')
+ title('HI, HII Profiles, z = ' + repr(z))
+ legend( ('xHI','xHII') )
+ axis([ 0.0, 1.2, -7.0, 1.0 ])
+ savefig('profiles_' + repr(tstep).zfill(2) + pictype)
+
+
+# I-front radius/velocity plots vs analytical solutions
+# scaled I-front velocity
+v_ratio = (rdata[0][2:te+1]-rdata[0][1:te])/(rdata[3][2:te+1]-rdata[3][1:te])/(rsi/ti)
+vanal_ratio = (rdata[5][2:te+1]+rdata[5][1:te])*0.5
+
+# scaled i-front position
+r_ratio = rdata[0]/rdata[1]
+ranal_ratio = rdata[4]
+
+# scaled redshift (cell centers)
+z_ratio = (1.0 + rdata[2])/(1.0+zi)
+
+# scaled redshift2 (cell faces)
+z_ratio2 = (1.0 + rdata[2][2:te+1])/(1.0+zi)
+
+# i-front position vs redshift plot
+figure()
+xdata = -log10(z_ratio)
+plot(xdata,r_ratio,'b-',xdata,ranal_ratio,'r--')
+xlabel('$-log[(1+z)/(1+z_i)]$')
+ylabel('$r_I/r_S$')
+title('r_i(t)/r_s(t) vs redshift, q_0 =' + repr(q0))
+legend( ('computed', 'analytical'), loc=4 )
+grid()
+axis([ 0.0, 3.0, 0.0, 1.0 ])
+savefig('radius' + pictype)
+
+# i-front velocity vs redshift plot
+figure()
+xdata = -log10(z_ratio2)
+ydata1 = log10(v_ratio)
+ydata2 = log10(vanal_ratio)
+plot(xdata,ydata1,'b-',xdata,ydata2,'r--')
+xlabel('$-log[(1+z)/(1+z_i)]$')
+ylabel('$log[v/(r_{s,i}/t_i)]$')
+title('v_{pec}(t)/(r_{s,i}/t_i) vs redshift, q_0 =' + repr(q0))
+legend( ('computed', 'analytical') )
+grid()
+axis([ 0.0, 3.0, -0.5, 1.0 ])
+savefig('velocity' + pictype)
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransportFLD/CosmoIonization_q05z4/CosmoIonization_q05z4.enzo
--- /dev/null
+++ b/run/RadiationTransportFLD/CosmoIonization_q05z4/CosmoIonization_q05z4.enzo
@@ -0,0 +1,43 @@
+#
+# Shapiro & Giroux, isothermal, cosmological I-front test
+#
+# Daniel R. Reynolds, reyn...@smu.edu
+#
+#############################
+#
+# problem and general modules
+#
+ProblemType = 415 // CosmoIonizationInitialize init
+RadiativeTransferFLD = 2 // use FLD solver for radiation
+ImplicitProblem = 1 // use gFLDProblem module
+ComovingCoordinates = 1 // Expansion ON
+UseHydro = 0 // no hydro
+Unigrid = 1 // unigrid run
+RadiativeTransferOpticallyThinH2 = 0 // no 1/r^2 LW background
+RadHydroParamfile = CosmoIonization_q05z4.gfld
+#
+# grid and boundary
+#
+TopGridRank = 3
+LeftFaceBoundaryCondition = 1 1 1 // outflow
+RightFaceBoundaryCondition = 1 1 1 // outflow
+TopGridDimensions = 16 16 16
+#
+# runtime and I/O
+#
+StopCycle = 999999
+dtDataDump = 0.0675
+ParallelRootGridIO = 1
+#
+# cosmology
+#
+CosmologyOmegaMatterNow = 0.1 // OmegaBaryonNow + OmegaCDMNow
+CosmologyOmegaLambdaNow = 0.0 // 1.0-OmegaMatterNow
+CosmologyHubbleConstantNow = 1.0 // [km/s/Mpc]
+CosmologyComovingBoxSize = 0.3 // 0.3 MPc [Mpc/h]
+CosmologyMaxExpansionRate = 0.015 // max delta(a)/a
+CosmologyInitialRedshift = 4.0
+CosmologyFinalRedshift = 1.0
+GravitationalConstant = 1
+
+#############################
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransportFLD/CosmoIonization_q05z4/Econtour_05.pdf
Binary file run/RadiationTransportFLD/CosmoIonization_q05z4/Econtour_05.pdf has changed
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransportFLD/CosmoIonization_q05z4/HIcontour_05.pdf
Binary file run/RadiationTransportFLD/CosmoIonization_q05z4/HIcontour_05.pdf has changed
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransportFLD/CosmoIonization_q05z4/make_preconditions
--- /dev/null
+++ b/run/RadiationTransportFLD/CosmoIonization_q05z4/make_preconditions
@@ -0,0 +1,3 @@
+gmake hypre-yes
+gmake photon-yes
+gmake use-mpi-yes
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransportFLD/CosmoIonization_q05z4/profiles_01.pdf
Binary file run/RadiationTransportFLD/CosmoIonization_q05z4/profiles_01.pdf has changed
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransportFLD/CosmoIonization_q05z4/radius.pdf
Binary file run/RadiationTransportFLD/CosmoIonization_q05z4/radius.pdf has changed
diff -r 00cdea5b78fb -r 7c420b8c801c run/RadiationTransportFLD/CosmoIonization_q05z4/sg_q05z4_makeplots.py
--- /dev/null
+++ b/run/RadiationTransportFLD/CosmoIonization_q05z4/sg_q05z4_makeplots.py
@@ -0,0 +1,291 @@
+# matplotlib-based plotting script for Shapiro & Giroux q=0.05 test
+# Daniel R. Reynolds, reyn...@smu.edu
+
+# imports
+from pylab import *
+
+
+# set the total number of snapshots
+te = 18
+
+# set the graphics output type
+pictype = '.png'
+
+# set some constants
+q0 = 0.05 # deceleration parameter
+Nph = 5.0e48 # ionization source strength [photons/sec]
+alpha2 = 2.52e-13 # recombination rate coefficient
+mp = 1.67262171e-24 # proton mass [g]
+Myr = 3.15576e13 # duration of a Megayear [sec]
+
+# initialize time-history outputs
+# row 1: i-front radius
+# row 2: stromgren sphere radius (rs)
+# row 3: redshift (z)
+# row 4: time (t)
+# row 5: i-front radius (analytical)
+# row 6: i-front velocity (analytical)
+rdata = zeros( (6, te+1), dtype=float);
+
+
+##########
+# define some helpful functions
+def get_params(file):
+    """Returns z0, z, xR, t0, H0, dUnit, tUnit, lUnit from a parameter file"""
+ import shlex
+ f = open(file)
+ for line in f:
+ text = shlex.split(line)
+ if ("CosmologyInitialRedshift" in text):
+ z0 = float(text[len(text)-1])
+ elif ("CosmologyCurrentRedshift" in text):
+ z = float(text[len(text)-1])
+ elif ("InitialTime" in text):
+ t0 = float(text[len(text)-1])
+ elif ("CosmologyHubbleConstantNow" in text):
+ H0 = float(text[len(text)-1])
+ f.close()
+ f = open(file + '.rtmodule' )
+ for line in f:
+ text = shlex.split(line)
+ if ("DensityUnits" in text):
+ dUnit = float(text[len(text)-1])
+ elif ("TimeUnits" in text):
+ tUnit = float(text[len(text)-1])
+ elif ("LengthUnits" in text):
+ lUnit = float(text[len(text)-1])
+ f.close()
+ xR = lUnit
+ t0 *= tUnit
+ H0 *= 100*1e5/3.0857e24 # H0 units given 100km/s/Mpc, convert to 1/s
+ return [z0, z, xR, t0, H0, dUnit, tUnit, lUnit]
+
+
+##########
+def analytical_solution(q0,Nph,aval):
+ """Analytical solution driver, returns rI, vI"""
+ import h5py
+ import numpy as np
+ import scipy.integrate as sp
+ z0, z, xR, t0, H0, dUnit, tUnit, lUnit = get_params('DD0000/data0000')
+ f = h5py.File('DD0000/data0000.cpu0000','r')
+ rho_data = f.get('/Grid00000001/Density')
+ rho = rho_data[0][0][0]*dUnit
+ del(rho_data)
+ mp = 1.67262171e-24
+ # initial nH: no need to scale by a, since a(z0)=1, but we do
+    #   need to accommodate for Helium in analytical soln
+# nH0 = rho/mp*0.76
+ nH0 = rho/mp
+
+ # We first set the parameter lamda = chi_{eff} alpha2 cl n_{H,0} t0, where
+ # chi_{eff} = correction for presence of He atoms [1 -- no correction]
+ # alpha2 = Hydrogen recombination coefficient [2.6e-13 -- case B]
+ # cl = the gas clumping factor [1 -- homogeneous medium]
+ # n_{H,0} = initial Hydrogen number density
+ # t0 = initial time
+ alpha2 = 2.52e-13
+ lamda = alpha2*nH0*t0
+
+ # Compute the initial Stromgren radius, rs0 (proper, CGS units)
+ rs0 = (Nph*3.0/4.0/pi/alpha2/nH0/nH0)**(1.0/3.0) # no rescaling since a(z0)=1
+
+ # We have the general formula for y(t):
+ # y(t) = (lamda/xi)exp(-tau(t)) integral_{1}^{a(t)} [da'
+ # exp(t(a'))/sqrt(1-2q0 + 2q0(1+z0)/a')] , where
+ # xi = H0*t0*(1+z0),
+ # H0 = Hubble constant
+ # tau(a) = (lamda/xi)*[F(a)-F(1)]/[3(2q0)^2(1+z0)^2/2],
+ # F(a) = [2(1-2q0) - 2q0(1+z0)/a]*sqrt(1-2q0+2q0(1+z0)/a)
+ #
+ # Here, a' is the variable of integration, not the time-derivative of a.
+ F1 = (2.0*(1.0-2.0*q0) - 2.0*q0*(1.0+z0))*sqrt(1.0-2.0*q0+2.0*q0*(1.0+z0))
+ xi = H0*t0*(1.0+z0)
+
+ # set integration nodes/values (lots)
+ inodes = 1000001
+ a = linspace(1,aval,inodes)
+ integrand = zeros(inodes, dtype=float)
+ arat = divide(2.0*q0*(1.0+z0), a)
+ sqa = sqrt(add(1.0-2.0*q0, arat))
+ afac = subtract(2*(1-2*q0), arat)
+ arg1 = subtract(afac*sqa, F1)
+ arg2 = exp(multiply((lamda/xi)/(6*q0*q0*(1+z0)*(1+z0)), arg1))
+ integrand = divide(arg2,sqa)
+
+ # perform numerical integral via composite Simpson's rule
+ numint = sp.simps(integrand, a)
+ tauval = (lamda/xi)*((2*(1-2*q0) - 2*q0*(1+z0)/aval)*sqrt(1-2*q0+2*q0*(1+z0)/aval)-F1)/(6*q0*q0*(1+z0)*(1+z0))
+ y = lamda/xi*exp(-tauval)*numint;
+
+ # extract the current Stromgren radius and velocity
+ ythird = sign(y)*abs(y)**(1.0/3.0);
+ rI = ythird/aval # compute ratio rI/rS
+ vI = (lamda/3)*aval/ythird*ythird*(1.0-y/aval**3);
+ return [rI, vI]
+
+
+
+##########
+def load_vals(tdump):
+ """Returns t, z, xR, nH, Eg, xHI, xHII from a given data dump"""
+ import h5py
+ import numpy as np
+ sdump = repr(tdump).zfill(4)
+ pfile = 'DD' + sdump + '/data' + sdump
+ hfile = pfile + '.cpu0000'
+ z0, z, xR, tval, H0, dUnit, tUnit, lUnit = get_params(pfile)
+ f = h5py.File(hfile,'r')
+ Eg = f.get('/Grid00000001/Grey_Radiation_Energy')
+ HI = f.get('/Grid00000001/HI_Density')
+ HII = f.get('/Grid00000001/HII_Density')
+ rho = f.get('/Grid00000001/Density')
+ # add floor values for happy numerics
+ HI = np.add(HI, 1.0e-10)
+ HII = np.add(HII, 1.0e-10)
+ Eg = np.add(Eg, 1.0e-30)
+ HIfrac = np.divide(HI,rho)
+ HIIfrac = np.divide(HII,rho)
+ Eg = np.multiply(Eg,dUnit*lUnit*lUnit/tUnit/tUnit)
+ nH = rho[0][0][0]*dUnit/1.67262171e-24
+ return [tval, z, xR, nH, Eg, HIfrac, HIIfrac]
+
+##########
+
+
+
+
+# loop over snapshots, loading values and times
+for tstep in range(te+1):
+
+ # load relevant information
+ t, z, xR, nH, Eg, xHI, xHII = load_vals(tstep)
+
+ # compute current Stromgren radius
+ rs = (Nph*3.0/4.0/pi/alpha2/nH/nH)**(1.0/3.0)
+
+ # store initial hydrogen number density
+ if (tstep == 0):
+ ti = t
+ zi = z
+ nHi = nH
+ rsi = rs
+
+ # compute volume element
+ nx, ny, nz = Eg.shape
+ dV = xR*xR*xR/nx/ny/nz
+
+ # compute I-front radius (assuming spherical)
+ HIIvolume = sum(xHII)*dV*8.0
+ rloc = (3.0/4.0*HIIvolume/pi)**(1.0/3.0)
+
+ # get analytical solutions for i-front position and velocity
+ a = (1.0+zi)/(1.0+z) # paper's version of a
+ ranal, vanal = analytical_solution(q0,Nph,a)
+
+ # store data
+ rdata[0][tstep] = rloc
+ rdata[1][tstep] = rs
+ rdata[2][tstep] = z
+ rdata[3][tstep] = t
+ rdata[4][tstep] = ranal
+ rdata[5][tstep] = vanal
+
+ # generate 2D plots at certain times
+ if (tstep == 1) or (tstep == 5) or (tstep == 10):
+
+ # set mesh
+ x = linspace(0.0,1.0,nx)
+ y = linspace(0.0,1.0,ny)
+ X, Y = meshgrid(x,y)
+
+ # xHI slice through z=0
+ figure()
+ sl = log10(xHI[:][:][0])
+ h = imshow(sl, hold=False, extent=(0.0, 1.0, 0.0, 1.0), origin='lower')
+ colorbar(h)
+ title('log HI fraction, z = ' + repr(z))
+ savefig('HIcontour_' + repr(tstep).zfill(2) + pictype)
+
+ # Eg slice through z=0
+ figure()
+ sl = log10(Eg[:][:][0])
+ h = imshow(sl, hold=False, extent=(0.0, 1.0, 0.0, 1.0), origin='lower')
+ colorbar(h)
+ title('log radiation density, z = ' + repr(z))
+ savefig('Econtour_' + repr(tstep).zfill(2) + pictype)
+
+ # spherically-averaged profiles for xHI, xHII
+ Nradii = nx*3/2
+ Hradii = linspace(0.0,sqrt(3.0),Nradii)
+ rad_idx = zeros( (nx,ny,nz), dtype=float)
+ for k in range(nz):
+ zloc = (k+0.5)/nz
+ for j in range(ny):
+ yloc = (j+0.5)/ny
+ for i in range(nx):
+ xloc = (i+0.5)/nx
+ rad_idx[i][j][k] = max(0,floor(sqrt(xloc*xloc + yloc*yloc + zloc*zloc)/sqrt(3.0)*Nradii))
+ Hcount = 1.0e-16*ones(Nradii)
+ HIprof = zeros(Nradii, dtype=float)
+ HIIprof = zeros(Nradii, dtype=float)
+ for k in range(nz):
+ for j in range(ny):
+ for i in range(nx):
+ idx = rad_idx[i][j][k]
+ HIprof[idx] += xHI[i][j][k]
+ HIIprof[idx] += xHII[i][j][k]
+ Hcount[idx] += 1
+ HIprof = log10(HIprof/Hcount)
+ HIIprof = log10(HIIprof/Hcount)
+ figure()
+ plot(Hradii,HIprof,'b-',Hradii,HIIprof,'r--')
+ grid()
+ xlabel('$r/L_{box}$')
+ ylabel('log(xHI), log(xHII)')
+ title('HI, HII Profiles, z = ' + repr(z))
+ legend( ('xHI','xHII') )
+ axis([ 0.0, 1.2, -7.0, 1.0 ])
+ savefig('profiles_' + repr(tstep).zfill(2) + pictype)
+
+
+# I-front radius/velocity plots vs analytical solutions
+# scaled I-front velocity
+v_ratio = (rdata[0][2:te+1]-rdata[0][1:te])/(rdata[3][2:te+1]-rdata[3][1:te])/(rsi/ti)
+vanal_ratio = (rdata[5][2:te+1]+rdata[5][1:te])*0.5
+
+# scaled i-front position
+r_ratio = rdata[0]/rdata[1]
+ranal_ratio = rdata[4]
+
+# scaled redshift (cell centers)
+z_ratio = (1.0 + rdata[2])/(1.0+zi)
+
+# scaled redshift2 (cell faces)
+z_ratio2 = (1.0 + rdata[2][2:te+1])/(1.0+zi)
+
+# i-front position vs redshift plot
+figure()
+xdata = -log10(z_ratio)
+plot(xdata,r_ratio,'b-',xdata,ranal_ratio,'r--')
+xlabel('$-log[(1+z)/(1+z_i)]$')
+ylabel('$r_I/r_S$')
+title('r_i(t)/r_s(t) vs redshift, q_0 =' + repr(q0))
+legend( ('computed', 'analytical'), loc=4 )
+grid()
+axis([ 0.0, 3.0, 0.0, 1.0 ])
+savefig('radius' + pictype)
+
+# i-front velocity vs redshift plot
+figure()
+xdata = -log10(z_ratio2)
+ydata1 = log10(v_ratio)
+ydata2 = log10(vanal_ratio)
+plot(xdata,ydata1,'b-',xdata,ydata2,'r--')
+xlabel('$-log[(1+z)/(1+z_i)]$')
+ylabel('$log[v/(r_{s,i}/t_i)]$')
+title('v_{pec}(t)/(r_{s,i}/t_i) vs redshift, q_0 =' + repr(q0))
+legend( ('computed', 'analytical') )
+grid()
+axis([ 0.0, 3.0, -0.5, 1.0 ])
+savefig('velocity' + pictype)
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/enzo/enzo-dev/commits/c4aa82d4aefe/
Changeset: c4aa82d4aefe
Branch: week-of-code
User: pgrete
Date: 2016-08-05 12:44:58+00:00
Summary: Merged enzo/enzo-dev into week-of-code
Affected #: 5 files
diff -r 7c420b8c801c -r c4aa82d4aefe src/enzo/Grid_GrackleWrapper.C
--- a/src/enzo/Grid_GrackleWrapper.C
+++ b/src/enzo/Grid_GrackleWrapper.C
@@ -141,7 +141,7 @@
/* If both metal fields (Pop I/II and III) exist, create a field
that contains their sum */
- float *MetalPointer;
+ float *MetalPointer = NULL;
float *TotalMetals = NULL;
if (MetalNum != -1 && SNColourNum != -1) {
diff -r 7c420b8c801c -r c4aa82d4aefe src/enzo/SetDefaultGlobalValues.C
--- a/src/enzo/SetDefaultGlobalValues.C
+++ b/src/enzo/SetDefaultGlobalValues.C
@@ -950,7 +950,6 @@
MHDCTDualEnergyMethod = INT_UNDEFINED;
MHDCTPowellSource = 0;
MHDCTUseSpecificEnergy = TRUE;
- FixedTimestep = -1.0;
WriteBoundary = FALSE;
CT_AthenaDissipation = 0.1;
MHD_WriteElectric = TRUE;
diff -r 7c420b8c801c -r c4aa82d4aefe src/enzo/SetLevelTimeStep.C
--- a/src/enzo/SetLevelTimeStep.C
+++ b/src/enzo/SetLevelTimeStep.C
@@ -78,10 +78,12 @@
AnisotropicConduction = my_anisotropic_conduction;
float dt_conduction;
+ float dt_cond_temp;
dt_conduction = huge_number;
for (grid1 = 0; grid1 < NumberOfGrids; grid1++) {
- if (Grids[grid1]->GridData->ComputeConductionTimeStep(dt_conduction) == FAIL)
+ if (Grids[grid1]->GridData->ComputeConductionTimeStep(dt_cond_temp) == FAIL)
ENZO_FAIL("Error in ComputeConductionTimeStep.\n");
+ dt_conduction = min(dt_conduction,dt_cond_temp);
}
dt_conduction = CommunicationMinValue(dt_conduction);
dt_conduction *= float(NumberOfGhostZones); // for subcycling
diff -r 7c420b8c801c -r c4aa82d4aefe src/enzo/WriteParameterFile.C
--- a/src/enzo/WriteParameterFile.C
+++ b/src/enzo/WriteParameterFile.C
@@ -1192,7 +1192,7 @@
//MHDCT variables
fprintf(fptr, "MHDCTSlopeLimiter = %"ISYM"\n", MHDCTSlopeLimiter);
fprintf(fptr, "MHDCTDualEnergyMethod = %"ISYM"\n", MHDCTDualEnergyMethod);
- fprintf(fptr, "MHDPowellSource = %"ISYM"\n", MHDCTPowellSource);
+ fprintf(fptr, "MHDCTPowellSource = %"ISYM"\n", MHDCTPowellSource);
fprintf(fptr, "WriteBoundary = %"ISYM"\n", WriteBoundary);
fprintf(fptr,"CT_AthenaDissipation =%"GSYM"\n",CT_AthenaDissipation);
fprintf(fptr,"MHD_WriteElectric =%"ISYM"\n",MHD_WriteElectric);
@@ -1200,7 +1200,6 @@
fprintf(fptr,"MHD_CT_Method =%"ISYM"\n",MHD_CT_Method);
fprintf(fptr,"NumberOfGhostZones =%"ISYM"\n",NumberOfGhostZones);
fprintf(fptr,"IsothermalSoundSpeed =%"GSYM"\n",IsothermalSoundSpeed);
- fprintf(fptr,"FixedTimestep =%"GSYM"\n",FixedTimestep);
fprintf(fptr,"MHD_ProjectB =%"ISYM"\n",MHD_ProjectB);
fprintf(fptr,"MHD_ProjectE =%"ISYM"\n",MHD_ProjectE);
fprintf(fptr,"EquationOfState =%"ISYM"\n",EquationOfState);
diff -r 7c420b8c801c -r c4aa82d4aefe src/enzo/global_data.h
--- a/src/enzo/global_data.h
+++ b/src/enzo/global_data.h
@@ -1103,7 +1103,6 @@
EXTERN int MHDCTDualEnergyMethod;
EXTERN int MHDCTPowellSource;
EXTERN int MHDCTUseSpecificEnergy;
-EXTERN float FixedTimestep;
EXTERN int WriteBoundary;
EXTERN int WriteAcceleration;
EXTERN int TracerParticlesAddToRestart;// forces addition of tracer particles to already initialized simulations
https://bitbucket.org/enzo/enzo-dev/commits/4ccd34abf5a5/
Changeset: 4ccd34abf5a5
Branch: week-of-code
User: pgrete
Date: 2016-08-10 13:56:13+00:00
Summary: SGS framework. Updated code documentation
Affected #: 8 files
diff -r c4aa82d4aefe -r 4ccd34abf5a5 README
--- a/README
+++ b/README
@@ -51,7 +51,7 @@
* Philipp Edelmann pede...@mpa-garching.mpg.de
* Andrew Emerich aemer...@gmail.com
* Nathan Goldbaum ngol...@ucsc.edu
- * Phillipp Grete pgr...@astro.physik.uni-goettingen.de
+ * Philipp Grete pgr...@astro.physik.uni-goettingen.de
* John Forbes jcfo...@ucsc.edu
* Oliver Hahn ha...@phys.ethz.ch
* Robert Harkness hark...@sdsc.edu
diff -r c4aa82d4aefe -r 4ccd34abf5a5 src/enzo/Grid.h
--- a/src/enzo/Grid.h
+++ b/src/enzo/Grid.h
@@ -2392,11 +2392,13 @@
int FTStochasticForcing(int FieldDim); // WS
- // pgrete: Jacobians to be used in SGS model
+
+ /* START Subgrid-scale modeling framework by P. Grete */
+
+ // Jacobians to be used in SGS model
float *JacVel[MAX_DIMENSION][MAX_DIMENSION];
float *JacB[MAX_DIMENSION][MAX_DIMENSION];
-
float *FilteredFields[7]; // filtered fields: rho, xyz-vel, Bxyz
// the scale-similarity model needs mixed filtered quantities
@@ -2407,8 +2409,12 @@
int SGSUtil_ComputeJacobian(float *Jac[][MAX_DIMENSION],float* field1,float* field2,float* field3);
int SGSUtil_ComputeMixedFilteredQuantities();
int SGSUtil_FilterFields();
+
+ // the general functions that add the SGS terms to the dynamic eqns.
int SGSAddEMFTerms(float **dU);
int SGSAddMomentumTerms(float **dU);
+
+ // the different SGS models
void SGSAddEMFERS2J2Term(float **EMF);
void SGSAddEMFERS2M2StarTerm(float **EMF);
void SGSAddEMFNLemfComprTerm(float **EMF);
@@ -2419,6 +2425,8 @@
void SGSAddTauSSuTerm(float **Tau);
void SGSAddTauSSbTerm(float **Tau);
void SGSAddEMFSSTerm(float **EMF);
+
+ /* END Subgrid-scale modeling framework by P. Grete */
/* Comoving coordinate expansion terms. */
diff -r c4aa82d4aefe -r 4ccd34abf5a5 src/enzo/Grid_SGSAddEMFTerms.C
--- a/src/enzo/Grid_SGSAddEMFTerms.C
+++ b/src/enzo/Grid_SGSAddEMFTerms.C
@@ -1,3 +1,24 @@
+/***********************************************************************
+ *
+ * INFORMATION This file is part of a subgrid-scale (SGS) modeling
+ * framework in order to conduct explicit large eddy simulations (LES).
+ *
+ * The functions in this file concern models for the turbulent
+ * electromotive force (EMF) in the induction equation.
+ *
+ * The models have been verified "a priori", i.e. in comparison to
+ * reference data, in
+ * Grete et al 2015 New J. Phys. 17 023070 doi: 10.1088/1367-2630/17/2/023070
+ * Grete et al 2016 Phys. Plasmas 23 062317 doi: 10.1063/1.4954304 (Grete2016a)
+ * and "a posteriori", i.e. used in simulations of decaying MHD turbulence, in
+ * Grete et al ... under review ... (Grete201X)
+ *
+ * WRITTEN BY Philipp Grete (ma...@pgrete.de)
+ *
+ * DATE 2016
+ *
+************************************************************************/
+
#include "preincludes.h"
#include "macros_and_parameters.h"
#include "typedefs.h"
@@ -6,9 +27,15 @@
#include "GridList.h"
#include "ExternalBoundary.h"
#include "Grid.h"
-/* pure (unscaled) full (compressible) nonlinear model
+
+/*
+ * This function adds to the EMF
+ * the pure (unscaled) full (compressible) nonlinear model:
* EMF = 1/12 * Delta^2 * eps_ijk * (u_j,l * B_k,l - (ln rho),l u_j,l B_k)
- * see eq TODO of TODO for details
+ *
+ * See equation (37) in Grete2016a for details (such as coefficient values)
+ * or Vlaykov et al 2016 Phys. Plasmas 23 062316 doi: 10.1063/1.4954303 for
+ * the derivation.
*/
void grid::SGSAddEMFNLemfComprTerm(float **EMF) {
if (debug)
@@ -20,11 +47,17 @@
TENum, B1Num, B2Num, B3Num, PhiNum);
float *rho, *Bx, *By, *Bz;
+
+ // if an explicit filter should be used
+ // (at this point the fields are already filtered,
+ // see hydro_rk/Grid_MHDSourceTerms.C)
if (SGSFilterWidth > 1.) {
rho = FilteredFields[0];
Bx = FilteredFields[4];
By = FilteredFields[5];
Bz = FilteredFields[6];
+ // if the model should be calculated based on grid-scale quantities
+ // (not recommended, see Grete201X)
} else {
rho = BaryonField[DensNum];
Bx = BaryonField[B1Num];
@@ -46,6 +79,7 @@
}
+ // the combined prefactor
float CDeltaSqr = 1./12. * SGScoeffNLemfCompr * pow(SGSFilterWidth,2.) *
pow(CellWidth[0][0]*CellWidth[1][0]*CellWidth[2][0],2./3.);
@@ -94,22 +128,32 @@
}
-/* eddy resistivity model scaled by Smagorinsky energies
- * EMF = -C * Delta^2 * sqrt(|S|^2 + |J|^2/rho) * J
- * see eq TODO of TODO for details
+/*
+ * This function adds to the EMF
+ * an eddy resistivity model where the strength of anomalous resistivity is
+ * scaled by the total SGS energy as given by a model based on
+ * realizability conditions:
+ * EMF = -C * Delta^2 * sqrt(|S*|^2 + |M|^2/rho) * J
+ *
+ * See equation (23) and (13) in Grete2016a for details (such as coefficient values)
*/
-void grid::SGSAddEMFERS2J2Term(float **EMF) {
+void grid::SGSAddEMFERS2M2StarTerm(float **EMF) {
if (debug)
- printf("[%"ISYM"] grid::SGSAddEMFERS2J2Term start\n",MyProcessorNumber);
+ printf("[%"ISYM"] grid::SGSAddEMFERS2M2StarTerm start\n",MyProcessorNumber);
int DensNum, GENum, TENum, Vel1Num, Vel2Num, Vel3Num;
int B1Num, B2Num, B3Num, PhiNum;
this->IdentifyPhysicalQuantities(DensNum, GENum, Vel1Num, Vel2Num, Vel3Num,
TENum, B1Num, B2Num, B3Num, PhiNum);
- float* rho;
+ float* rho;
+ // if an explicit filter should be used
+ // (at this point the fields are already filtered,
+ // see hydro_rk/Grid_MHDSourceTerms.C and the SGSNeedJacobians switch)
if (SGSFilterWidth > 1.) {
rho = FilteredFields[0];
+ // if the model should be calculated based on grid-scale quantities
+ // (not recommended, see Grete201X)
} else {
rho = BaryonField[DensNum];
}
@@ -128,91 +172,14 @@
}
- float MinusCDeltaSqr = -SGScoeffERS2J2 * pow(SGSFilterWidth,2.) *
- pow(CellWidth[0][0]*CellWidth[1][0]*CellWidth[2][0],2./3.);
-
- int igrid;
- float sqrtS2plusJ2overRho;
-
- for (int k = StartIndex[2]; k <= EndIndex[2]; k++)
- for (int j = StartIndex[1]; j <= EndIndex[1]; j++)
- for (int i = StartIndex[0]; i <= EndIndex[0]; i++) {
-
- igrid = i + (j+k*GridDimension[1])*GridDimension[0];
-
- sqrtS2plusJ2overRho = pow(
- 2.*(pow(JacVel[X][X][igrid],2.) +
- pow(JacVel[Y][Y][igrid],2.) +
- pow(JacVel[Z][Z][igrid],2.)
- )
- + pow(JacVel[X][Y][igrid] + JacVel[Y][X][igrid],2.)
- + pow(JacVel[Y][Z][igrid] + JacVel[Z][Y][igrid],2.)
- + pow(JacVel[X][Z][igrid] + JacVel[Z][X][igrid],2.)
- + (pow(JacB[Z][Y][igrid] - JacB[Y][Z][igrid],2.) +
- pow(JacB[X][Z][igrid] - JacB[Z][X][igrid],2.) +
- pow(JacB[Y][X][igrid] - JacB[X][Y][igrid],2.)
- )/rho[igrid],1./2.);
-
- EMF[X][igrid] += MinusCDeltaSqr * sqrtS2plusJ2overRho *
- (JacB[Z][Y][igrid] - JacB[Y][Z][igrid]);
- EMF[Y][igrid] += MinusCDeltaSqr * sqrtS2plusJ2overRho *
- (JacB[X][Z][igrid] - JacB[Z][X][igrid]);
- EMF[Z][igrid] += MinusCDeltaSqr * sqrtS2plusJ2overRho *
- (JacB[Y][X][igrid] - JacB[X][Y][igrid]);
-
- }
-
-}
-
-/* eddy resistivity model scaled by realiz. energies
- * EMF = -C * Delta^2 * sqrt(|S*|^2 + |M|^2/rho) * J
- * see eq TODO of TODO for details
- */
-void grid::SGSAddEMFERS2M2StarTerm(float **EMF) {
- if (debug)
- printf("[%"ISYM"] grid::SGSAddEMFERS2M2StarTerm start\n",MyProcessorNumber);
-
- int DensNum, GENum, TENum, Vel1Num, Vel2Num, Vel3Num;
- int B1Num, B2Num, B3Num, PhiNum;
- this->IdentifyPhysicalQuantities(DensNum, GENum, Vel1Num, Vel2Num, Vel3Num,
- TENum, B1Num, B2Num, B3Num, PhiNum);
-
- float* rho;
- if (SGSFilterWidth > 1.) {
- rho = FilteredFields[0];
- } else {
- rho = BaryonField[DensNum];
- }
-
- int size = 1;
- int StartIndex[MAX_DIMENSION];
- int EndIndex[MAX_DIMENSION];
-
- for (int dim = 0; dim < MAX_DIMENSION; dim++) {
- size *= GridDimension[dim];
-
- /* we need the EMF in the first ghost zone as well
- * as we'll take another derivative later on */
- StartIndex[dim] = GridStartIndex[dim] - 1;
- EndIndex[dim] = GridEndIndex[dim] + 1;
- }
-
-
+ // the combined prefactor
float MinusCDeltaSqr = -SGScoeffERS2M2Star * pow(SGSFilterWidth,2.) *
pow(CellWidth[0][0]*CellWidth[1][0]*CellWidth[2][0],2./3.);
int igrid;
- /* magic with S |S| S*... could potentially handled by
- * external function, should reduce CPU time, but increase memory usage
- */
float traceSthird, traceMthird;
float sqrtS2StarplusM2overRho;
- /* just for fun: how accurate is Dedner
- * we count the number of cells where divB is dynamically important
- * divB * Delta / |B| > 1.
- */
- int divBerror = 0;
for (int k = StartIndex[2]; k <= EndIndex[2]; k++)
for (int j = StartIndex[1]; j <= EndIndex[1]; j++)
@@ -223,14 +190,6 @@
traceSthird = (JacVel[X][X][igrid] + JacVel[Y][Y][igrid] + JacVel[Z][Z][igrid])/3.;
traceMthird = (JacB[X][X][igrid] + JacB[Y][Y][igrid] + JacB[Z][Z][igrid])/3.;
- if (debug && (traceMthird*pow(CellWidth[0][0]*CellWidth[1][0]*CellWidth[2][0],2./3.)*3./pow(
- BaryonField[B1Num][igrid]*BaryonField[B1Num][igrid] +
- BaryonField[B2Num][igrid]*BaryonField[B2Num][igrid] +
- BaryonField[B3Num][igrid]*BaryonField[B3Num][igrid],1./2.) > 1.)) {
- divBerror++;
- }
-
-
sqrtS2StarplusM2overRho = pow(
2.*(pow(JacVel[X][X][igrid]-traceSthird,2.) +
pow(JacVel[Y][Y][igrid]-traceSthird,2.) +
@@ -256,16 +215,14 @@
(JacB[Y][X][igrid] - JacB[X][Y][igrid]);
}
- if (debug)
- printf("[%"ISYM"] grid::SGSAddEMFERS2M2StarTerm divB error total: %"ISYM" |\%: %"FSYM"\n",
- MyProcessorNumber,divBerror,
- (float) divBerror / (float)((EndIndex[0] + 1 - StartIndex[0])*(EndIndex[1] + 1 - StartIndex[1])*(EndIndex[2] + 1 - StartIndex[2])));
-
}
-/* scale-similarity model
+/*
+ * This function adds to the EMF
+ * a scale-similarity motivated term:
* EMF = flt(u x B) - flt(u) x flt(B)
- * see eq TODO of TODO for details
+ *
+ * See equation (32) in Grete2016a for details (such as coefficient values)
*/
void grid::SGSAddEMFSSTerm(float **EMF) {
if (debug)
@@ -302,11 +259,16 @@
EMF[Z][igrid] += SGScoeffSSemf * FltUB[Z][igrid] - (
FilteredFields[1][igrid] * FilteredFields[5][igrid] -
FilteredFields[2][igrid] * FilteredFields[4][igrid]);
-
}
-
}
+
+/*
+ * This function initializes a zero EMF and calls the individual
+ * functions that add the different terms to the EMF.
+ * Finally, the curl of the EMF is added to the dU vector used by
+ * the MUSCL framework in hydro_rk/Grid_MHDSourceTerms.C
+ */
int grid::SGSAddEMFTerms(float **dU) {
if (ProcessorNumber != MyProcessorNumber) {
return SUCCESS;
@@ -337,9 +299,8 @@
}
- if (SGScoeffERS2J2 != 0.)
- SGSAddEMFERS2J2Term(EMF);
+ // the individual terms are added/activated by a non-zero coefficient
if (SGScoeffERS2M2Star != 0.)
SGSAddEMFERS2M2StarTerm(EMF);
diff -r c4aa82d4aefe -r 4ccd34abf5a5 src/enzo/Grid_SGSAddMomentumTerms.C
--- a/src/enzo/Grid_SGSAddMomentumTerms.C
+++ b/src/enzo/Grid_SGSAddMomentumTerms.C
@@ -1,3 +1,26 @@
+/***********************************************************************
+ *
+ * INFORMATION This file is part of a subgrid-scale (SGS) modeling
+ * framework in order to conduct explicit large eddy simulations (LES).
+ *
+ * The functions in this file concern models for the turbulent
+ * stress tensor in the momentum equation.
+ * It consists of the turbulent (or SGS) Reynolds stress, the SGS
+ * Maxwell stress, and the SGS magnetic pressure.
+ *
+ * The models have been verified "a priori", i.e. in comparison to
+ * reference data, in
+ * Grete et al 2015 New J. Phys. 17 023070 doi: 10.1088/1367-2630/17/2/023070
+ * Grete et al 2016 Phys. Plasmas 23 062317 doi: 10.1063/1.4954304 (Grete2016a)
+ * and "a posteriori", i.e. used in simulations of decaying MHD turbulence, in
+ * Grete et al ... under review ... (Grete201X)
+ *
+ * WRITTEN BY Philipp Grete (ma...@pgrete.de)
+ *
+ * DATE 2016
+ *
+************************************************************************/
+
#include "preincludes.h"
#include "macros_and_parameters.h"
#include "typedefs.h"
@@ -7,9 +30,14 @@
#include "ExternalBoundary.h"
#include "Grid.h"
-/* pure (unscaled) nonlinear model for TauU (full)
+/*
+ * This function adds to the SGS stress tensor (Reynolds stress component)
+ * the pure (unscaled) nonlinear model
* TauU = 1/12 * Delta^2 rho u_i,k u_j,k
- * see eq TODO of TODO for details
+ *
+ * See equation (35) in Grete2016a for details (such as coefficient values)
+ * or Vlaykov et al 2016 Phys. Plasmas 23 062316 doi: 10.1063/1.4954303 for
+ * the derivation.
*/
void grid::SGSAddTauNLuTerm(float **Tau) {
if (debug)
@@ -21,8 +49,13 @@
TENum, B1Num, B2Num, B3Num, PhiNum);
float* rho;
+ // if an explicit filter should be used
+ // (at this point the fields are already filtered,
+ // see hydro_rk/Grid_MHDSourceTerms.C and the SGSNeedJacobians switch)
if (SGSFilterWidth > 1.) {
rho = FilteredFields[0];
+ // if the model should be calculated based on grid-scale quantities
+ // (not recommended, see Grete201X)
} else {
rho = BaryonField[DensNum];
}
@@ -41,6 +74,7 @@
}
+ // the combined prefactor
float CDeltaSqr = 1./12. * SGScoeffNLu * pow(SGSFilterWidth,2.) *
pow(CellWidth[0][0]*CellWidth[1][0]*CellWidth[2][0],2./3.);
@@ -65,9 +99,13 @@
}
-/* nonlinear model for TauU (full) and scaled by realiz. energy
+/*
+ * This function adds to the SGS stress tensor (Reynolds stress component)
+ * the scaled nonlinear model (magnitude is scaled by an kinetic SGS energy
+ * model based on realizability conditions): *
* TauU = 2 C Delta^2 rho |S*|^2 (u_i,k u_j,k)/(u_l,s u_l,s)
- * see eq TODO of TODO for details
+ *
+ * See equation (43) and (13) in Grete2016a for details (such as coefficient values)
*/
void grid::SGSAddTauNLuNormedEnS2StarTerm(float **Tau) {
if (debug)
@@ -79,8 +117,13 @@
TENum, B1Num, B2Num, B3Num, PhiNum);
float* rho;
+ // if an explicit filter should be used
+ // (at this point the fields are already filtered,
+ // see hydro_rk/Grid_MHDSourceTerms.C and the SGSNeedJacobians switch)
if (SGSFilterWidth > 1.) {
rho = FilteredFields[0];
+ // if the model should be calculated based on grid-scale quantities
+ // (not recommended, see Grete201X)
} else {
rho = BaryonField[DensNum];
}
@@ -98,7 +141,7 @@
EndIndex[dim] = GridEndIndex[dim] + 1;
}
-
+ // the combined prefactor
float TwoCDeltaSqr = 2. * SGScoeffNLuNormedEnS2Star * pow(SGSFilterWidth,2.) *
pow(CellWidth[0][0]*CellWidth[1][0]*CellWidth[2][0],2./3.);
float traceSthird;
@@ -142,12 +185,16 @@
}
}
-
}
-/* pure (unscaled) nonlinear model for TauB (full)
+/*
+ * This function adds to the SGS stress tensor (Maxwell stress component)
+ * the pure (unscaled) nonlinear model for TauB (full)
* TauB = 1/12 * Delta^2 B_i,k B_j,k
- * see eq TODO of TODO for details
+ *
+ * See equation (36) in Grete2016a for details (such as coefficient values)
+ * or Vlaykov et al 2016 Phys. Plasmas 23 062316 doi: 10.1063/1.4954303 for
+ * the derivation.
*/
void grid::SGSAddTauNLbTerm(float **Tau) {
if (debug)
@@ -167,6 +214,7 @@
}
+ // the combined prefactor
float CDeltaSqr = 1./12. * SGScoeffNLb * pow(SGSFilterWidth,2.) *
pow(CellWidth[0][0]*CellWidth[1][0]*CellWidth[2][0],2./3.);
@@ -180,6 +228,8 @@
igrid = i + (j+k*GridDimension[1])*GridDimension[0];
turbMagPres = 0.;
+
+ // the pure Maxwell stress component
for (int l = 0; l < MAX_DIMENSION; l++) {
turbMagPres += JacB[X][l][igrid] * JacB[X][l][igrid]
+ JacB[Y][l][igrid] * JacB[Y][l][igrid]
@@ -193,18 +243,24 @@
Tau[XZ][igrid] -= CDeltaSqr * JacB[X][l][igrid] * JacB[Z][l][igrid];
}
+ // the turbulent magnetic pressure component
Tau[XX][igrid] += CDeltaSqr * turbMagPres/2.;
Tau[YY][igrid] += CDeltaSqr * turbMagPres/2.;
Tau[ZZ][igrid] += CDeltaSqr * turbMagPres/2.;
}
-
}
-/* eddy viscosity model for full tau scaled by realiz. energies
+/*
+ * This function adds to the SGS stress tensor (Reynolds stress component)
+ * the eddy viscosity model where the strength of eddy turbulent viscosity is
+ * scaled by the kinetic SGS energy as given by a model based on
+ * realizability conditions:
* Tau = -2 C_1 Delta^2 rho |S*| S* + 2/3 C_2 delta_ij Delta^2 rho |S*|^2
- * see eq TODO of TODO for details
+ *
+ * See equation (10), (21) and (13) in Grete2016a for details (such as coefficient
+ * values) or (in practice) equations (8), (10) and (12) in Grete201X.
*/
void grid::SGSAddTauEVEnS2StarTerm(float **Tau) {
if (debug)
@@ -217,7 +273,12 @@
float* rho;
if (SGSFilterWidth > 1.) {
+ // if an explicit filter should be used
+ // (at this point the fields are already filtered,
+ // see hydro_rk/Grid_MHDSourceTerms.C and the SGSNeedJacobians switch)
rho = FilteredFields[0];
+ // if the model should be calculated based on grid-scale quantities
+ // (not recommended, see Grete201X)
} else {
rho = BaryonField[DensNum];
}
@@ -236,16 +297,14 @@
}
+ // the combined prefactor of the deviatoric part
float Minus2C1DeltaSqr = -2. * SGScoeffEVStarEnS2Star * pow(SGSFilterWidth,2.) *
pow(CellWidth[0][0]*CellWidth[1][0]*CellWidth[2][0],2./3.);
+ // the combined prefactor of the isotropic part
float TwoThirdC2DeltaSqr = 2./3. * SGScoeffEnS2StarTrace * pow(SGSFilterWidth,2.) *
pow(CellWidth[0][0]*CellWidth[1][0]*CellWidth[2][0],2./3.);
int igrid;
-
- /* magic with S |S| S*... could potentially handled by
- * external function, should reduce CPU time, but increase memory usage
- */
float traceSthird;
float SStarSqr;
@@ -284,9 +343,12 @@
}
}
-/* scale-similarity model for TauU
+/*
+ * This function adds to the SGS stress tensor (Reynolds stress component)
+ * a scale-similarity motivated term
* TauU = flt(rho) * (flt(u_i u_j) - flt(u_i) * flt(u_j))
- * see eq TODO of TODO for details
+ *
+ * See equation (30) in Grete2016a for details (such as coefficient values)
*/
void grid::SGSAddTauSSuTerm(float **Tau) {
if (debug)
@@ -331,9 +393,12 @@
}
-/* scale-similarity model for TauB
+/*
+ * This function adds to the SGS stress tensor (Maxwell stress component)
+ * a scale-similarity motivated term
 * TauB = (flt(B_i B_j) - flt(B_i) * flt(B_j))
- * see eq TODO of TODO for details
+ *
+ * See equation (31) in Grete2016a for details (such as coefficient values)
*/
void grid::SGSAddTauSSbTerm(float **Tau) {
if (debug)
@@ -378,6 +443,12 @@
}
+/*
+ * This function initializes a zero stress tensor and calls the individual
+ * functions that add the different terms to it.
+ * Finally, the divergence of the tensor is added to the dU vector used by
+ * the MUSCL framework in hydro_rk/Grid_MHDSourceTerms.C
+ */
int grid::SGSAddMomentumTerms(float **dU) {
if (ProcessorNumber != MyProcessorNumber) {
return SUCCESS;
@@ -395,9 +466,14 @@
TENum, B1Num, B2Num, B3Num, PhiNum);
float* rho;
+ // if an explicit filter should be used
+ // (at this point the fields are already filtered,
+ // see hydro_rk/Grid_MHDSourceTerms.C and the SGSNeedJacobians switch)
if (SGSFilterWidth > 1.) {
rho = FilteredFields[0];
} else {
+ // if the model should be calculated based on grid-scale quantities
+ // (not recommended, see Grete201X)
rho = BaryonField[DensNum];
}
@@ -415,6 +491,7 @@
}
+ // the individual terms are added/activated by a non-zero coefficient
if (SGScoeffNLu != 0.)
SGSAddTauNLuTerm(Tau);
@@ -479,10 +556,6 @@
dU[iEtot][n] += EtotIncr;
}
- if (debug)
- printf("[%"ISYM"] grid::SGSAddMomentumTerms end, last incr: %"FSYM" %"FSYM" %"FSYM" %"FSYM"\n",
- MyProcessorNumber,MomxIncr,MomyIncr,MomzIncr,EtotIncr);
-
for (int dim = 0; dim < 6; dim++) {
delete [] Tau[dim];
}
diff -r c4aa82d4aefe -r 4ccd34abf5a5 src/enzo/Grid_SGSUtilities.C
--- a/src/enzo/Grid_SGSUtilities.C
+++ b/src/enzo/Grid_SGSUtilities.C
@@ -1,3 +1,17 @@
+/***********************************************************************
+ *
+ * INFORMATION This file is part of a subgrid-scale (SGS) modeling
+ * framework in order to conduct explicit large eddy simulations (LES).
+ *
+ * The functions in this file are considered utility functions and
+ * concern the calculation of Jacobians and explicit filtering operations.
+ *
+ * WRITTEN BY Philipp Grete (ma...@pgrete.de)
+ *
+ * DATE 2016
+ *
+************************************************************************/
+
#include "preincludes.h"
#include <stdlib.h>
#include "macros_and_parameters.h"
@@ -8,7 +22,26 @@
#include "ExternalBoundary.h"
#include "Grid.h"
-
+/*
+ * This function conducts an explicit filtering operation on the
+ * primary quantities.
+ * The original fields remain untouched and the result is stored in the
+ * FilteredFields array.
+ * Mass-weighted filtering applies to the velocity field.
+ * The multi-dimensional filtering operation is constructed by sequential
+ * application of the one-dimensional filter.
+ *
+ * At this point, this function is very flexible with respect to the
+ * discrete filter definition (both in effective width and weights).
+ * However, this also makes the nested for-loops less efficient, but the
+ * overhead is reasonable (see e.g. Grete201X ...)
+ *
+ * For additional information on how to construct multi-dimensional
+ * discrete filters, see e.g.
+ * Vasilyev, Lund & Moin (1998) Journal of Comp. Physics 146, 82 or
+ * Sagaut and Grohens (1999) Journal for Num. Meth. in Fluids 31, 1195
+ *
+ */
int grid::SGSUtil_FilterFields() {
if (ProcessorNumber != MyProcessorNumber) {
return SUCCESS;
@@ -43,7 +76,6 @@
int igrid, ifilter;
float totalWeight;
- /* this is !highly! inefficient, just making sure it's working */
for (int k = StartIndex[2]; k <= EndIndex[2]; k++)
for (int j = StartIndex[1]; j <= EndIndex[1]; j++)
for (int i = StartIndex[0]; i <= EndIndex[0]; i++) {
@@ -85,6 +117,11 @@
return SUCCESS;
}
+/*
+ * This function calculates the Jacobian of an arbitrary 3-dimensional
+ * field (components given by field1, field2, and field3).
+ * The result is stored in the Jac array.
+ */
int grid::SGSUtil_ComputeJacobian(float *Jac[][MAX_DIMENSION],float *field1,float* field2,float* field3) {
if (ProcessorNumber != MyProcessorNumber) {
return SUCCESS;
@@ -157,7 +194,11 @@
return SUCCESS;
}
-
+/*
+ * This function conducts an explicit filter operation on mixed quantities, e.g.
+ * flt(rho u_i u_j), which are required by the scale-similarity SGS model.
+ * The same general comments as for SGSUtil_FilterFields (see above) apply.
+ */
int grid::SGSUtil_ComputeMixedFilteredQuantities() {
if (ProcessorNumber != MyProcessorNumber) {
diff -r c4aa82d4aefe -r 4ccd34abf5a5 src/enzo/ReadParameterFile.C
--- a/src/enzo/ReadParameterFile.C
+++ b/src/enzo/ReadParameterFile.C
@@ -1400,10 +1400,11 @@
}
/* In order to use filtered fields we need additional ghost zones */
-
if (SGSFilterStencil/2 + 2 > NumberOfGhostZones)
ENZO_FAIL("SGS filtering needs additional ghost zones!\n");
-
+
+ // all these models are calculated based on the partial derivatives of
+ // the primitive quantities
if (SGScoeffERS2J2 != 0. ||
SGScoeffERS2M2Star != 0. ||
SGScoeffEVStarEnS2Star != 0. ||
@@ -1415,6 +1416,7 @@
SGSNeedJacobians = 1;
+ // the scale-similarity type models need filtered mixed quantities
if (SGScoeffSSu != 0. ||
SGScoeffSSb != 0. ||
SGScoeffSSemf != 0.)
diff -r c4aa82d4aefe -r 4ccd34abf5a5 src/enzo/SetDefaultGlobalValues.C
--- a/src/enzo/SetDefaultGlobalValues.C
+++ b/src/enzo/SetDefaultGlobalValues.C
@@ -403,11 +403,12 @@
}
UseSGSModel = 0; // off
- SGSFilterStencil = 0;
- SGSNeedJacobians = 0;
- SGSNeedMixedFilteredQuantities = 0;
- SGSFilterWidth = 0.; // off
+ SGSFilterStencil = 0; // the one-dimensional stencil of the complete filter
+ SGSNeedJacobians = 0; // set automatically in ReadParameter file
+ SGSNeedMixedFilteredQuantities = 0; // set automatically in ReadParameter file
+ SGSFilterWidth = 0.; // off, i.e. use grid-scale quantities
for (i = 0; i < 4; i++)
+ // discrete filter weights of explicit filter
SGSFilterWeights[i] = 0.;
SGScoeffERS2J2 = 0.0; // off
SGScoeffERS2M2Star = 0.0; // off
diff -r c4aa82d4aefe -r 4ccd34abf5a5 src/enzo/hydro_rk/Grid_MHDSourceTerms.C
--- a/src/enzo/hydro_rk/Grid_MHDSourceTerms.C
+++ b/src/enzo/hydro_rk/Grid_MHDSourceTerms.C
@@ -333,25 +333,31 @@
}
if (UseSGSModel) {
+ // if an explicit filtering operation should be used, otherwise
+ // grid-scale quantities are used
if (SGSFilterWidth > 1.) {
if (this->SGSUtil_FilterFields() == FAIL) {
fprintf(stderr, "grid::MHDSourceTerms: Error in SGSUtil_FilterFields.\n");
return FAIL;
}
-
+ // if the partial derivatives of primitive variables are required
+ // in the calculation of the SGS models
if (SGSNeedJacobians) {
+ // velocity Jacobian
if (this->SGSUtil_ComputeJacobian(JacVel,FilteredFields[1],FilteredFields[2],FilteredFields[3]) == FAIL) {
fprintf(stderr, "grid::MHDSourceTerms: Error in SGSUtil_ComputeJacobian(Vel).\n");
return FAIL;
}
+ // magnetic field Jacobian
if (this->SGSUtil_ComputeJacobian(JacB,FilteredFields[4],FilteredFields[5],FilteredFields[6]) == FAIL) {
fprintf(stderr, "grid::MHDSourceTerms: Error in SGSUtil_ComputeJacobian(B).\n");
return FAIL;
}
}
+ // Scale-similarity type models need filtered mixed terms, such as flt(u_i B_j), etc.
if (SGSNeedMixedFilteredQuantities) {
if (this->SGSUtil_ComputeMixedFilteredQuantities() == FAIL) {
fprintf(stderr, "grid::MHDSourceTerms: Error in SGSUtil_ComputeMixedFilteredQuantities().\n");
https://bitbucket.org/enzo/enzo-dev/commits/6c50fc4f369d/
Changeset: 6c50fc4f369d
Branch: week-of-code
User: pgrete
Date: 2016-12-06 11:47:46+00:00
Summary: Merged enzo/enzo-dev into week-of-code
Affected #: 3 files
diff -r 4ccd34abf5a5 -r 6c50fc4f369d run/Hydro/Hydro-3D/RotatingSphere/RotatingSphere.enzo
--- a/run/Hydro/Hydro-3D/RotatingSphere/RotatingSphere.enzo
+++ b/run/Hydro/Hydro-3D/RotatingSphere/RotatingSphere.enzo
@@ -77,7 +77,7 @@
RotatingSphereCentralDensity = 1.0
RotatingSphereCoreDensityExponent = 0.1
RotatingSphereOuterDensityExponent = 2.5
-RotatingSphereExteriorTemperature = 200.0
+RotatingSphereExternalTemperature = 200.0
RotatingSphereSpinParameter = 0.05
RotatingSphereAngularMomentumExponent = 0.9
RotatingSphereUseTurbulence = 0
diff -r 4ccd34abf5a5 -r 6c50fc4f369d src/enzo/Grid_ComputeCoolingTime.C
--- a/src/enzo/Grid_ComputeCoolingTime.C
+++ b/src/enzo/Grid_ComputeCoolingTime.C
@@ -184,7 +184,7 @@
/* If both metal fields (Pop I/II and III) exist, create a field
that contains their sum */
- float *MetalPointer;
+ float *MetalPointer = NULL;
float *TotalMetals = NULL;
if (MetalNum != -1 && SNColourNum != -1) {
diff -r 4ccd34abf5a5 -r 6c50fc4f369d src/enzo/RotatingSphereInitialize.C
--- a/src/enzo/RotatingSphereInitialize.C
+++ b/src/enzo/RotatingSphereInitialize.C
@@ -120,7 +120,7 @@
ret += sscanf(line, "RotatingSphereCentralDensity = %"FSYM, &RotatingSphereCentralDensity);
ret += sscanf(line, "RotatingSphereCoreDensityExponent = %"FSYM, &RotatingSphereCoreDensityExponent);
ret += sscanf(line, "RotatingSphereOuterDensityExponent = %"FSYM, &RotatingSphereOuterDensityExponent);
- ret += sscanf(line, "RotatingSphereExernalTemperature = %"FSYM, &RotatingSphereExternalTemperature);
+ ret += sscanf(line, "RotatingSphereExternalTemperature = %"FSYM, &RotatingSphereExternalTemperature);
ret += sscanf(line, "RotatingSphereSpinParameter = %"FSYM, &RotatingSphereSpinParameter);
ret += sscanf(line, "RotatingSphereAngularMomentumExponent = %"FSYM, &RotatingSphereAngularMomentumExponent);
ret += sscanf(line, "RotatingSphereUseTurbulence = %"ISYM, &RotatingSphereUseTurbulence);
https://bitbucket.org/enzo/enzo-dev/commits/af802074fb97/
Changeset: af802074fb97
Branch: week-of-code
User: pgrete
Date: 2017-01-02 13:22:17+00:00
Summary: Merged enzo/enzo-dev into week-of-code
Affected #: 8 files
diff -r 6c50fc4f369d -r af802074fb97 src/enzo/EvolvePhotons.C
--- a/src/enzo/EvolvePhotons.C
+++ b/src/enzo/EvolvePhotons.C
@@ -24,6 +24,7 @@
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
+#include "preincludes.h"
#include "performance.h"
#include "ErrorExceptions.h"
#include "EnzoTiming.h"
@@ -588,6 +589,18 @@
if (Temp->GridData->RadiationPresent() == TRUE) {
int RTCoupledSolverIntermediateStep = TRUE;
+
+#ifdef USE_GRACKLE
+ if (grackle_data->use_grackle == TRUE){
+ grackle_data->radiative_transfer_intermediate_step = (Eint32) RTCoupledSolverIntermediateStep;
+
+ if (Temp->GridData->GrackleWrapper() == FAIL){
+ ENZO_FAIL("Error in GrackleWrapper.\n");
+ }
+ continue;
+ }
+#endif // USE_GRACKLE
+
Temp->GridData->SolveRateAndCoolEquations(RTCoupledSolverIntermediateStep);
} /* ENDIF radiation */
diff -r 6c50fc4f369d -r af802074fb97 src/enzo/Grid_ComputeCoolingTime.C
--- a/src/enzo/Grid_ComputeCoolingTime.C
+++ b/src/enzo/Grid_ComputeCoolingTime.C
@@ -143,6 +143,9 @@
float *velocity1 = BaryonField[Vel1Num];
float *velocity2 = BaryonField[Vel2Num];
float *velocity3 = BaryonField[Vel3Num];
+
+ float *volumetric_heating_rate = NULL;
+ float *specific_heating_rate = NULL;
/* Compute the cooling time. */
@@ -201,7 +204,7 @@
} // ENDELSE both metal types
#ifdef USE_GRACKLE
- if (grackle_data.use_grackle == TRUE) {
+ if (grackle_data->use_grackle == TRUE) {
Eint32 *g_grid_dimension, *g_grid_start, *g_grid_end;
g_grid_dimension = new Eint32[GridRank];
@@ -222,13 +225,14 @@
grackle_units.time_units = (double) TimeUnits;
grackle_units.velocity_units = (double) VelocityUnits;
grackle_units.a_units = (double) aUnits;
+ grackle_units.a_value = (double) a;
int temp_thermal = FALSE;
float *thermal_energy;
if ( UseMHD ){
iBx = FindField(Bfield1, FieldType, NumberOfBaryonFields);
iBy = FindField(Bfield2, FieldType, NumberOfBaryonFields);
- iBz = FindField(Bfield3, FieldType, NumberOfBaryonFields);
+ iBz = FindField(Bfield3, FieldType, NumberOfBaryonFields);
}
if (HydroMethod==Zeus_Hydro) {
@@ -257,19 +261,67 @@
} // for (int i = 0; i < size; i++)
}
- if (calculate_cooling_time(&grackle_units,
- (double) afloat,
- (Eint32) GridRank, g_grid_dimension,
- g_grid_start, g_grid_end,
- density, thermal_energy,
- velocity1, velocity2, velocity3,
- BaryonField[HINum], BaryonField[HIINum],
- BaryonField[HMNum], BaryonField[HeINum],
- BaryonField[HeIINum], BaryonField[HeIIINum],
- BaryonField[H2INum], BaryonField[H2IINum],
- BaryonField[DINum], BaryonField[DIINum],
- BaryonField[HDINum], BaryonField[DeNum],
- MetalPointer, cooling_time) == FAIL) {
+ /* set up the my_fields */
+ grackle_field_data my_fields;
+
+ my_fields.grid_rank = (Eint32) GridRank;
+ my_fields.grid_dimension = g_grid_dimension;
+ my_fields.grid_start = g_grid_start;
+ my_fields.grid_end = g_grid_end;
+
+ /* now add in the baryon fields */
+ my_fields.density = density;
+ my_fields.internal_energy = thermal_energy;
+ my_fields.x_velocity = velocity1;
+ my_fields.y_velocity = velocity2;
+ my_fields.z_velocity = velocity3;
+
+ my_fields.HI_density = BaryonField[HINum];
+ my_fields.HII_density = BaryonField[HIINum];
+ my_fields.HeI_density = BaryonField[HeINum];
+ my_fields.HeII_density = BaryonField[HeIINum];
+ my_fields.HeIII_density = BaryonField[HeIIINum];
+ my_fields.e_density = BaryonField[DeNum];
+
+ my_fields.HM_density = BaryonField[HMNum];
+ my_fields.H2I_density = BaryonField[H2INum];
+ my_fields.H2II_density = BaryonField[H2IINum];
+
+ my_fields.DI_density = BaryonField[DINum];
+ my_fields.DII_density = BaryonField[DIINum];
+ my_fields.HDI_density = BaryonField[HDINum];
+
+ my_fields.metal_density = MetalPointer;
+
+ my_fields.volumetric_heating_rate = volumetric_heating_rate;
+ my_fields.specific_heating_rate = specific_heating_rate;
+
+#ifdef TRANSFER
+
+ /* unit conversion from Enzo RT units to CGS */
+ const float ev2erg = 1.60217653E-12;
+ float rtunits = ev2erg / TimeUnits;
+
+ if ( RadiativeTransfer ){
+ my_fields.RT_HI_ionization_rate = BaryonField[kphHINum];
+
+ if (RadiativeTransferHydrogenOnly == FALSE){
+ my_fields.RT_HeI_ionization_rate = BaryonField[kphHeINum];
+ my_fields.RT_HeII_ionization_rate = BaryonField[kphHeIINum];
+ }
+
+ if (MultiSpecies > 1)
+ my_fields.RT_H2_dissociation_rate = BaryonField[kdissH2INum];
+
+ /* need to convert to CGS units */
+ for( i = 0; i < size; i++) BaryonField[gammaNum][i] *= rtunits;
+
+ my_fields.RT_heating_rate = BaryonField[gammaNum];
+
+ }
+#endif // TRANSFER
+
+ if (calculate_cooling_time(&grackle_units, &my_fields, cooling_time) == FAIL) {
ENZO_FAIL("Error in Grackle calculate_cooling_time.\n");
}
@@ -281,6 +333,14 @@
delete [] thermal_energy;
}
+#ifdef TRANSFER
+ if (RadiativeTransfer){
+ /* convert the RT units back to Enzo */
+ for(i = 0; i < size; i ++) BaryonField[gammaNum][i] /= rtunits;
+
+ }
+#endif // TRANSFER
+
delete [] TotalMetals;
delete [] g_grid_dimension;
delete [] g_grid_start;
diff -r 6c50fc4f369d -r af802074fb97 src/enzo/Grid_GrackleWrapper.C
--- a/src/enzo/Grid_GrackleWrapper.C
+++ b/src/enzo/Grid_GrackleWrapper.C
@@ -36,7 +36,7 @@
{
#ifdef USE_GRACKLE
- if (grackle_data.use_grackle == FALSE)
+ if (grackle_data->use_grackle == FALSE)
return SUCCESS;
if (ProcessorNumber != MyProcessorNumber)
@@ -90,9 +90,12 @@
float *velocity1 = BaryonField[Vel1Num];
float *velocity2 = BaryonField[Vel2Num];
float *velocity3 = BaryonField[Vel3Num];
-
+
+ float *volumetric_heating_rate = NULL;
+ float *specific_heating_rate = NULL;
+
/* Compute the cooling time. */
-
+
FLOAT a = 1.0, dadt;
float TemperatureUnits = 1, DensityUnits = 1, LengthUnits = 1,
VelocityUnits = 1, TimeUnits = 1, aUnits = 1;
@@ -118,6 +121,7 @@
grackle_units.time_units = (double) TimeUnits;
grackle_units.velocity_units = (double) VelocityUnits;
grackle_units.a_units = (double) aUnits;
+ grackle_units.a_value = (double) a;
/* Metal cooling codes. */
@@ -191,27 +195,83 @@
} // for (int i = 0; i < size; i++)
}
+ //
+ // Put code here to assign fields to volumetric or specific
+ // heating rate pointers
+ //
+
+ /* set up grackle fields object */
+ grackle_field_data my_fields;
+
+ my_fields.grid_rank = (Eint32) GridRank;
+ my_fields.grid_dimension = g_grid_dimension;
+ my_fields.grid_start = g_grid_start;
+ my_fields.grid_end = g_grid_end;
+
+ /* now add in the baryon fields */
+ my_fields.density = density;
+ my_fields.internal_energy = thermal_energy;
+ my_fields.x_velocity = velocity1;
+ my_fields.y_velocity = velocity2;
+ my_fields.z_velocity = velocity3;
+ my_fields.HI_density = BaryonField[HINum];
+ my_fields.HII_density = BaryonField[HIINum];
+ my_fields.HeI_density = BaryonField[HeINum];
+ my_fields.HeII_density = BaryonField[HeIINum];
+ my_fields.HeIII_density = BaryonField[HeIIINum];
+ my_fields.e_density = BaryonField[DeNum];
+
+ my_fields.HM_density = BaryonField[HMNum];
+ my_fields.H2I_density = BaryonField[H2INum];
+ my_fields.H2II_density = BaryonField[H2IINum];
+
+ my_fields.DI_density = BaryonField[DINum];
+ my_fields.DII_density = BaryonField[DIINum];
+ my_fields.HDI_density = BaryonField[HDINum];
+
+ my_fields.metal_density = MetalPointer;
+
+ my_fields.volumetric_heating_rate = volumetric_heating_rate;
+ my_fields.specific_heating_rate = specific_heating_rate;
+
+#ifdef TRANSFER
+ /* Find RT fields */
+ int kphHINum, kphHeINum, kphHeIINum, kdissH2INum,
+ gammaNum;
+
+ IdentifyRadiativeTransferFields(kphHINum, gammaNum, kphHeINum,
+ kphHeIINum, kdissH2INum);
+
+ /* unit conversion from Enzo RT units to CGS */
+ const float ev2erg = 1.60217653E-12;
+ float rtunits = ev2erg / TimeUnits;
+
+ if( RadiativeTransfer ){
+ my_fields.RT_HI_ionization_rate = BaryonField[kphHINum];
+
+ if (RadiativeTransferHydrogenOnly == FALSE){
+ my_fields.RT_HeI_ionization_rate = BaryonField[kphHeINum];
+ my_fields.RT_HeII_ionization_rate = BaryonField[kphHeIINum];
+ }
+
+ if (MultiSpecies > 1)
+ my_fields.RT_H2_dissociation_rate = BaryonField[kdissH2INum];
+
+ /* need to convert to CGS units */
+ for( i = 0; i < size; i++) BaryonField[gammaNum][i] *= rtunits;
+
+ my_fields.RT_heating_rate = BaryonField[gammaNum];
+
+ }
+#endif // TRANSFER
+
/* Call the chemistry solver. */
- if (solve_chemistry(&grackle_units,
- (double) afloat, (double) dtFixed,
- (Eint32) GridRank, g_grid_dimension,
- g_grid_start, g_grid_end,
- density, thermal_energy,
- velocity1, velocity2, velocity3,
- BaryonField[HINum], BaryonField[HIINum],
- BaryonField[HMNum], BaryonField[HeINum],
- BaryonField[HeIINum], BaryonField[HeIIINum],
- BaryonField[H2INum], BaryonField[H2IINum],
- BaryonField[DINum], BaryonField[DIINum],
- BaryonField[HDINum], BaryonField[DeNum],
- MetalPointer) == FAIL) {
+ if (solve_chemistry(&grackle_units, &my_fields, (double) dtFixed) == FAIL){
fprintf(stderr, "Error in Grackle solve_chemistry.\n");
return FAIL;
}
- // Set the total energy appropriately for the updated thermal energy.
-
if (HydroMethod != Zeus_Hydro) {
for (i = 0; i < size; i++) {
BaryonField[TENum][i] = thermal_energy[i] +
@@ -235,6 +295,15 @@
delete [] thermal_energy;
}
+#ifdef TRANSFER
+ if (RadiativeTransfer){
+ /* convert the RT units back to Enzo */
+ for(i = 0; i < size; i ++) BaryonField[gammaNum][i] /= rtunits;
+
+ }
+#endif // TRANSFER
+
+
delete [] TotalMetals;
delete [] g_grid_dimension;
delete [] g_grid_start;
diff -r 6c50fc4f369d -r af802074fb97 src/enzo/Grid_MultiSpeciesHandler.C
--- a/src/enzo/Grid_MultiSpeciesHandler.C
+++ b/src/enzo/Grid_MultiSpeciesHandler.C
@@ -31,7 +31,7 @@
LCAPERF_START("grid_MultiSpeciesHandler");
#ifdef USE_GRACKLE
- if (grackle_data.use_grackle == TRUE) {
+ if (grackle_data->use_grackle == TRUE) {
if (this->GrackleWrapper() == FAIL) {
ENZO_FAIL("Error in GrackleWrapper.\n");
}
diff -r 6c50fc4f369d -r af802074fb97 src/enzo/Grid_ProjectSolutionToParentGrid.C
--- a/src/enzo/Grid_ProjectSolutionToParentGrid.C
+++ b/src/enzo/Grid_ProjectSolutionToParentGrid.C
@@ -455,7 +455,7 @@
if (ParentGrid.ProcessorNumber != MyProcessorNumber)
for (field = 0; field < NumberOfBaryonFields; field++) {
- delete ParentGrid.BaryonField[field];
+ delete [] ParentGrid.BaryonField[field];
ParentGrid.BaryonField[field] = NULL;
}
diff -r 6c50fc4f369d -r af802074fb97 src/enzo/ReadParameterFile.C
--- a/src/enzo/ReadParameterFile.C
+++ b/src/enzo/ReadParameterFile.C
@@ -551,20 +551,29 @@
#ifdef USE_GRACKLE
/* Grackle chemistry parameters */
- ret += sscanf(line, "use_grackle = %d", &grackle_data.use_grackle);
+ ret += sscanf(line, "use_grackle = %d", &grackle_data->use_grackle);
ret += sscanf(line, "with_radiative_cooling = %d",
- &grackle_data.with_radiative_cooling);
+ &grackle_data->with_radiative_cooling);
+ ret += sscanf(line, "use_volumetric_heating_rate = %d",
+ &grackle_data->use_volumetric_heating_rate);
+ ret += sscanf(line, "use_specific_heating_rate = %d",
+ &grackle_data->use_specific_heating_rate);
+ ret += sscanf(line, "self_shielding_method = %d",
+ &grackle_data->self_shielding_method);
+ ret += sscanf(line, "radiative_transfer_intermediate_step = %d",
+ &grackle_data->radiative_transfer_intermediate_step);
+
if (sscanf(line, "grackle_data_file = %s", dummy) == 1) {
- grackle_data.grackle_data_file = dummy;
+ grackle_data->grackle_data_file = dummy;
ret++;
}
- ret += sscanf(line, "UVbackground = %d", &grackle_data.UVbackground);
+ ret += sscanf(line, "UVbackground = %d", &grackle_data->UVbackground);
ret += sscanf(line, "Compton_xray_heating = %d",
- &grackle_data.Compton_xray_heating);
+ &grackle_data->Compton_xray_heating);
ret += sscanf(line, "LWbackground_intensity = %lf",
- &grackle_data.LWbackground_intensity);
+ &grackle_data->LWbackground_intensity);
ret += sscanf(line, "LWbackground_sawtooth_suppression = %d",
- &grackle_data.LWbackground_sawtooth_suppression);
+ &grackle_data->LWbackground_sawtooth_suppression);
/********************************/
#endif
ret += sscanf(line, "RadiativeCooling = %"ISYM, &RadiativeCooling);
@@ -1609,34 +1618,37 @@
#ifdef USE_GRACKLE
/* If using Grackle chemistry and cooling library, override all other
cooling machinery and do a translation of some of the parameters. */
- if (grackle_data.use_grackle == TRUE) {
- // grackle_data.use_grackle already set
- // grackle_data.with_radiative_cooling already set
- // grackle_data.grackle_data_file already set
- // grackle_data.UVbackground already set
- // grackle_data.Compton_xray_heating already set
- // grackle_data.LWbackground_intensity already set
- // grackle_data.LWbackground_sawtooth_suppression already set
- grackle_data.Gamma = (double) Gamma;
- grackle_data.primordial_chemistry = (Eint32) MultiSpecies;
- grackle_data.metal_cooling = (Eint32) MetalCooling;
- grackle_data.h2_on_dust = (Eint32) H2FormationOnDust;
- grackle_data.cmb_temperature_floor = (Eint32) CloudyCoolingData.CMBTemperatureFloor;
- grackle_data.three_body_rate = (Eint32) ThreeBodyRate;
- grackle_data.cie_cooling = (Eint32) CIECooling;
- grackle_data.h2_optical_depth_approximation = (Eint32) H2OpticalDepthApproximation;
- grackle_data.photoelectric_heating = (Eint32) PhotoelectricHeating;
- grackle_data.photoelectric_heating_rate = (double) PhotoelectricHeatingRate;
- grackle_data.NumberOfTemperatureBins = (Eint32) CoolData.NumberOfTemperatureBins;
- grackle_data.CaseBRecombination = (Eint32) RateData.CaseBRecombination;
- grackle_data.TemperatureStart = (double) CoolData.TemperatureStart;
- grackle_data.TemperatureEnd = (double) CoolData.TemperatureEnd;
- grackle_data.NumberOfDustTemperatureBins = (Eint32) RateData.NumberOfDustTemperatureBins;
- grackle_data.DustTemperatureStart = (double) RateData.DustTemperatureStart;
- grackle_data.DustTemperatureEnd = (double) RateData.DustTemperatureEnd;
- grackle_data.HydrogenFractionByMass = (double) CoolData.HydrogenFractionByMass;
- grackle_data.DeuteriumToHydrogenRatio = (double) CoolData.DeuteriumToHydrogenRatio;
- grackle_data.SolarMetalFractionByMass = (double) CoolData.SolarMetalFractionByMass;
+ if (grackle_data->use_grackle == TRUE) {
+ // grackle_data->use_grackle already set
+ // grackle_data->with_radiative_cooling already set
+ // grackle_data->grackle_data_file already set
+ // grackle_data->UVbackground already set
+ // grackle_data->Compton_xray_heating already set
+ // grackle_data->LWbackground_intensity already set
+ // grackle_data->LWbackground_sawtooth_suppression already set
+ grackle_data->Gamma = (double) Gamma;
+ grackle_data->primordial_chemistry = (Eint32) MultiSpecies;
+ grackle_data->metal_cooling = (Eint32) MetalCooling;
+ grackle_data->h2_on_dust = (Eint32) H2FormationOnDust;
+ grackle_data->cmb_temperature_floor = (Eint32) CloudyCoolingData.CMBTemperatureFloor;
+ grackle_data->three_body_rate = (Eint32) ThreeBodyRate;
+ grackle_data->cie_cooling = (Eint32) CIECooling;
+ grackle_data->h2_optical_depth_approximation = (Eint32) H2OpticalDepthApproximation;
+ grackle_data->photoelectric_heating = (Eint32) PhotoelectricHeating;
+ grackle_data->photoelectric_heating_rate = (double) PhotoelectricHeatingRate;
+ grackle_data->NumberOfTemperatureBins = (Eint32) CoolData.NumberOfTemperatureBins;
+ grackle_data->CaseBRecombination = (Eint32) RateData.CaseBRecombination;
+ grackle_data->TemperatureStart = (double) CoolData.TemperatureStart;
+ grackle_data->TemperatureEnd = (double) CoolData.TemperatureEnd;
+ grackle_data->NumberOfDustTemperatureBins = (Eint32) RateData.NumberOfDustTemperatureBins;
+ grackle_data->DustTemperatureStart = (double) RateData.DustTemperatureStart;
+ grackle_data->DustTemperatureEnd = (double) RateData.DustTemperatureEnd;
+ grackle_data->HydrogenFractionByMass = (double) CoolData.HydrogenFractionByMass;
+ grackle_data->DeuteriumToHydrogenRatio = (double) CoolData.DeuteriumToHydrogenRatio;
+ grackle_data->SolarMetalFractionByMass = (double) CoolData.SolarMetalFractionByMass;
+ grackle_data->use_radiative_transfer = (Eint32) RadiativeTransfer;
+ grackle_data->radiative_transfer_coupled_rate_solver = (Eint32) RadiativeTransferCoupledRateSolver;
+ grackle_data->radiative_transfer_hydrogen_only = (Eint32) RadiativeTransferHydrogenOnly;
// Initialize units structure.
FLOAT a_value, dadt;
@@ -1659,13 +1671,13 @@
grackle_units.length_units = (double) LengthUnits;
grackle_units.time_units = (double) TimeUnits;
grackle_units.velocity_units = (double) VelocityUnits;
+ grackle_units.a_value = (double) a_value;
// Initialize chemistry structure.
- if (initialize_chemistry_data(&grackle_units,
- (double) a_value) == FAIL) {
+ if (initialize_chemistry_data(&grackle_units) == FAIL) {
ENZO_FAIL("Error in Grackle initialize_chemistry_data.\n");
}
- } // if (grackle_data.use_grackle == TRUE)
+ } // if (grackle_data->use_grackle == TRUE)
else {
#endif // USE_GRACKE
diff -r 6c50fc4f369d -r af802074fb97 src/enzo/SetDefaultGlobalValues.C
--- a/src/enzo/SetDefaultGlobalValues.C
+++ b/src/enzo/SetDefaultGlobalValues.C
@@ -506,31 +506,34 @@
CloudyCoolingData.CloudyElectronFractionFactor = 9.153959e-3; // calculated using Cloudy 07.02 abundances
#ifdef USE_GRACKLE
- // Grackle chemistry data structure.
- if (set_default_chemistry_parameters() == FAIL) {
+ // Grackle chemistry data structure.
+ chemistry_data *my_chemistry;
+ my_chemistry = new chemistry_data;
+ if (set_default_chemistry_parameters(my_chemistry) == FAIL) {
ENZO_FAIL("Error in grackle: set_default_chemistry_parameters\n");
}
+
// Map Grackle defaults to corresponding Enzo parameters
- Gamma = (float) grackle_data.Gamma;
- MultiSpecies = (int) grackle_data.primordial_chemistry;
- MetalCooling = (int) grackle_data.metal_cooling;
- H2FormationOnDust = (int) grackle_data.h2_on_dust;
- CloudyCoolingData.CMBTemperatureFloor = (int) grackle_data.cmb_temperature_floor;
- ThreeBodyRate = (int) grackle_data.three_body_rate;
- CIECooling = (int) grackle_data.cie_cooling;
- H2OpticalDepthApproximation = (int) grackle_data.h2_optical_depth_approximation;
- PhotoelectricHeating = (int) grackle_data.photoelectric_heating;
- PhotoelectricHeatingRate = (float) grackle_data.photoelectric_heating_rate;
- CoolData.NumberOfTemperatureBins = (int) grackle_data.NumberOfTemperatureBins;
- RateData.CaseBRecombination = (int) grackle_data.CaseBRecombination;
- CoolData.TemperatureStart = (float) grackle_data.TemperatureStart;
- CoolData.TemperatureEnd = (float) grackle_data.TemperatureEnd;
- RateData.NumberOfDustTemperatureBins = (int) grackle_data.NumberOfDustTemperatureBins;
- RateData.DustTemperatureStart = (float) grackle_data.DustTemperatureStart;
- RateData.DustTemperatureEnd = (float) grackle_data.DustTemperatureEnd;
- CoolData.HydrogenFractionByMass = (float) grackle_data.HydrogenFractionByMass;
- CoolData.DeuteriumToHydrogenRatio = (float) grackle_data.DeuteriumToHydrogenRatio;
- CoolData.SolarMetalFractionByMass = (float) grackle_data.SolarMetalFractionByMass;
+ Gamma = (float) grackle_data->Gamma;
+ MultiSpecies = (int) grackle_data->primordial_chemistry;
+ MetalCooling = (int) grackle_data->metal_cooling;
+ H2FormationOnDust = (int) grackle_data->h2_on_dust;
+ CloudyCoolingData.CMBTemperatureFloor = (int) grackle_data->cmb_temperature_floor;
+ ThreeBodyRate = (int) grackle_data->three_body_rate;
+ CIECooling = (int) grackle_data->cie_cooling;
+ H2OpticalDepthApproximation = (int) grackle_data->h2_optical_depth_approximation;
+ PhotoelectricHeating = (int) grackle_data->photoelectric_heating;
+ PhotoelectricHeatingRate = (float) grackle_data->photoelectric_heating_rate;
+ CoolData.NumberOfTemperatureBins = (int) grackle_data->NumberOfTemperatureBins;
+ RateData.CaseBRecombination = (int) grackle_data->CaseBRecombination;
+ CoolData.TemperatureStart = (float) grackle_data->TemperatureStart;
+ CoolData.TemperatureEnd = (float) grackle_data->TemperatureEnd;
+ RateData.NumberOfDustTemperatureBins = (int) grackle_data->NumberOfDustTemperatureBins;
+ RateData.DustTemperatureStart = (float) grackle_data->DustTemperatureStart;
+ RateData.DustTemperatureEnd = (float) grackle_data->DustTemperatureEnd;
+ CoolData.HydrogenFractionByMass = (float) grackle_data->HydrogenFractionByMass;
+ CoolData.DeuteriumToHydrogenRatio = (float) grackle_data->DeuteriumToHydrogenRatio;
+ CoolData.SolarMetalFractionByMass = (float) grackle_data->SolarMetalFractionByMass;
#endif
OutputCoolingTime = FALSE;
diff -r 6c50fc4f369d -r af802074fb97 src/enzo/WriteParameterFile.C
--- a/src/enzo/WriteParameterFile.C
+++ b/src/enzo/WriteParameterFile.C
@@ -505,13 +505,17 @@
fprintf(fptr, "SGScoeffNLb = %"FSYM"\n", SGScoeffNLb);
#ifdef USE_GRACKLE
/* Grackle chemistry parameters */
- fprintf(fptr, "use_grackle = %d\n", grackle_data.use_grackle);
- fprintf(fptr, "with_radiative_cooling = %d\n", grackle_data.with_radiative_cooling);
- fprintf(fptr, "grackle_data_file = %s\n", grackle_data.grackle_data_file);
- fprintf(fptr, "UVbackground = %d\n", grackle_data.UVbackground);
- fprintf(fptr, "Compton_xray_heating = %d\n", grackle_data.Compton_xray_heating);
- fprintf(fptr, "LWbackground_intensity = %lf\n", grackle_data.LWbackground_intensity);
- fprintf(fptr, "LWbackground_sawtooth_suppression = %d\n", grackle_data.LWbackground_sawtooth_suppression);
+ fprintf(fptr, "use_grackle = %d\n", grackle_data->use_grackle);
+ fprintf(fptr, "with_radiative_cooling = %d\n", grackle_data->with_radiative_cooling);
+ fprintf(fptr, "use_volumetric_heating_rate = %d\n", grackle_data->use_volumetric_heating_rate);
+ fprintf(fptr, "use_specific_heating_rate = %d\n", grackle_data->use_specific_heating_rate);
+ fprintf(fptr, "self_shielding_method = %d\n", grackle_data->self_shielding_method);
+ fprintf(fptr, "radiative_transfer_intermediate_step = %d\n", grackle_data->radiative_transfer_intermediate_step);
+ fprintf(fptr, "grackle_data_file = %s\n", grackle_data->grackle_data_file);
+ fprintf(fptr, "UVbackground = %d\n", grackle_data->UVbackground);
+ fprintf(fptr, "Compton_xray_heating = %d\n", grackle_data->Compton_xray_heating);
+ fprintf(fptr, "LWbackground_intensity = %lf\n", grackle_data->LWbackground_intensity);
+ fprintf(fptr, "LWbackground_sawtooth_suppression = %d\n", grackle_data->LWbackground_sawtooth_suppression);
/********************************/
#endif
fprintf(fptr, "RadiativeCooling = %"ISYM"\n", RadiativeCooling);
https://bitbucket.org/enzo/enzo-dev/commits/e51a5d4a47f9/
Changeset: e51a5d4a47f9
Branch: week-of-code
User: pgrete
Date: 2017-03-14 19:14:02+00:00
Summary: Merged enzo/enzo-dev into week-of-code
Affected #: 8 files
diff -r af802074fb97 -r e51a5d4a47f9 run/Hydro/Hydro-3D/AgoraGalaxy/AgoraRestart.enzo
--- a/run/Hydro/Hydro-3D/AgoraGalaxy/AgoraRestart.enzo
+++ b/run/Hydro/Hydro-3D/AgoraGalaxy/AgoraRestart.enzo
@@ -12,6 +12,7 @@
TopGridDimensions = 64 64 64
SelfGravity = 1 // gravity on
TopGridGravityBoundary = 1 // isolated gravity BCs
+UnigridTranspose = 0
LeftFaceBoundaryCondition = 3 3 3 // periodic
RightFaceBoundaryCondition = 3 3 3
DomainLeftEdge = 0 0 0
diff -r af802074fb97 -r e51a5d4a47f9 run/Hydro/Hydro-3D/AgoraGalaxy/prepare_sim.sh
--- a/run/Hydro/Hydro-3D/AgoraGalaxy/prepare_sim.sh
+++ b/run/Hydro/Hydro-3D/AgoraGalaxy/prepare_sim.sh
@@ -1,5 +1,5 @@
wget -O ./LOW.tar.gz https://www.dropbox.com/sh/1xzt1rysy9v3a9l/AAAMlJBQG1OQFW4cjhp11Ex6a/LOW.tar.gz?dl=1
-wget https://bitbucket.org/grackle/grackle/src/default/input/CloudyData_noUVB.h5
+wget https://bitbucket.org/grackle/grackle/raw/default/input/CloudyData_noUVB.h5
tar xzvf LOW.tar.gz
mv LOW/*.dat ./
rmdir LOW
diff -r af802074fb97 -r e51a5d4a47f9 src/enzo/Grid_ComputeCoolingTime.C
--- a/src/enzo/Grid_ComputeCoolingTime.C
+++ b/src/enzo/Grid_ComputeCoolingTime.C
@@ -268,6 +268,7 @@
my_fields.grid_dimension = g_grid_dimension;
my_fields.grid_start = g_grid_start;
my_fields.grid_end = g_grid_end;
+ my_fields.grid_dx = this->CellWidth[0][0];
/* now add in the baryon fields */
my_fields.density = density;
diff -r af802074fb97 -r e51a5d4a47f9 src/enzo/Grid_GrackleWrapper.C
--- a/src/enzo/Grid_GrackleWrapper.C
+++ b/src/enzo/Grid_GrackleWrapper.C
@@ -207,6 +207,7 @@
my_fields.grid_dimension = g_grid_dimension;
my_fields.grid_start = g_grid_start;
my_fields.grid_end = g_grid_end;
+ my_fields.grid_dx = this->CellWidth[0][0];
/* now add in the baryon fields */
my_fields.density = density;
diff -r af802074fb97 -r e51a5d4a47f9 src/enzo/Make.mach.ncsa-bluewaters-gnu
--- a/src/enzo/Make.mach.ncsa-bluewaters-gnu
+++ b/src/enzo/Make.mach.ncsa-bluewaters-gnu
@@ -27,7 +27,7 @@
#-----------------------------------------------------------------------
LOCAL_MPI_INSTALL =
-LOCAL_HDF5_INSTALL = /opt/cray/hdf5/1.8.14/gnu/4.9
+LOCAL_HDF5_INSTALL = $(HDF5_ROOT)
LOCAL_HYPRE_INSTALL =
LOCAL_PYTHON_INSTALL =
LOCAL_GRACKLE_INSTALL = $(HOME)/local/gnu/grackle
diff -r af802074fb97 -r e51a5d4a47f9 src/enzo/ReadParameterFile.C
--- a/src/enzo/ReadParameterFile.C
+++ b/src/enzo/ReadParameterFile.C
@@ -560,6 +560,8 @@
&grackle_data->use_specific_heating_rate);
ret += sscanf(line, "self_shielding_method = %d",
&grackle_data->self_shielding_method);
+ ret += sscanf(line, "H2_self_shielding = %d",
+ &grackle_data->H2_self_shielding);
ret += sscanf(line, "radiative_transfer_intermediate_step = %d",
&grackle_data->radiative_transfer_intermediate_step);
diff -r af802074fb97 -r e51a5d4a47f9 src/enzo/WriteParameterFile.C
--- a/src/enzo/WriteParameterFile.C
+++ b/src/enzo/WriteParameterFile.C
@@ -510,6 +510,7 @@
fprintf(fptr, "use_volumetric_heating_rate = %d\n", grackle_data->use_volumetric_heating_rate);
fprintf(fptr, "use_specific_heating_rate = %d\n", grackle_data->use_specific_heating_rate);
fprintf(fptr, "self_shielding_method = %d\n", grackle_data->self_shielding_method);
+ fprintf(fptr, "H2_self_shielding = %d\n", grackle_data->H2_self_shielding);
fprintf(fptr, "radiative_transfer_intermediate_step = %d\n", grackle_data->radiative_transfer_intermediate_step);
fprintf(fptr, "grackle_data_file = %s\n", grackle_data->grackle_data_file);
fprintf(fptr, "UVbackground = %d\n", grackle_data->UVbackground);
diff -r af802074fb97 -r e51a5d4a47f9 src/enzo/create_config_info.py
--- a/src/enzo/create_config_info.py
+++ b/src/enzo/create_config_info.py
@@ -19,7 +19,7 @@
from mercurial import hg, ui, commands
from mercurial.error import RepoError
except ImportError:
- print "WARNING: could not get version information. Please install mercurial."
+ print("WARNING: could not get version information. Please install mercurial.")
return ('unknown', 'unknown', None)
try:
@@ -33,7 +33,7 @@
my_diff = u.popbuffer()
return (my_info[0], my_info[1], my_diff)
except RepoError:
- print "WARNING: could not get version information."
+ print("WARNING: could not get version information.")
return ('unknown', 'unknown', None)
def get_options(filename, my_options=None, get_list_order=False):
https://bitbucket.org/enzo/enzo-dev/commits/7625ee3b9da4/
Changeset: 7625ee3b9da4
Branch: week-of-code
User: Philipp Grete
Date: 2017-03-14 21:46:20+00:00
Summary: Added support for pure hydro SGS simulations
Affected #: 4 files
diff -r e51a5d4a47f9 -r 7625ee3b9da4 run/MHD/3D/StochasticForcing/StochasticForcing.enzo
--- a/run/MHD/3D/StochasticForcing/StochasticForcing.enzo
+++ b/run/MHD/3D/StochasticForcing/StochasticForcing.enzo
@@ -64,4 +64,23 @@
DrivenFlowPressure = 1.0 # initial uniform pressure
DrivenFlowMagField = 3.1622777 # initial uniform field (x direction)
+UseSGSModel = 1 # use SGS model
+SGSFilterWidth = 2.7110
+SGSFilterStencil = 3
+SGSFilterWeights = 0.40150 0.29925 0.00000 0.0
+
+SGScoeffNLemfCompr = 1.0 # compr. unscaled nonlinear model EMF
+SGScoeffNLu = 1.0 # compr. unscaled nonlinear model tauU
+SGScoeffNLb = 0.0 # compr. unscaled nonlinear model tauB
+#SGScoeffERS2J2 = 0.0 # eddy resistivity EMF model scaled by Smag.
+#energies
+##SGScoeffERS2M2Star = 0.012 # eddy resistivity EMF model scaled by
+#realiz. energies
+##SGScoeffEVStarEnS2Star = 0.01 # eddy viscosity tauUstar scaled by
+#realiz. energies
+##SGScoeffEnS2StarTrace = 0.08 # tauUtrace - SGS energy coeff from
+#realiz. energies
+##SGScoeffSSu = 0.67
+##SGScoeffSSb = 0.9
+##SGScoeffSSemf = 0.89
diff -r e51a5d4a47f9 -r 7625ee3b9da4 src/enzo/Grid_SGSUtilities.C
--- a/src/enzo/Grid_SGSUtilities.C
+++ b/src/enzo/Grid_SGSUtilities.C
@@ -65,7 +65,13 @@
EndIndex[dim] = GridEndIndex[dim] + 2;
}
- for (int m = 0; m < 7; m++)
+ int NumFilteredFields;
+ if (UseMHD)
+ NumFilteredFields = 7;
+ else
+ NumFilteredFields = 4;
+
+ for (int m = 0; m < NumFilteredFields; m++)
if (FilteredFields[m] == NULL) {
FilteredFields[m] = new float[size];
for (int o = 0; o < size; o++)
@@ -82,7 +88,7 @@
igrid = i + (j+k*GridDimension[1])*GridDimension[0];
- for (int l = 0; l < 7; l++)
+ for (int l = 0; l < NumFilteredFields; l++)
FilteredFields[l][igrid] = 0.;
for (int l = -N; l <= N; l++)
@@ -102,9 +108,11 @@
FilteredFields[3][igrid] += totalWeight * BaryonField[DensNum][ifilter]*BaryonField[Vel3Num][ifilter];
// magnetic fields
- FilteredFields[4][igrid] += totalWeight * BaryonField[B1Num][ifilter];
- FilteredFields[5][igrid] += totalWeight * BaryonField[B2Num][ifilter];
- FilteredFields[6][igrid] += totalWeight * BaryonField[B3Num][ifilter];
+ if (UseMHD) {
+ FilteredFields[4][igrid] += totalWeight * BaryonField[B1Num][ifilter];
+ FilteredFields[5][igrid] += totalWeight * BaryonField[B2Num][ifilter];
+ FilteredFields[6][igrid] += totalWeight * BaryonField[B3Num][ifilter];
+ }
}
// now that the density is filtered, we can finalize mass-weighted filtering
@@ -229,18 +237,23 @@
for (int o = 0; o < size; o++)
FltrhoUU[m][o] = 0.;
}
- if (FltBB[m] == NULL) {
- FltBB[m] = new float[size];
- for (int o = 0; o < size; o++)
- FltBB[m][o] = 0.;
- }
}
- for (int m = 0; m < 3; m++) {
- if (FltUB[m] == NULL) {
- FltUB[m] = new float[size];
- for (int o = 0; o < size; o++)
- FltUB[m][o] = 0.;
- }
+
+ if (UseMHD) {
+ for (int m = 0; m < 6; m++) {
+ if (FltBB[m] == NULL) {
+ FltBB[m] = new float[size];
+ for (int o = 0; o < size; o++)
+ FltBB[m][o] = 0.;
+ }
+ }
+ for (int m = 0; m < 3; m++) {
+ if (FltUB[m] == NULL) {
+ FltUB[m] = new float[size];
+ for (int o = 0; o < size; o++)
+ FltUB[m][o] = 0.;
+ }
+ }
}
int N = SGSFilterStencil/2;
@@ -281,6 +294,9 @@
BaryonField[Vel2Num][ifilter] * BaryonField[Vel3Num][ifilter];
FltrhoUU[XZ][igrid] += totalWeight * BaryonField[DensNum][ifilter] *
BaryonField[Vel1Num][ifilter] * BaryonField[Vel3Num][ifilter];
+
+ if (!UseMHD)
+ continue;
FltBB[XX][igrid] += totalWeight * BaryonField[B1Num][ifilter] * BaryonField[B1Num][ifilter];
FltBB[YY][igrid] += totalWeight * BaryonField[B2Num][ifilter] * BaryonField[B2Num][ifilter];
diff -r e51a5d4a47f9 -r 7625ee3b9da4 src/enzo/ReadParameterFile.C
--- a/src/enzo/ReadParameterFile.C
+++ b/src/enzo/ReadParameterFile.C
@@ -1434,6 +1434,15 @@
SGSNeedMixedFilteredQuantities = 1;
+ if (! (HydroMethod == MHD_Li || HydroMethod == MHD_RK) && (
+ SGScoeffNLemfCompr != 0. ||
+ SGScoeffNLb != 0. ||
+ SGScoeffERS2J2 != 0. ||
+ SGScoeffERS2M2Star != 0. ||
+ SGScoeffSSb != 0. ||
+ SGScoeffSSemf != 0))
+ ENZO_FAIL("SGS terms related to MHD should be set to 0 for hydro sims.\n");
+
/* Now we know which hydro solver we're using, we can assign the
diff -r e51a5d4a47f9 -r 7625ee3b9da4 src/enzo/hydro_rk/Grid_SourceTerms.C
--- a/src/enzo/hydro_rk/Grid_SourceTerms.C
+++ b/src/enzo/hydro_rk/Grid_SourceTerms.C
@@ -262,6 +262,55 @@
}
}
+
+ if (UseSGSModel) {
+ // if an explicit filtering operation should be used, otherwise
+ // grid-scale quantities are used
+ if (SGSFilterWidth > 1.) {
+ if (this->SGSUtil_FilterFields() == FAIL) {
+ fprintf(stderr, "grid::MHDSourceTerms: Error in SGSUtil_FilterFields.\n");
+ return FAIL;
+ }
+
+ // if the partial derivatives of primitive variables are required
+ // in the calculation of the SGS models
+ if (SGSNeedJacobians) {
+ // velocity Jacobian
+ if (this->SGSUtil_ComputeJacobian(JacVel,FilteredFields[1],FilteredFields[2],FilteredFields[3]) == FAIL) {
+ fprintf(stderr, "grid::MHDSourceTerms: Error in SGSUtil_ComputeJacobian(Vel).\n");
+ return FAIL;
+ }
+ }
+
+ // Scale-similarity type models need filtered mixed terms, such as flt(u_i u_j), etc.
+ if (SGSNeedMixedFilteredQuantities) {
+ if (this->SGSUtil_ComputeMixedFilteredQuantities() == FAIL) {
+ fprintf(stderr, "grid::MHDSourceTerms: Error in SGSUtil_ComputeMixedFilteredQuantities().\n");
+ return FAIL;
+ }
+
+ }
+
+ // SGSFilterWidth == 1
+ } else {
+
+ /* we don't need a special check for SGSNeedJacobians here as all models apart
+ * from the scale-similarity model need Jacobians and the scale-similarity model
+ * always has SGSFilterWidth > 1.
+ */
+ if (this->SGSUtil_ComputeJacobian(JacVel,BaryonField[Vel1Num],BaryonField[Vel2Num],BaryonField[Vel3Num]) == FAIL) {
+ fprintf(stderr, "grid::MHDSourceTerms: Error in SGSUtil_ComputeJacobian(Vel).\n");
+ return FAIL;
+ }
+ }
+
+ if (this->SGSAddMomentumTerms(dU) == FAIL) {
+ fprintf(stderr, "grid::SourceTerms: Error in SGSAddMomentumTerms(dU).\n");
+ return FAIL;
+ }
+
+ }
+
/* Add centrifugal force for the shearing box */
https://bitbucket.org/enzo/enzo-dev/commits/20cc80373d61/
Changeset: 20cc80373d61
Branch: week-of-code
User: pgrete
Date: 2017-06-27 08:57:26+00:00
Summary: Merged enzo/enzo-dev into week-of-code
Affected #: 232 files
diff -r 7625ee3b9da4 -r 20cc80373d61 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -14,3 +14,4 @@
afb6ed40470bb505ab36a52c1bb1dd2944e3d9b9 enzo-2.3
d073462b1884d9653fc6dc2511a8bcdbb281993f enzo-2.4
2984068d220f8fce3470447e9c26be383ba48c9f enzo-2.5
+6300a72aca0a4d968bed49186e69f7cd5c7ce58c gold-standard-v1
diff -r 7625ee3b9da4 -r 20cc80373d61 bitbucket-pipelines.yml
--- /dev/null
+++ b/bitbucket-pipelines.yml
@@ -0,0 +1,7 @@
+# Use the default image
+
+pipelines:
+ default:
+ - step:
+ script:
+ - bash test.sh
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/developer_guide/EnzoTestSuite.rst
--- /dev/null
+++ b/doc/manual/source/developer_guide/EnzoTestSuite.rst
@@ -0,0 +1,570 @@
+.. _EnzoTestSuite:
+
+Enzo Test Suite
+===============
+
+The Enzo test suite is a set of tools whose purpose is to perform
+regression tests on the Enzo codebase, in order to help developers
+discover bugs that they have introduced, to verify that the code is
+producing correct results on new computer systems and/or compilers,
+and, more generally, to demonstrate that Enzo is behaving as expected
+under a wide variety of conditions.
+
+What's in the test suite?
+-------------------------
+
+The suite is composed of a large number of individual test problems
+that are designed to span the range of physics and dimensionalities
+that are accessible using the Enzo code, both separately and in
+various permutations. Tests can be selected based on a variety of
+criteria, including (but not limited to) the physics included, the
+estimated runtime of the test, and the dimensionality. The
+testing suite runs enzo on each selected test problem, produces
+a series of outputs, and then uses yt to process these outputs
+in a variety of different ways (making projections, looking at
+fields, etc.). The results of these yt analyses are then compared
+against similarly generated results from an earlier "good" version
+of the enzo code run on the same problems. In test problems where
+we have them, analytical solutions are compared against the test
+results (e.g. shocktubes). Lastly, a summary of these test results
+are returned to the user for interpretation.
+
+One can run individual tests or groups of tests using the various run time
+flags_. For convenience, three pre-created, overlapping sets of tests are
+provided. For each set of tests, one must generate their own standard locally
+against which she can compare different builds of the code.
+
+1. The "quick suite" (``--suite=quick``). This is composed of
+small calculations that test critical physics packages both
+alone and in combination. The intent of this package is to be run
+automatically and relatively frequently (multiple times a day) on
+a remote server to ensure that bugs have not been introduced during the code
+development process. All runs in the quick suite use no more than
+a single processor. The total run time should be about 15 minutes
+on the default lowest level of optimization.
+
+2. The "push suite" (``--suite=push``). This is a slightly
+larger set of tests, encompassing all of the quick suite and
+some additional larger simulations that test a wider variety of physics
+modules. The intent of this package is to provide a thorough validation
+of the code prior to changes being pushed to the main repository. The
+total run time is roughly 60 minutes for default optimization, and
+all simulations use only a single processor.
+
+3. The "full suite" (``--suite=full``). This encompasses essentially
+all of test simulations contained within the run directory. This suite
+provides the most rigorous possible validation of the code in many different
+situations, and is intended to be run prior to major changes being pushed
+to the stable branch of the code. A small number of simulations in the full
+suite are designed to be run on 2 processors and will take multiple hours to
+complete. The total run time is roughly 60 hours for the default lowest
+level of optimization.
+
+.. _running:
+
+How to run the test suite
+-------------------------
+
+
+1. **Compile Enzo.** If you have already built enzo, you can skip this step and
+the test will use your existing enzo executable. To compile enzo with the
+standard settings, complete these commands:
+
+::
+
+ $ cd <enzo_root>
+ $ ./configure
+ $ cd ./src/enzo
+ $ make load-config-allphysics
+ $ make clean
+ $ make
+
+Note that you need not copy the resulting enzo executable to your path,
+since the enzo.exe will be symbolically linked from the src/enzo directory
+into each test problem directory before tests are run.
+
+This build configuration requires that the Hypre and Grackle libraries are
+installed and visible in your compiler's search paths. If you do not have these
+libraries available, then you can set:
+
+::
+
+ $ make grackle-no
+ $ make hypre-no
+
+.. note::
+
+ If Enzo is compiled without support for the grackle and hypre libraries, tests
+ of Enzo modules that depend on these libraries will likely fail.
+
+
+2. **Install the necessary Python libraries** The test suite works
+ with both Python 2.x and Python 3.x, but requires python-hglib
+ (https://pypi.python.org/pypi/python-hglib) to access Mercurial.
+ This should be installable via pip.
+
+
+3. **Get the correct yt version** The enzo tests are generated and compared
+using the yt analysis suite. You must be using yt 3.3.0 or newer in order for
+the test suite to work. If you do not yet have yt, visit
+http://yt-project.org/#getyt for installation instructions. If you already have
+yt and yt is in your path, make sure you are using the latest version of yt by
+running the following commands:
+
+::
+
+ $ cd /path/to/yt_mercurial_repository
+ $ hg update yt
+ $ python setup.py develop
+
+4. **Generate answers to test with.** Run the test suite with these flags within
+the ``run/`` subdirectory in the enzo source hierarchy:
+
+::
+
+ $ cd <enzo_root>/run
+ $ ./test_runner.py --suite=quick -o <output_dir> --answer-store
+ --answer-name=<test_name> --local
+
+Note that we're creating test answers in this example with the quick suite, but
+we could just as well create a reference from any number of test problems using
+other test problem flags_.
+
+Here, we are storing the results from our tests locally in a file called
+<test_name> which will now reside inside of the ``<output_dir>``. If you want
+to, you can leave off ``--answer-name`` and get a sensible default.
+
+.. _directory layout:
+
+::
+
+ $ ls <output_dir>
+ fe7d4e298cb2 <test_name>
+
+ $ ls <output_dir>/<test_name>
+ <test_name>.db
+
+When we inspect this directory, we now see that in addition to the subdirectory
+containing the simulation results, we also have a <test_name> subdirectory which
+contains python-readable shelve files, in this case a dbm file. These are the
+files which actually contain the reference standard. You may have a different
+set of files or extensions depending on which OS you are using, but don't worry
+Python can read this no problem. Congratulations, you just produced your own
+reference standard. Feel free to test against this reference standard or tar
+and gzip it up and send it to another machine for testing.
+
+
+5. **Run the test suite using your local answers.** The testing suite operates
+by running a series of enzo test files throughout the ``run`` subdirectory.
+Note that if you want to test a specific enzo changeset, you must update to it
+and recompile enzo. You can initiate the quicksuite test simulations and their
+comparison against your locally generated answers by running the following
+commands:
+
+::
+
+ $ cd <enzo_root>/run
+ $ ./test_runner.py --suite=quick -o <output_dir> --answer-name=<test_name>
+ --local --clobber
+
+In this command, ``--output-dir=<output_dir>`` instructs the test runner to
+output its results to a user-specified directory (preferably outside of the enzo
+file hierarchy). Make sure this directory is created before you call
+test_runner.py, or it will fail. The default behavior is to use the quick
+suite, but you can specify any set of tests using the ``--suite`` or ``--name``
+flags_. We are comparing the simulation results against a local (``--local``)
+reference standard which is named ``<test_name>`` also located in the
+``<output_dir>`` directory. Note, we included the ``--clobber`` flag to rerun
+any simulations that may have been present in the ``<output_dir>`` under the
+existing enzo version's files, since the default behavior is to not rerun
+simulations if their output files are already present. Because we didn't set
+the ``--answer-store`` flag, the default behavior is to compare against the
+``<test_name>``.
+
+
+6. **Review the results.** While the test_runner is executing, you should see
+the results coming up at the terminal in real time, but you can review these
+results in a file output at the end of the run. The test_runner creates a
+subdirectory in the output directory you provided it, as shown in the example
+below.
+
+::
+
+ $ ls <output_dir>
+ fe7d4e298cb2
+
+ $ ls <output_dir>/fe7d4e298cb2
+ Cooling GravitySolver MHD test_results.txt
+ Cosmology Hydro RadiationTransport version.txt
+
+The name of this directory will be the unique hash of the version of
+enzo you chose to run with the testing suite. In this case it is
+``fe7d4e298cb2``, but yours will likely be different, and equally
+unintelligible. You can specify an optional additional suffix to be
+appended to this directory name using ``--run-suffix=<suffix>``. This
+may be useful to distinguish multiple runs of a given version of enzo,
+for example with different levels of optimization. Within this
+directory are all of the test problems that you ran along with their
+simulation outputs, organized based on test type (e.g. ``Cooling``,
+``AMR``, ``Hydro``, etc.) Additionally, you should see a file called
+``test_results.txt``, which contains a summary of the test runs and
+which ones failed and why.
+
+My tests are failing and I don't know why
+-----------------------------------------
+
+A variety of things cause tests to fail: differences in compiler,
+optimization level, operating system, MPI submission method,
+and of course, your modifications to the code. Go through your
+``test_results.txt`` file for more information about which tests
+failed and why. You could try playing with the relative tolerance
+for error using the ``--tolerance`` flag as described in the flags_
+section. For more information regarding the failures of a specific
+test, examine the ``estd.out`` file in that test problem's subdirectory
+within the ``<output_dir>`` directory structure, as it contains the
+``STDERR`` and ``STDOUT`` for that test simulation.
+
+If you are receiving ``EnzoTestOutputFileNonExistent`` errors, it
+means that your simulation is not completing. This may be due to
+the fact that you are trying to run enzo with MPI which your
+system doesn't allow you to initiate from the command line.
+(e.g. it expects you to submit mpirun jobs to the queue).
+You can solve this problem by recompiling your enzo executable with
+MPI turned off (i.e. ``make use-mpi-no``), and then just pass the
+local_nompi machine flag (i.e. ``-m local_nompi``) to your
+test_runner.py call to run the executable directly without MPI support.
+Currently, only a few tests use multiple cores, so this is not a
+problem in the quick or push suites.
+
+If you see a lot of ``YTNoOldAnswer`` errors, it may mean that your simulation
+is running to a different output than the one reached when your locally
+generated answers were created, and the test suite is trying to compare your last output
+file against a non-existent file in the answers. Look carefully at the
+results of your simulation for this test problem using the provided python file
+to determine what is happening. Or it may simply mean that you specified the
+wrong answer name.
+
+.. _flags:
+
+Descriptions of all the testing suite flags
+-------------------------------------------
+
+You can type ``./test_runner.py --help`` to get a quick summary of all
+of the command line options for the testing suite. Here is a more
+thorough explanation of each.
+
+**General flags**
+
+``-h, --help``
+ list all of the flags and their argument types (e.g. int, str, etc.)
+
+``-o str, --output-dir=str`` default: None
+ Where to output the simulation and results file hierarchy. Recommended
+ to specify outside of the enzo source hierarchy.
+
+``-m str, --machine=str`` default: local
+ Specify the machine on which you're running your tests. This loads
+ up a machine-specific method for running your tests. For instance,
+ it might load qsub or mpirun in order to start the enzo executable
+ for the individual test simulations. You can only use machine
+ names of machines which have a corresponding machine file in the
+ ``run/run_templates`` subdirectory (e.g. nics-kraken). *N.B.*
+ the default, ``local``, will attempt to run the test simulations using
+ mpirun, so if you are required to queue on a machine to execute
+ mpirun, ``test_runner.py`` will silently fail before finishing your
+ simulation. You can avoid this behavior by compiling enzo without
+ MPI and then setting the machine flag to ``local_nompi``.
+
+``--repo=str`` default: current directory
+ Path to repository being tested.
+
+``--interleave`` default: False
+ Interleaves preparation, running, and testing of each
+ individual test problem as opposed to default batch
+ behavior.
+
+``--clobber`` default: False
+ Rerun enzo on test problems which already have
+ results in the destination directory
+
+``--tolerance=int`` default: see ``--strict``
+ Sets the tolerance of the relative error in the
+ comparison tests in powers of 10.
+
+ Ex: Setting ``--tolerance=3`` means that test results
+ are compared against the standard and fail if
+ they are off by more than 1e-3 in relative error.
+
+``--bitwise`` default: see ``--strict``
+ Declares whether or not bitwise comparison tests
+ are included to assure that the values in output
+ fields exactly match those in the reference standard.
+
+``--strict=[high, medium, low]`` default: low
+ This flag automatically sets the ``--tolerance``
+ and ``--bitwise`` flags to some arbitrary level of
+ strictness for the tests. If one sets ``--bitwise``
+ or ``--tolerance`` explicitly, they trump the value
+ set by ``--strict``. When testing enzo general
+ functionality after an installation, ``--strict=low``
+ is recommended, whereas ``--strict=high`` is suggested
+ when testing modified code against a local reference
+ standard.
+
+ ``high``: tolerance = 13, bitwise = True
+ ``medium``: tolerance = 6, bitwise = False
+ ``low``: tolerance = 3, bitwise = False
+
+``--sim-only`` default: False
+ Only run simulations, do not store the tests or compare them against a
+ standard.
+
+``--test-only`` default: False
+ Only perform tests on existing simulation outputs, do not rerun the simulations.
+
+``--time-multiplier=int`` default: 1
+ Multiply simulation time limit by this factor. Useful if you're on a slow
+ machine or you cannot finish the specified tests in their allocated time.
+
+``--run-suffix=str`` default: None
+ An optional suffix to append to the test run directory. Useful
+ to distinguish multiple runs of a given changeset.
+
+``-v, --verbose`` default: False
+ Verbose output in the testing sequence. Very good for tracking down
+ specific test failures.
+
+``--pdb`` default: False
+ When a test fails a pdb session is triggered. Allows interactive inspection
+ of failed test data.
+
+``--changeset=str`` default: latest
+ Changeset to use in simulation repo. If supplied,
+ make clean && make is also run
+
+
+**Flags for storing, comparing against different standards**
+
+``--answer-store`` default: False
+ Should we store the results as a reference or just compare
+ against an existing reference?
+
+``--answer-name=str`` default: latest gold standard
+ The name of the file where we will store our reference results,
+ or if ``--answer-store`` is false, the name of the reference against
+ which we will compare our results.
+
+``--local`` default: False
+ Store/Compare the reference standard locally (i.e. not on the cloud)
+
+
+**Flags not used**
+
+``--with-answer-testing`` default: False
+ DO NOT USE. This flag is used in the internal yt answer testing
+ and has no purpose in the enzo testing infrastructure.
+
+``--answer-big-data`` default: False
+ DO NOT USE. This flag is used in the internal yt answer testing
+ and has no purpose in the enzo testing infrastructure.
+
+**Flags for specifying test problems**
+
+These are the various means of specifying which test problems you want
+to include in a particular run of the testing suite.
+
+``--suite=[quick, push, full]`` default: None
+ A precompiled collection of several different test problems.
+ quick: 37 tests in ~15 minutes, push: 48 tests in ~30 minutes,
+ full: 96 tests in ~60 hours.
+
+``--answer_testing_script=str`` default: None
+
+``--AMR=bool`` default: False
+ Test problems which include AMR
+
+``--author=str`` default: None
+ Test problems authored by a specific person
+
+``--chemistry=bool`` default: False
+ Test problems which include chemistry
+
+``--cooling=bool`` default: False
+ Test problems which include cooling
+
+``--cosmology=bool`` default: False
+ Test problems which include cosmology
+
+``--dimensionality=[1, 2, 3]``
+ Test problems in a particular dimension
+
+``--gravity=bool`` default: False
+ Test problems which include gravity
+
+``--hydro=bool`` default: False
+ Test problems which include hydro
+
+``--max_time_minutes=float``
+ Test problems which finish under a certain time limit
+
+``--mhd=bool`` default: False
+ Test problems which include MHD
+
+``--name=str`` default: None
+ A test problem specified by name
+
+``--nprocs=int`` default: 1
+ Test problems which use a certain number of processors
+
+``--problematic=bool`` default: False
+ Test problems which are deemed problematic
+
+``--radiation=[None, fld, ray]`` default: None
+ Test problems which include radiation
+
+``--runtime=[short, medium, long]`` default: None
+ Test problems which are deemed to have a certain predicted runtime
+
+
+.. _bisect:
+
+How to track down which changeset caused your test failure
+----------------------------------------------------------
+
+In order to identify changesets that caused problems, we have
+provided the ``--bisect`` flag. This runs hg bisect on revisions
+between those which are marked as --good and --bad.
+
+hg bisect automatically manipulates the repository as it runs its
+course, updating it to various past versions of the code and
+rebuilding. In order to keep the tests that get run consistent through
+the course of the bisection, we recommend having two separate enzo
+installations, so that the specified repository (using ``--repo``) where
+this rebuilding occurs remains distinct from the repository where the
+testing is run.
+
+To minimize the number of tests run, bisection is only run on tests
+for which ``problematic=True``. This must be set by hand by the user
+before running bisect. It is best that this is a single test problem,
+though if multiple tests match that flag, failures are combined with "or"
+
+
+An example of using this method is as follows:
+
+::
+
+ $ echo "problematic = True" >> Cosmology/Hydro/AdiabaticExpansion/AdiabaticExpansion.enzotest
+ $ ./test_runner.py --output-dir=/scratch/dcollins/TESTS --repo=/SOMEWHERE_ELSE
+ --answer-compare-name=$mylar/ac7a5dacd12b --bisect --good=ac7a5dacd12b
+ --bad=30cb5ff3c074 -j 8
+
+To run preliminary tests before bisection, we have also supplied the
+``--changeset`` flag. If supplied, ``--repo`` is updated to
+``--changeset`` and compiled. Compile errors cause ``test_runner.py``
+to return that error, otherwise the tests/bisector is run.
+
+.. _new_test:
+
+How to add a new test to the library
+------------------------------------
+
+It is hoped that any newly-created or revised physics module will be
+accompanied by one or more test problems, which will ensure the
+continued correctness of the code. This sub-section explains the
+structure of the test problem system as well as how to add a new test
+problem to the library.
+
+Test problems are contained within the ``run/`` directory in the
+Enzo repository. This subdirectory contains a tree of directories
+where test problems are arranged by the primary physics used in that
+problem (e.g., Cooling, Hydro, MHD). These directories may be further
+broken down into sub-directories (Hydro is broken into Hydro-1D,
+Hydro-2D, and Hydro-3D), and finally into individual directories
+containing single problems. A given directory contains, at minimum,
+the Enzo parameter file (having extension ``.enzo``, described in
+detail elsewhere in the manual) and the Enzo test suite parameter file
+(with extension ``.enzotest``). The latter contains a set of
+parameters that specify the properties of the test. Consider the test
+suite parameter file for InteractingBlastWaves, which can be found in the
+``run/Hydro/Hydro-1D/InteractingBlastWaves`` directory:
+
+::
+
+ name = 'InteractingBlastWaves'
+ answer_testing_script = None
+ nprocs = 1
+ runtime = 'short'
+ hydro = True
+ gravity = False
+ AMR = True
+ dimensionality = 1
+ max_time_minutes = 1
+ fullsuite = True
+ pushsuite = True
+ quicksuite = True
+
+This allows the user to specify the dimensionality, physics used, the
+runtime (both in terms of 'short', 'medium', and 'long' calculations,
+and also in terms of an actual wall clock time). A general rule for
+choosing the runtime value is 'short' for runs taking less than 5 minutes,
+'medium' for runs taking between 5 and 30 minutes, and 'long' for runs taking
+more than 30 minutes. If the test problem runs successfully in any amount
+of time, it should be in the full suite, selected by setting
+``fullsuite=True``. If the test runs in a time that falls under 'medium'
+or 'short', it can be added to the push suite (``pushsuite=True``). If
+the test is 'short' and critical to testing the functionality of the code,
+add it to the quick suite (``quicksuite=True``).
+
+Once you have created a new problem type in Enzo and thoroughly
+documented the parameters in the Enzo parameter list, you should
+follow these steps to add it as a test problem:
+
+1. Create a fork of Enzo.
+
+2. Create a new subdirectory in the appropriate place in the
+``run/`` directory. If your test problem uses multiple pieces of
+physics, put it under the most relevant one.
+
+3. Add an Enzo parameter file, ending in the extension ``.enzo``,
+for your test problem to that subdirectory.
+
+4. Add an Enzo test suite parameter file, ending in the extension
+``.enzotest``. In that file, add any relevant parameters as described
+above.
+
+5. By default, the final output of any test problem will be tested by
+comparing the min, max, and mean of a set of fields. If you want to
+have additional tests performed, create a script in the problem type
+directory and set the ``answer_testing_script`` parameter in the
+``.enzotest`` file to point to your test script. For an example of
+writing custom tests, see
+``run/Hydro/Hydro-3D/RotatingCylinder/test_rotating_cylinder.py``.
+
+6. Submit a Pull Request with your changes and indicate that you have
+created a new test to be added to the testing suites.
+
+Congratulations, you've created a new test problem!
+
+
+What to do if you fix a bug in Enzo
+-----------------------------------
+
+It's inevitable that bugs will be found in Enzo, and that some of
+those bugs will affect the actual simulation results (and thus the
+test problems used in the problem suite). Here is the procedure to
+follow when fixing such a bug:
+
+1. Run the "push suite" of test problems (``--pushsuite=True``)
+for your newly-revised version of Enzo, and determine which test
+problems now fail.
+
+2. Visually inspect the failed solutions, to ensure that your new
+version is actually producing the correct results!
+
+3. Email the enzo-developers mailing list at
+enzo...@googlegroups.com to explain your bug fix, and to show the
+results of the now-failing test problems.
+
+4. Create a pull request for your fix.
+
+.. _http://yt-project.org/#getyt: http://yt-project.org/#getyt
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/developer_guide/NewLocalOperator.rst
--- a/doc/manual/source/developer_guide/NewLocalOperator.rst
+++ b/doc/manual/source/developer_guide/NewLocalOperator.rst
@@ -32,7 +32,7 @@
#. Read it, and understand the structure. The flowcharts can help,
- they can be found in :doc:`../user_guide/FlowChart`.
+ they can be found in :doc:`../supplementary_info/FlowChart`.
#. Add a parameter to drive your code in :doc:`AddingNewParameters`
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/developer_guide/NewTestProblem1.rst
--- a/doc/manual/source/developer_guide/NewTestProblem1.rst
+++ b/doc/manual/source/developer_guide/NewTestProblem1.rst
@@ -27,7 +27,7 @@
so read these pages carefully.
We strongly recommend reading everything that proceeds this page on
-the :doc:`../tutorials/index` page and the page about version control
+the :doc:`../user_guide/index` page and the page about version control
and regression testing, :doc:`ModificationIntro`.
Lastly, please give your problem a reasonable name. I'll be using
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/developer_guide/index.rst
--- a/doc/manual/source/developer_guide/index.rst
+++ b/doc/manual/source/developer_guide/index.rst
@@ -13,6 +13,7 @@
ModificationIntro.rst
ProgrammingGuide.rst
+ EnzoTestSuite.rst
FilenameConventions.rst
DebuggingWithGDB.rst
FineGrainedOutput.rst
@@ -29,3 +30,4 @@
using_mhd.rst
mhdct_details.rst
DoingARelease.rst
+
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/developer_guide/mhdct_details.rst
--- a/doc/manual/source/developer_guide/mhdct_details.rst
+++ b/doc/manual/source/developer_guide/mhdct_details.rst
@@ -181,7 +181,7 @@
The staggered magnetic field is stored in ``MagneticField``, and electric field, ``ElectricField``, is
centered on the zone edges. ``MagneticField``, being stored on the faces of the
zones, has one additional point along each component. For instance, if a ``grid`` had dimensions :math:`n_x, n_y, n_z` then
- :math:`B_x` will have dimensions :math:`n_x+1, n_y, n_z`. ``ElectricField`` has additional points transverse to the direction
+:math:`B_x` will have dimensions :math:`n_x+1, n_y, n_z`. ``ElectricField`` has additional points transverse to the direction
of the component, so :math:`E_x` has dimensions :math:`n_x, n_y+1, n_z+1`.
There are several helper variables, such as ``MagneticDims[3][3]``,
``ElectricDims[3][3]``, ``MagneticSize[3]``, and ``ElectricSize[3]`` to describe
@@ -231,28 +231,3 @@
magnetic field. This is formally equivalent to projection plus flux correction,
but doesn't have as many cases to check and grid interactions to worry about.
This is done in ``EvolveLevel`` by the routine ``Grid_MHD_UpdateMagneticField``
-
-Future Work (or, "Projects for Interested Students")
-----------------------------------------------------
-
-Most neighbor searching throughout Enzo is done with the Fast Neighbor Locator,
-which uses a chaining mesh to identify neighbors. This is not done for the
-communication done in ``SendOldFineGrids,`` but should be.
-
-Additionally, both ``SendOldFineGrids`` and the electric field projection need
-to be updated to work with the 3 phase non-blocking communication
-
-In principle, the CT machinery can be used in conjunction with the MHD-RK
-machinery. Interested students can contact dcollins for further instruction.
-
-Presently MHD-CT needs additional layers of ghost zones over the base hydro. I
-believe that I can reduce this by communicating the electric field, which will
-improve memory overhead. Again, interested parties can contact me for details.
-
-Multi-species needs to be tested.
-
-The mhd interpolation routine, ``mhd_interpolate.F``, could use to be re-factored. The interested
-student can feel free to contact David Collins.
-
-
-
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/developer_guide/using_mhd.rst
--- a/doc/manual/source/developer_guide/using_mhd.rst
+++ b/doc/manual/source/developer_guide/using_mhd.rst
@@ -18,15 +18,8 @@
Difficulty Easy Two pitfalls
====================== ==================== ===============
-
-Cosmology
-=========
-
-See the note at the bottom of the page for cosmology details. The method papers
-are now out of date.
-
Use of Dedner
-=============
+-------------
The Dedner method (``HydroMethod = 4``) is relatively straight forward.
The three magnetic components are stored in ``BaryonField``, with the relevant
@@ -43,7 +36,7 @@
Use of MHD-CT
-=============
+-------------
Use of MHD-CT (``HydroMethod = 6``) is somewhat complicated by the staggered nature of the magnetic field. This allows the
field to be updated by the curl of an electric field, thus preserving
@@ -84,7 +77,7 @@
<http://adsabs.harvard.edu/abs/2010ApJS..186..308C>`_.
Controlling MHD in the code
-===========================
+---------------------------
Within the code, there are several flags to control use of magnetic fields.
@@ -109,7 +102,7 @@
Implementation details for MHDCT can be found in :ref:`mhdct_details`
Cosmology
-=========
+---------
As of January 2015, the cosmology has been modified slightly in MHDCT. This was
done in order to rectify the treatment of cosmology in the bulk of the code as
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/faq/index.rst
--- /dev/null
+++ b/doc/manual/source/faq/index.rst
@@ -0,0 +1,81 @@
+Frequently Asked Questions
+==========================
+
+Building Enzo
+-------------
+
+
+**Q: I’m getting a compilation error that looks something like:**
+::
+
+ Grid_ComputeCoolingTime.C(201): error: expression must have class type
+ if (grackle_data.use_grackle == TRUE)
+
+**what does this mean?**
+
+
+A: Check which branch of Enzo you are using (``$ hg branch``). This issue
+is likely caused by a version mismatch between Enzo and Grackle (2.0
+vs. 3.0). Currently the stable branch only works with Grackle 2.0, and the
+development branch (``$ hg checkout week-of-code``) works with Grackle 3.0. We
+recommend switching to using the development branch and using the most recent
+version of Grackle 3.0 instead of using Grackle 2.0 if you need Grackle for
+your simulations.
+
+
+**Q: I’m getting a compilation error related to HDF5. What is HDF5 and how do I get it?**
+
+A: HDF5 is a data format with accompanying library for writing very large
+data sets. Enzo uses HDF5 for data output. If you do not have a version of HDF5
+available on your machine, you can download binaries or source code for HDF5
+from https://www.hdfgroup.org/downloads/hdf5/. Once you have a version of HDF5
+installed on your machine, you need to notify Enzo where it is located for the
+build process in the Makefile (eg. ``Make.mach.linux-gnu`` or
+``Make.mach.my-machine``). For example, if HDF5 was installed in
+``/home/enzo-user/local/hdf5/``, you would edit the line
+::
+
+ LOCAL_HDF5_INSTALL = /home/enzo-user/local/hdf5
+
+then run
+::
+
+ $ make machine-linux-gnu
+ $ make clean
+ $ make
+
+to rebuild enzo.exe with your HDF5 installation. When running enzo.exe, make
+sure that the HDF5 library is in ``LD_LIBRARY_PATH``. In this example, if you
+are running bash, run the command
+::
+
+ $ export LD_LIBRARY_PATH=/home/enzo-user/local/hdf5/lib/:$LD_LIBRARY_PATH
+
+to put the HDF5 library in the library path before running Enzo.
+
+
+Running Simulations
+-------------------
+
+Common Crashes
+--------------
+
+
+Misc.
+-----
+
+
+**Q: What is the difference between enzo-dev (week-of-code) and the stable
+branch? Should I only use the stable branch?**
+
+A:
+
+The "week-of-code" branch of enzo-dev is the primary development branch, which
+is updated on a fairly regular basis (the name "week-of-code" is historical).
+Changes are migrated into the stable branch on a roughly annual basis. In
+general, if you want code that is somewhat more reliable but may be
+significantly behind the cutting-edge Enzo version, you should use the 'stable'
+branch. If you are comfortable with more recent (and thus possibly less
+reliable) code, you should use the "week-of-code" branch.
+
+
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/index.rst
--- a/doc/manual/source/index.rst
+++ b/doc/manual/source/index.rst
@@ -19,13 +19,15 @@
:maxdepth: 2
EnzoLicense.rst
- tutorials/index.rst
user_guide/index.rst
+ supplementary_info/index.rst
parameters/index.rst
physics/index.rst
+ faq/index.rst
developer_guide/index.rst
reference/index.rst
presentations/index.rst
Enzo Mailing Lists
-----------------------
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/parameters/cooling.rst
--- a/doc/manual/source/parameters/cooling.rst
+++ b/doc/manual/source/parameters/cooling.rst
@@ -81,13 +81,13 @@
(Metal_Density) and two additional metal fields (Z_Field1 and
Z_Field2). Acceptable values are 1 or 0, Default: 0 (off).
``ThreeBodyRate`` (external)
- Which Three Body rate should be used for H2 formation?: 0 = Abel, Bryan, Norman 2002, 1 = PSS83, 2= CW83, 3 = FH07, 4= G08. (Turk et al 2011 covers these)
+   Which Three Body rate should be used for H2 formation?: 0 = Abel, Bryan, Norman 2002, 1 = PSS83, 2= CW83, 3 = FH07, 4= G08. (See `Turk et al 2011 <http://adsabs.harvard.edu/abs/2011ApJ...726...55T>`_)
``CIECooling`` (external)
- Should CIE (Ripamonti & Abel 2004) cooling be included at high densities?
+   Should CIE (`Ripamonti & Abel 2004 <http://adsabs.harvard.edu/abs/2004MNRAS.348.1019R>`_) cooling be included at high densities?
``H2OpticalDepthApproximation`` (external)
- Should the H2 cooling be attenuated (RA04)?
+   Should the H2 cooling be attenuated? Taken from `Ripamonti & Abel 2004 <http://adsabs.harvard.edu/abs/2004MNRAS.348.1019R>`_. Default: 1?
``H2FormationOnDust`` (external)
- Turns on H2 formation on dust grains and gas-grain heat transfer following Omukai (2000). Default: 0 (OFF)
+   Turns on H2 formation on dust grains and gas-grain heat transfer following `Omukai (2000) <http://adsabs.harvard.edu/abs/2000ApJ...534..809O>`_. Default: 0 (OFF)
``NumberOfDustTemperatureBins`` (external)
Number of dust temperature bins for the dust cooling and H2 formation rates. Default: 250
``DustTemperatureStart`` (external)
@@ -98,7 +98,7 @@
Flag to write out the dust temperature field. Default: 0
``PhotoelectricHeating`` (external)
If set to be 1, the following parameter will be added uniformly
- to the gas without any shielding (Tasker & Bryan 2008). Default: 0
+   to the gas without any shielding (`Tasker & Bryan 2008 <http://adsabs.harvard.edu/abs/2008ApJ...673..810T>`_). Default: 0
``PhotoelectricHeatingRate`` (external)
This is the parameter used as Gamma_pe for uniform photoelectric heating.
Default: 8.5e-26 erg s^-1 cm^-3
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/parameters/gravity.rst
--- a/doc/manual/source/parameters/gravity.rst
+++ b/doc/manual/source/parameters/gravity.rst
@@ -19,27 +19,6 @@
This is the gravitational constant to be used in code units. For cgs units it
should be 4\*pi\*G. For cosmology, this value must be 1 for the
standard units to hold. A more detailed decription can be found at :ref:`EnzoInternalUnits`. Default: 4\*pi.
-``GreensFunctionMaxNumber`` (external)
- The Green's functions for the gravitational potential depend on the
- grid size, so they are calculated on a as-needed basis. Since they
- are often re-used, they can be cached. This integer indicates the
- number that can be stored. They don't take much memory (only the
- real part is stored), so a reasonable number is 100. [Ignored in
- current version]. Default: 1
-``GreensFunctionMaxSize``
- Reserved for future use.
-``S2ParticleSize`` (external)
- This is the gravitational softening radius, in cell widths, in
- terms of the S2 particle described by Hockney and Eastwood in their
- book Computer Simulation Using Particles. A reasonable value is
- 3.0. [Ignored in current version]. Default: 3.0
-``GravityResolution`` (external)
- This was a mis-guided attempt to provide the capability to increase
- the resolution of the gravitational mesh. In theory it still works,
- but has not been recently tested. Besides, it's just not a good
- idea. The value (a float) indicates the ratio of the gravitational
- cell width to the baryon cell width. [Ignored in current version].
- Default: 1
``PotentialIterations`` (external)
Number of iterations to solve the potential on the subgrids. Values
less than 4 sometimes will result in slight overdensities on grid
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/parameters/hierarchy.rst
--- a/doc/manual/source/parameters/hierarchy.rst
+++ b/doc/manual/source/parameters/hierarchy.rst
@@ -250,7 +250,7 @@
``MustRefineParticlesRefineToLevel`` using the boxsize and redshift
information. Default: 0 (FALSE)
``MustRefineParticlesMinimumMass`` (external)
- This was an experimental parameter to set a minimum for ``MustRefineParticles``. Default: 0.0
+ This was an experimental parameter to set a minimum for ``MustRefineParticles``. Default: 0.0
``MustRefineParticlesRegionLeftEdge`` (external)
Bottom-left corner of a region in which dark matter particles are flagged
as ``MustRefineParticles`` in nested cosmological simulations. To be used with
@@ -286,6 +286,7 @@
``AvoidRefineRegionLevel[#]`` (external)
This parameter is used to limit the refinement to this level in a
rectangular region. Up to MAX_STATIC_REGIONS regions can be used.
+ Default: IND_UNDEFINED
``AvoidRefineRegionLeftEdge[#]``, ``AvoidRefineRegionRightEdge[#]`` (external)
These two parameters specify the two corners of a region that
limits refinement to a certain level (see the previous
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/parameters/hydro.rst
--- a/doc/manual/source/parameters/hydro.rst
+++ b/doc/manual/source/parameters/hydro.rst
@@ -329,12 +329,13 @@
This parameter is used to add resistivity and thereby update magnetic fields in some set-ups; see ComputeResistivity in hydro_rk/Grid_AddResistivity.C. Default: 0
``UsePhysicalUnit`` (external)
For some test problems (mostly in hydro_rk), the relevant parameters could be defined in physical CGS units. Default: 0
+``MixSpeciesAndColors`` (external)
+ This parameter enables color fields to be evolved as species in the MUSCL solvers. If ``PopIIISupernovaUseColour`` is on, this must also be turned on to trace the metal field. Default: 1
+
``SmallT`` (external)
Minimum value for temperature in hydro_rk/EvolveLevel_RK.C. Default: 1e-10 (note that the default value assumes UsePhysicalUnit = 1)
``SmallP``
[not used]
-``RKOrder``
- [not used]
``Theta_Limiter`` (external)
Flux limiter in the minmod Van Leer formulation. Must be between 1 (most dissipative) and 2 (least dissipative). Default: 1.5
``Coordinate`` (external)
@@ -358,14 +359,3 @@
``ResetMagneticFieldAmplitude`` (external)
The magnetic field values (in Gauss) that will be used for the
above parameter. Default: 0.0 0.0 0.0
-``CoolingCutOffDensity1``
- Reserved for future use
-``CoolingCutOffDensity2``
- Reserved for future use
-``CoolingCutOffTemperature``
- Reserved for future use
-``CoolingPowerCutOffDensity1``
- Reserved for future use
-``CoolingPowerCutOffDensity2``
- Reserved for future use
-
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/parameters/problemtypes.rst
--- a/doc/manual/source/parameters/problemtypes.rst
+++ b/doc/manual/source/parameters/problemtypes.rst
@@ -1,5 +1,5 @@
Problem Type Parameters
------------------------
+-----------------------
``ProblemType`` (external)
This integer specifies the type of problem to be run. Its value
@@ -1245,7 +1245,7 @@
.. _stochastic_forcing_param:
Turbulence Simulation with Stochastic Forcing (59)
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Typical quasi-isothermal "turbulence-in-a-box" problem with non-static driving field.
For details on stochastic forcing, see Schmidt et al. 2009 A&A 494, 127-145
http://dx.doi.org/10.1051/0004-6361:200809967
@@ -1694,7 +1694,7 @@
.. _agndisk_param:
AGN Disk (207)
-~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~
``DiskType`` (external)
Default: 1
@@ -1714,10 +1714,11 @@
Initial height of the disk. Default: 1
.. _poissonsolver_param:
-.. _shocktube_param:
+
+.. _cr_shocktube_param:
CR Shock Tube (250: unigrid and AMR)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Very similar to normal shock tube (see problem 1) but includes CR
component. See Salem, Bryan & Hummels (2014) for discussion.
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/parameters/starform.rst
--- a/doc/manual/source/parameters/starform.rst
+++ b/doc/manual/source/parameters/starform.rst
@@ -228,7 +228,7 @@
thermal energy at the end of the star's life. Units are in parsecs.
Default: 1.
``PopIIISupernovaUseColour`` (external)
- Set to 1 to trace the metals expelled from supernovae. Default: 0.
+ Set to 1 to trace the metals expelled from supernovae. If using ``HydroMethod`` 3 or 4, also set ``MixSpeciesAndColors`` to 1 to trace metals. Default: 0.
``PopIIIUseHypernovae`` (external)
Set to 1 to use the hypernova energies and metal ejecta masses
from Nomoto et al. (2006). If set to 0, then the supernova
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/physics/magnetic-feedback.png
Binary file doc/manual/source/physics/magnetic-feedback.png has changed
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/physics/star_particles.rst
--- a/doc/manual/source/physics/star_particles.rst
+++ b/doc/manual/source/physics/star_particles.rst
@@ -538,3 +538,61 @@
compiled into the executable. For a more stable version of the
algorithm, use Method 1.
+
+Magnetic Supernova Feedback
+----------------------------
+*Source: hydro_rk/SuperNovaSeedField.C*
+
+Select this method by setting ``UseSupernovaSeedFieldSourceTerms = 1``
+(Default = 0) and
+specifying the following parameters:
+
+``SupernovaSeedFieldTotalEnergy`` (in units of ergs) is the total amount
+of magnetic energy to be injected by a single supernova event. Default = 0.0.
+
+``SupernovaSeedFieldRadius`` (in units of parsecs) gives the scale over
+which to inject supernova energy. The injection mechanism normalizes the
+spatial exponential decay of the injected supernova energy so that all of the
+energy is contained within the specified radius. For this reason, the
+``SupernovaSeedFieldRadius`` should be at least 3 times the minimum cell width of
+the simulation. Default = 0.0.
+
+``SupernovaSeedFieldDuration`` (in units of Myr) gives the duration of the
+supernova magnetic energy injection. The injection mechanism is normalized so
+that all of the ``SupernovaSeedFieldTotalEnergy`` is injected over this
+time scale. In order to inject the correct amount of energy, ``SupernovaSeedFieldDuration`` should be set to at least 4
+times the minimum time step of the simulation. Default = 0.0.
+
+
+The following applies to Methods 0 (Cen & Ostriker) and 1 (+
+stochastic star formation). The magnetic feedback method is described fully in `Butsky et al. (2017)
+<https://arxiv.org/abs/1610.08528>`_.
+
+When a star cluster particle reaches the end of its lifetime, we inject a
+toroidal loop of magnetic field at its position in *hydro_rk/Grid_MHDSourceTerms*. The spatial and temporal
+evolution of the injected magnetic energy and magnetic field is chosen to be:
+
+ .. math::
+
+ \dot{U}_{B,\, {source}} = \tau^{-1} \frac{B_0^2}{4\pi} \frac{R}{L}
+ e^{-r^2/L^2} e^{-t/\tau} (1-e^{-t/\tau})\\
+ \mathbf{\dot{B}}_{source} = \tau^{-1} B_0 \left(\frac{R}{L}\right)^{1/2}
+ e^{- r^2 / 2L^2} e^{-t / \tau} \, \hat{\mathbf{e}}_\phi
+
+where t is the time since the 'death' of the star cluster particle,
+:math:`\tau` is the ``SupernovaSeedFieldDuration``, R is the cylindrical
+radius, r is the spherical radius, and L is the ``SupernovaSeedFieldRadius``.
+
+.. figure:: magnetic-feedback.png
+ :align: center
+ :scale: 25%
+ :alt: Magnetic feedback schematic.
+
+Two-dimensional schematic overview of the life cycle of a star
+cluster particle and two channels of its feedback. Left: Star cluster
+particle formation. Middle: Thermal feedback. Thermal energy by Type II
+supernova explosion is injected into the gas cell in which a star cluster
+particle of age less than 120 Myr resides. Right: Magnetic
+feedback. Toroidal magnetic fields are seeded within three finest cells
+from a star cluster particle.
+
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/reference/HierarchyFile.rst
--- /dev/null
+++ b/doc/manual/source/reference/HierarchyFile.rst
@@ -0,0 +1,297 @@
+The Enzo Hierarchy File - Explanation and Usage
+===============================================
+
+The Enzo Hierarchy file is a representation of the internal memory
+state of the entire hierarchy of grids. As such, its format --
+while somewhat obtuse at first -- reflects that context. Each grid
+entry has a set number of fields that describe its position in
+space, as well as the fields that are affiliated with that grid:
+
+Note: We are in the process of transitioning to an `HDF5-formatted
+Hierarchy File`_.
+
+.. highlight:: none
+
+::
+
+ Grid = 1
+ Task = 4
+ GridRank = 3
+ GridDimension = 38 22 22
+ GridStartIndex = 3 3 3
+ GridEndIndex = 34 18 18
+ GridLeftEdge = 0 0 0
+ GridRightEdge = 1 0.5 0.5
+ Time = 646.75066015177
+ SubgridsAreStatic = 0
+ NumberOfBaryonFields = 8
+ FieldType = 0 1 4 5 6 19 20 21
+ BaryonFileName = ./RD0005/RedshiftOutput0005.cpu0000
+ CourantSafetyNumber = 0.300000
+ PPMFlatteningParameter = 0
+ PPMDiffusionParameter = 0
+ PPMSteepeningParameter = 0
+ NumberOfParticles = 20
+ ParticleFileName = ./RD0005/RedshiftOutput0005.cpu0000
+ GravityBoundaryType = 0
+ Pointer: Grid[1]->NextGridThisLevel = 2
+
+The final field, starting with "Pointer", is slightly more
+complicated and will be discussed below.
+
+``Grid = 1``
+
+ This is the ID of the grid. Enzo grids are indexed internally
+ starting at 1.
+
+``Task = 4``
+
+  This grid was written by processor 4 and will be read in by it if
+  restarting with at least 5 processors.
+
+``GridRank = 3``
+
+ This is the dimensionality of the grid.
+
+``GridDimension = 38 22 22``
+
+ Dimensions, *including* ghost zones.
+
+``GridStartIndex = 3 3 3``
+
+ The first index of data values *owned* by this grid.
+
+``GridEndIndex = 34 18 18``
+
+ The final index *owned* by this grid. The active zones have
+ dimensionality of GridEndIndex - GridStartIndex + 1.
+
+``GridLeftEdge = 0 0 0``
+
+ In code units, between ``DomainLeftEdge`` and ``DomainRightEdge``,
+ the origin of this grid.
+
+``GridRightEdge = 1 0.5 0.5``
+
+ In code units, between ``DomainLeftEdge`` and ``DomainRightEdge``,
+ the right-edge of this grid. ``dx = (GridRightEdge -
+ GridLeftEdge)/(GridEndIndex - GridStartIndex + 1)``.
+
+
+``Time = 646.75066015177``
+
+ The current time (in code units) to which the baryon values in this
+ grid have been evolved.
+
+
+``SubgridsAreStatic = 0``
+
+ Whether refinement can occur in the subgrids.
+
+``NumberOfBaryonFields = 8``
+
+ The number of data fields associated with this grid.
+
+``FieldType = 0 1 4 5 6 19 20 21``
+
+ The integer identifiers of each field, in order, inside this grid.
+
+``BaryonFileName = ./RD0005/RedshiftOutput0005.cpu0000``
+
+ The HDF5 file in which the baryons fields are stored.
+
+``CourantSafetyNumber = 0.300000``
+
+ Courant safety number for this grid (governs timestepping.)
+
+``PPMFlatteningParameter = 0``
+
+ Flattening parameter for this grid (governs PPM hydro.)
+
+``PPMDiffusionParameter = 0``
+
+ Diffusion parameter for this grid (governs PPM hydro.)
+
+``PPMSteepeningParameter = 0``
+
+ Steepening parameter for this grid (governs PPM hydro.)
+
+``NumberOfParticles = 20``
+
+ How many particles are located in this grid at this timestep.
+
+``ParticleFileName = ./RD0005/RedshiftOutput0005.cpu0000``
+
+ The HDF5 file in which the baryon fields and particle data are
+ stored. This field will not exist if there aren't any particles in
+ the grid.
+
+``GravityBoundaryType = 0``
+
+ Boundary type inside gravity solver.
+
+
+
+
+HDF5-formatted Hierarchy File
+-----------------------------
+
+We are transitioning to an HDF5-formatted hierarchy file. This is an
+improvement because reading a large (many thousand grid) ASCII
+hierarchy file takes a long time, and can be a possible cause of precision
+errors in deep hierarchies.
+
+The structure of the file:
+
+Although HDF5 tools like 'h5ls' and 'h5dump' can be used to explore
+the structure of the file, it's probably easiest to use python and
+h5py. This is how to open an example hierarchy file (from
+run/Cosmology/Hydro/AMRCosmologySimulation) in python.
+
+::
+
+ >>> import h5py
+ >>> f = h5py.File('RD0007/RedshiftOutput0007.hierarchy.hdf5','r')
+
+The root group ('/') contains a number of attributes.
+
+::
+
+ >>> f.attrs.keys()
+ ['Redshift', 'NumberOfProcessors', 'TotalNumberOfGrids']
+ >>> f.attrs['Redshift']
+ 0.0
+ >>> f.attrs['NumberOfProcessors']
+ 1
+ >>> f.attrs['TotalNumberOfGrids']
+ 44
+
+So we see that this is a z=0 output from a simulation run on a single
+core and it contains a total of 44 grids.
+
+Now let's look at the groups contained in this file.
+
+::
+
+ >>> f.keys()
+ ['Level0', 'Level1', 'Level2', 'LevelLookupTable']
+
+The simulation has two levels of refinement, so there are a total of
+three HDF5 groups that contain information about the grids at each
+level. Additionally, there is one more dataset ('LevelLookupTable')
+that is useful for finding which level a given grid belongs to. Let's
+have a closer look.
+
+::
+
+ >>> level_lookup = f['LevelLookupTable']
+ >>> level_lookup.shape
+ (44,)
+ >>> level_lookup[:]
+ array([0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
+
+This shows you that the first grid is on level 0, the second on level
+1, and all the remaining grids on level 2. Let's have a look at the
+'Level2' group.
+
+::
+
+ >>> g = f['Level2']
+ >>> g.keys()
+ ['Grid00000003', 'Grid00000004', 'Grid00000005', ..., 'Grid00000043', 'Grid00000044']
+
+Each level group also has one attribute, 'NumberOfGrids'.
+
+::
+
+ >>> g.attrs['NumberOfGrids']
+ 42
+
+The hierarchy information about each of the grids is stored as both
+attributes and datasets.
+
+::
+
+ >>> grid = g['Grid00000003']
+ >>> grid.attrs.keys()
+ ['Task', 'GridRank', 'Time', 'OldTime', 'SubgridsAreStatic', 'NumberOfBaryonFields', 'FieldType',
+ 'BaryonFileName', 'CourantSafetyNumber', 'PPMFlatteningParameter', 'PPMDiffusionParameter',
+ 'PPMSteepeningParameter', 'ParticleFileName', 'GravityBoundaryType', 'NumberOfDaughterGrids',
+ 'NextGridThisLevelID', 'NextGridNextLevelID']
+ >>> grid.keys()
+ ['GridDimension', 'GridEndIndex', 'GridGlobalPosition',
+ 'GridLeftEdge', 'GridRightEdge', 'GridStartIndex', 'NumberOfParticles']
+
+Besides the parameters that have been described above, there are few
+new elements:
+
+``GridGlobalPosition`` is LeftGridEdge[] expressed in integer indices
+of this level, i.e. running from 0 to RootGridDimension[] *
+RefinementFactors[]**level - 1. This may be useful for re-calculating
+positions in long double precision (which is not universally supported
+by HDF5) at runtime.
+
+
+``NumberOfDaughterGrids`` gives you the number of daughter grids.
+
+
+``DaughterGrids`` is a group that contains HDF5-internal soft links to
+the daughter datasets. Example:
+
+::
+
+ >>> daughters = grid['DaughterGrids']
+ >>> daughters.keys()
+ ['DaughterGrid0000', 'DaughterGrid0001', 'DaughterGrid0002', ..., 'DaughterGrid0041']
+ >>> daughters.get('DaughterGrid0000', getlink=True)
+ <SoftLink to "/Level2/Grid00000003">
+
+In this case there are 42 daughter grids.
+
+
+``ParentGrids`` is a group that contains HDF5-internal soft links to
+parent grids on all levels above the present grid's level. Example for
+a level 2 grid:
+
+::
+
+ >>> grid = f['Level2']['Grid00000044']
+ >>> parents = grid['ParentGrids']
+ >>> parents.keys()
+ ['ParentGrid_Level0', 'ParentGrid_Level1']
+ >>> parents.get('ParentGrid_Level0', getlink=True)
+ <SoftLink to "/Level0/Grid00000001">
+
+Lastly, there's one additional (experimental) feature that is
+available only if you've compiled with version 1.8+ of HDF5. In that
+case you can set '#define HAVE_HDF5_18' in
+Grid_WriteHierarchyInformationHDF5.C [perhaps this should become a
+Makefile configuration option?], and then there will be an external
+HDF5 link to the HDF5 file containing the actual data for that grid. Example:
+
+::
+
+ >>> grid.get('GridData', getlink=True)
+ <ExternalLink to "Grid00000002" in file "./RD0007/RedshiftOutput0007.cpu0000">
+
+
+.. _controlling_the_hierarhcy_file_output:
+
+Controlling the Hierarchy File Output Format
+--------------------------------------------
+
+There are two new parameters governing the format of the hierarchy
+file:
+
+``[OutputControl.]HierarchyFileInputFormat = 0, 1``
+
+ This specifies the format of the hierarchy file to be read in: 0 =
+ ASCII, 1 = HDF5. Default set to 0 for now, but will change to 1 in the
+ future.
+
+``[OutputControl.]HierarchyFileOutputFormat = 0, 1, 2`` [OutputControl.HierarchyFileOutputFormat in new-config]
+
+ This specifies the format of the hierarchy file to be written out: 0
+ = ASCII, 1 = HDF5, 2 = both. Default set to 2 for now, but will change
+ to 1 in the future.
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/reference/NestedGridParticles.rst
--- a/doc/manual/source/reference/NestedGridParticles.rst
+++ b/doc/manual/source/reference/NestedGridParticles.rst
@@ -15,8 +15,8 @@
-----------
Following the
-:doc:`cosmology tutorial </tutorials/RunCosmologySimulation>` for
-:doc:`nested grids </tutorials/WritingParameterFiles>`,
+:doc:`cosmology tutorial </user_guide/CosmologicalInitialConditions>` for
+:doc:`nested grids </user_guide/WritingParameterFiles>`,
first inits is run, and then ring is run on the output of inits to
prepare data for the Parallel Root Grid IO mode of Enzo. The contents of the
initial conditions are easily inspected:
@@ -102,7 +102,7 @@
`h5py <http://code.google.com/p/h5py/>`_. A simple way to gain an
installation of Python with these modules is to install
`yt <http://yt.enzotools.org/>`_, which is one of the
-:doc:`data analysis tools </tutorials/DataAnalysisBasics>`
+:doc:`data analysis tools </user_guide/AnalyzingWithYT>`
available for Enzo.
Procedure
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/reference/index.rst
--- a/doc/manual/source/reference/index.rst
+++ b/doc/manual/source/reference/index.rst
@@ -8,6 +8,7 @@
EnzoAlgorithms.rst
EnzoInternalUnits.rst
EnzoParticleMass.rst
+ HierarchyFile.rst
FluxObjects.rst
Headers.rst
MakeOptions.rst
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/supplementary_info/EmbeddedPython.rst
--- /dev/null
+++ b/doc/manual/source/supplementary_info/EmbeddedPython.rst
@@ -0,0 +1,111 @@
+Embedded Python
+===============
+
+Python can now be embedded inside Enzo, for inline analysis as well as
+interaction. This comes with several shortcomings, but some compelling strong
+points.
+
+How To Compile
+--------------
+
+The configure option that controls compilation of the Python code
+can be toggled with
+
+::
+
+ make python-yes
+
+or to turn it off,
+
+::
+
+ make python-no
+
+This will look for the following variables in the machine-specific Makefile:
+
+::
+
+ MACH_INCLUDES_PYTHON
+ MACH_LIBS_PYTHON
+
+for an example of how to define these variables, see
+Make.mach.orange in the source repository.
+
+How it Works
+------------
+
+On Enzo startup, the Python interface will be initialized. This constitutes the
+creation of an interpreter within the memory-space of each Enzo process, as
+well as import and construct the `NumPy <http://numpy.scipy.org/>`_ function
+table. Several Enzo-global data objects for storing grid parameters and
+simulation parameters will be initialized and the Enzo module will be created
+and filled with those data objects.
+
+Once the Python interface and interpreter have finished initializing, the
+module user_script will be imported -- typically this means that a script named
+``user_script.py`` in the current directory will be imported, but it will
+search the entire import path as well. Every ``PythonSubcycleSkip`` subcycles,
+at the bottom of the hierarchy in ``EvolveLevel.C`` the entire grid hierarchy
+and the current set of parameters will be exported to the Enzo module and then
+user_script.main() will be called.
+
+How to Run
+----------
+
+By constructing a script inside ``user_script.py``, the Enzo hierarchy can be
+accessed and modified. The analysis toolkit `yt <http://yt.enzotools.org/>`_
+has functionality that can abstract much of the data-access and handling.
+Currently several different plotting methods -- profiles, phase plots, slices
+and cutting planes -- along with all derived quantities can be accessed and
+calculated. Projections cannot yet be made, but halo finding can be performed
+with Parallel HOP only. The following script is an example of a script that
+will save a slice as well as print some information about the simulation. Note
+that, other than the instantiation of ``lagos.EnzoStaticOutputInMemory``, this
+script is identical to one that would be run on an output located on disk.
+
+Recipes and convenience functions are being created to make every aspect of
+this simpler.
+
+::
+
+ from yt.mods import *
+
+ def main():
+ pf = lagos.EnzoStaticOutputInMemory()
+ pc = PlotCollection(pf)
+ pc.add_slice("Density", 0)
+ pc.save("%s" % pf)
+ v, c = pf.h.find_max("Density")
+ sp = pf.h.sphere(c, 1.0/pf['mpc'])
+ totals = sp.quantities["TotalQuantity"](["CellMassMsun","Ones"], lazy_reader=True)
+ print "Total mass within 1 mpc: %0.3e total cells: %0.3e" % (totals[0], totals[1])
+
+Which Operations Work
+---------------------
+
+The following operations in yt work:
+
+ * Derived quantities
+ * Slices
+ * Cutting planes
+ * Fixed Resolution Projections (i.e., non-adaptive)
+ * 1-, 2-, 3-D Profiles
+
+This should enable substantial analysis to be conducted in-line. Unfortunately,
+adaptive projections require a domain decomposition as they currently stand (as
+of yt-1.7) but this will be eliminated with a quad-tree projection method
+slated to come online in yt-2.0. In future versions of yt the volume rendering
+approach will be parallelized using kD-tree decomposition and it will also
+become available for inline processing.
+
+Please drop a line to the yt or Enzo mailing lists for help with any of this!
+
+Things Not Yet Done
+-------------------
+
+- Adaptive Projections do not work.
+- Particles are not yet exported correctly
+- Speed could be improved, but should be extremely efficient for a small
+ number of grids. Future versions will utilize intercommunicators in MPI to
+ allow for asynchronous analysis.
+
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/supplementary_info/FlowChart.rst
--- /dev/null
+++ b/doc/manual/source/supplementary_info/FlowChart.rst
@@ -0,0 +1,20 @@
+.. _FlowChart:
+
+Enzo Flow Chart, Source Browser
+===============================
+
+`Here's a cartoon of
+Enzo. <http://lca.ucsd.edu/software/enzo/v1.5/flowchart/>`_ This was
+written as a first look at the details of how Enzo works. Black
+arrows indicate further flow charts. Grey boxes (usually) indicate
+direct links to the source code.
+
+No guarantees are made regarding the correctness of this flowchart --
+it's meant to help get a basic understanding of the flow of Enzo
+before extensive code modifications. `Also see the Enzo Source
+Browser. <http://lca.ucsd.edu/software/enzo/v1.0.1/source_browser/>`_
+This is a second attempt at the same thing in a more dynamic way. It
+allows one to (in principle) see all the routines called from a
+function, in order, and jump to the source showing the call. It also
+allows you to see a reverse call stack of every routine that calls a
+particular function.
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/supplementary_info/RunCosmologySimulation.rst
--- /dev/null
+++ b/doc/manual/source/supplementary_info/RunCosmologySimulation.rst
@@ -0,0 +1,293 @@
+.. _RunCosmologySimulation:
+
+Deprecated: Running a Cosmology Simulation
+==========================================
+
+This section has mostly been replaced by :ref:`CosmologicalInitialConditions`.
+
+In order to run a cosmology simulation, you'll need to build enzo.exe,
+and an initial conditions generator (either MUSIC or inits).
+
+inits.exe and ring.exe - inits creates the
+initial conditions for your simulation, and ring splits up the root
+grid which is necessary if you're using parallel IO. Once you have
+built the three executables, put them in a common directory where you
+will run your test simulation. You will also save the inits and param
+files (shown and discussed below) in this directory.
+
+Creating initial conditions
+---------------------------
+
+The first step in preparing the simulation is to create the initial
+conditions. The file inits uses is a text file which contains a
+list of parameters with their associated values. These
+values tell the initial conditions generator necessary information
+like the simulation box size, the cosmological parameters and the
+size of the root grid. The code then takes that information and
+creates a set of initial conditions. Here is an example inits
+file:
+
+.. highlight:: none
+
+::
+
+ #
+ # Generates initial grid and particle fields for a
+ # CDM simulation
+ #
+ # Cosmology Parameters
+ #
+ CosmologyOmegaBaryonNow = 0.044
+ CosmologyOmegaMatterNow = 0.27
+ CosmologyOmegaLambdaNow = 0.73
+ CosmologyComovingBoxSize = 10.0 // in Mpc/h
+ CosmologyHubbleConstantNow = 0.71 // in units of 100 km/s/Mpc
+ CosmologyInitialRedshift = 60
+ #
+ # Power spectrum Parameters
+ #
+
+ PowerSpectrumType = 11
+ PowerSpectrumSigma8 = 0.9
+ PowerSpectrumPrimordialIndex = 1.0
+ PowerSpectrumRandomSeed = -584783758
+ #
+ # Grid info
+ #
+ Rank = 3
+ GridDims = 32 32 32
+ InitializeGrids = 1
+ GridRefinement = 1
+ #
+ # Particle info
+ #
+ ParticleDims = 32 32 32
+ InitializeParticles = 1
+ ParticleRefinement = 1
+ #
+ # Overall field parameters
+ #
+ #
+ # Names
+ #
+ ParticlePositionName = ParticlePositions
+ ParticleVelocityName = ParticleVelocities
+ GridDensityName = GridDensity
+ GridVelocityName = GridVelocities
+
+inits is run by typing this command:
+
+::
+
+ ./inits.exe -d Example_Cosmology_Sim.inits
+
+inits will produce some output to the screen to tell you what it is
+doing, and will write five files: ``GridDensity``, ``GridVelocities``,
+``ParticlePositions``, ``ParticleVelocities`` and ``PowerSpectrum.out``. The
+first four files contain information on initial conditions for the
+baryon and dark matter components of the simulation, and are HDF5
+files. The last file is an ascii file which contains information on
+the power spectrum used to generate the initial conditions.
+
+It is also possible to run cosmology simulations using initial
+nested subgrids.
+
+Parallel IO - the ring tool
+---------------------------
+
+This simulation is quite small. The root grid is only 32 cells on a
+side and we allow a maximum of three levels of mesh refinement.
+Still, we will use the ring tool, since it is important for larger
+simulations of sizes typically used for doing science. Additionally,
+if you wish to run with 64 or more processors, you should use
+``ParallelRootGridIO``, described in :ref:`ParallelRootGridIO`.
+
+The ring tool is part of the Enzo parallel IO (input-output)
+scheme. Examine the last section of the parameter file (see below)
+for this example simulation and you will see:
+
+::
+
+ #
+ # IO parameters
+ #
+ ParallelRootGridIO = 1
+ ParallelParticleIO = 1
+
+These two parameters turn on parallel IO for both grids and
+particles. In a serial IO simulation where multiple processors are
+being used, the master processor reads in all of the grid and
+particle initial condition information and parcels out portions of
+the data to the other processors. Similarly, all simulation output
+goes through the master processor as well. This is fine for
+relatively small simulations using only a few processors, but slows
+down the code considerably when a huge simulation is being run on
+hundreds of processors. Turning on the parallel IO options allows
+each processor to perform its own IO, which greatly decreases the
+amount of time the code spends performing IO.
+
+The process for parallelizing grid and particle information is quite different.
+Since it is known exactly where every grid cell in a structured Eulerian grid
+is in space, and these cells are stored in a regular and predictable order in
+the initial conditions files, turning on ``ParallelRootGridIO`` simply tells
+each processor to figure out which portions of the arrays in the GridDensity
+and ``GridVelocities`` belong to it, and then read in only that part of the
+file. The particle files (``ParticlePositions`` and ``ParticleVelocities``)
+store the particle information in no particular order. In order to efficiently
+parallelize the particle IO the ring tool is used. ring is run on the same
+number of processors as the simulation that you intend to run, and is typically
+run just before Enzo is called for this reason. In ring, each processor reads
+in an equal fraction of the particle position and velocity information into a
+list, flags the particles that belong in its simulation spatial domain, and
+then passes its portion of the total list on to another processor. After each
+portion of the list has made its way to every processor, each processor then
+collects all of the particle and velocity information that belongs to it and
+writes them out into files called ``PPos.nnnn`` and ``PVel.nnnn``, where nnnn
+is the processor number. Turning on the ``ParallelParticleIO`` flag in the Enzo
+parameter file instructs Enzo to look for these files.
+
+For the purpose of this example, you're going to run ring and Enzo on 4
+processors (this is a fixed requirement). The number of processors used in an
+MPI job is set differently on each machine, so you'll have to figure out how
+that works for you. On some machines, you can request an 'interactive queue' to
+run small MPI jobs. On others, you may have to submit a job to the batch queue,
+and wait for it to run.
+
+To start an interactive run, it might look something like this:
+
+::
+
+ qsub -I -V -l walltime=00:30:00,size=4
+
+This tells the queuing system that you want four processors total for a
+half hour of wall clock time. You may have to wait a bit until
+nodes become available, and then you will probably start out back
+in your home directory. You then run ring on the particle files by
+typing something like this:
+
+::
+
+ mpirun -n 4 ./ring.exe pv ParticlePositions ParticleVelocities
+
+This will then produce some output to your screen, and will
+generate 8 files: ``PPos.0000`` through ``PPos.0003`` and ``PVel.0000`` through
+``PVel.0003``. Note that the 'mpirun' command may actually be 'aprun'
+or something similar. Consult your supercomputer's documentation to
+figure out what this command should really be.
+
+Congratulations, you're now ready to run your cosmology
+simulation!
+
+Running an Enzo cosmology simulation
+------------------------------------
+
+After all of this preparation, running the simulation itself should
+be straightforward. First, you need to have an Enzo parameter file.
+Here is an example compatible with the inits file above:
+
+::
+
+ #
+ # AMR PROBLEM DEFINITION FILE: Cosmology Simulation (AMR version)
+ #
+ # define problem
+ #
+ ProblemType = 30 // cosmology simulation
+ TopGridRank = 3
+ TopGridDimensions = 32 32 32
+ SelfGravity = 1 // gravity on
+ TopGridGravityBoundary = 0 // Periodic BC for gravity
+ LeftFaceBoundaryCondition = 3 3 3 // same for fluid
+ RightFaceBoundaryCondition = 3 3 3
+ #
+ # problem parameters
+ #
+ CosmologySimulationOmegaBaryonNow = 0.044
+ CosmologySimulationOmegaCDMNow = 0.226
+ CosmologyOmegaMatterNow = 0.27
+ CosmologyOmegaLambdaNow = 0.73
+ CosmologySimulationDensityName = GridDensity
+ CosmologySimulationVelocity1Name = GridVelocities
+ CosmologySimulationVelocity2Name = GridVelocities
+ CosmologySimulationVelocity3Name = GridVelocities
+ CosmologySimulationParticlePositionName = ParticlePositions
+ CosmologySimulationParticleVelocityName = ParticleVelocities
+ CosmologySimulationNumberOfInitialGrids = 1
+ #
+ # define cosmology parameters
+ #
+ ComovingCoordinates = 1 // Expansion ON
+ CosmologyHubbleConstantNow = 0.71 // in km/s/Mpc
+ CosmologyComovingBoxSize = 10.0 // in Mpc/h
+ CosmologyMaxExpansionRate = 0.015 // maximum allowed delta(a)/a
+ CosmologyInitialRedshift = 60.0 //
+ CosmologyFinalRedshift = 3.0 //
+ GravitationalConstant = 1 // this must be true for cosmology
+ #
+ # set I/O and stop/start parameters
+ #
+ CosmologyOutputRedshift[0] = 25.0
+ CosmologyOutputRedshift[1] = 10.0
+ CosmologyOutputRedshift[2] = 5.0
+ CosmologyOutputRedshift[3] = 3.0
+ #
+ # set hydro parameters
+ #
+ Gamma = 1.6667
+ PPMDiffusionParameter = 0 // diffusion off
+ DualEnergyFormalism = 1 // use total & internal energy
+ InterpolationMethod = 1 // SecondOrderA
+ CourantSafetyNumber = 0.5
+ ParticleCourantSafetyNumber = 0.8
+ FluxCorrection = 1
+ ConservativeInterpolation = 0
+ HydroMethod = 0
+ #
+ # set cooling parameters
+ #
+ RadiativeCooling = 0
+ MultiSpecies = 0
+ RadiationFieldType = 0
+ StarParticleCreation = 0
+ StarParticleFeedback = 0
+ #
+ # set grid refinement parameters
+ #
+ StaticHierarchy = 0 // AMR turned on!
+ MaximumRefinementLevel = 3
+ MaximumGravityRefinementLevel = 3
+ RefineBy = 2
+ CellFlaggingMethod = 2 4
+ MinimumEfficiency = 0.35
+ MinimumOverDensityForRefinement = 4.0 4.0
+ MinimumMassForRefinementLevelExponent = -0.1
+ MinimumEnergyRatioForRefinement = 0.4
+
+ #
+ # set some global parameters
+ #
+ GreensFunctionMaxNumber = 100 // # of greens function at any one time
+
+
+ #
+ # IO parameters
+ #
+
+ ParallelRootGridIO = 1
+ ParallelParticleIO = 1
+
+Once you've saved this, you start Enzo by typing:
+
+::
+
+ mpirun -n 4 ./enzo.exe -d Example_Cosmology_Sim.param >& output.log
+
+The simulation will now run. The -d flag ensures a great deal of
+output, so you may redirect it into a log file called ``output.log``
+for later examination. This particular simulation shouldn't take
+too long, so you can run this in the same 30 minute interactive job
+you started when you ran inits. When the simulation is done, Enzo
+will display the message "Successful run, exiting."
+
+Congratulations! If you've made it this far, you have now successfully
+run a cosmology simulation using Enzo!
diff -r 7625ee3b9da4 -r 20cc80373d61 doc/manual/source/supplementary_info/SimulationNamesAndIdentifiers.rst
--- /dev/null
+++ b/doc/manual/source/supplementary_info/SimulationNamesAndIdentifiers.rst
@@ -0,0 +1,108 @@
+.. _SimulationNamesAndIdentifiers:
+
+Simulation Names and Identifiers
+================================
+
+To help track and identify simulations and datasets, a few new
+lines have been added to the parameter file:
+
+``MetaDataIdentifier``
+ short string persisted across datasets
+``MetaDataSimulationUUID``
+ uuid persisted across datasets
+``MetaDataDatasetUUID``
+ unique dataset uuid
+``MetaDataRestartDatasetUUID``
+ input dataset uuid
+``MetaDataInitialConditionsUUID``
+ initial conditions uuid
+
+
+The parameters stored during a run are members of the
+TopGridData struct.
+
+MetaDataIdentifier
+------------------
+
+This is a character string without spaces (specifically, something
+that can be picked by "%s"), that can be defined in a parameter
+file, and will be written out in every following output. It's
+intended to be a human-friendly way of tracking datasets. For
+example
+
+Example:
+
+::
+
+ MetaDataIdentifier = Cosmology512_Mpc_run4
+
+
+MetaDataSimulationUUID
+----------------------
+
+The MetaDataSimulationUUID is a globally unique identifier for a collection of
+datasets. `Universally Unique Identifiers
+<http://en.wikipedia.org/wiki/Universally_Unique_Identifier>`_ (UUIDs) are
+opaque identifiers using random 128-bit numbers, with an extremely low chance
+of collision. Therefore, they are very useful when trying to label data coming
+from multiple remote resources (say, computers distributed around the world).
+
+Example:
+
+::
+
+ MetaDataSimulationUUID = e5f72b77-5258-45ba-a376-ffe11907fae1
+
+
+Like the ``MetaDataIdentifier``, the ``MetaDataSimulationUUID`` is read in at
+the beginning of a run, and then re-written with each output. However, if one
+is not found initially, a new one will be generated, using code from the `ooid
+library <http://sourceforge.net/projects/ooid/>`_ included in Enzo.
+
+UUIDs can be generated with a variety of tools, including the python standard
+library.
+
+MetaDataDatasetUUID
+-------------------
+
+A MetaDataDatasetUUID is created at each output.
+
+Example:
+
+::
+
+ MetaDataDatasetUUID = b9d78cc7-2ecf-4d66-a23c-a1dcd40e7955
+
+
+MetaDataRestartDatasetUUID
+--------------------------
+
+While reading the parameter file, if a MetaDataDatasetUUID line is
+found, it is stored, and re-written as MetaDataRestartDatasetUUID.
+The intention of this is to help track datasets across restarts and
+parameter tweaks.
+
+Example:
+
+::
+
+ MetaDataRestartDatasetUUID = b9d78cc7-2ecf-4d66-a23c-a1dcd40e7955
+
+MetaDataInitialConditionsUUID
+-----------------------------
+
+This is similar to ``MetaDataRestartDatasetUUID``, except it's intended for tracking which initial conditions were used for a simulation.
+
+Example:
+
+::
+
+ MetaDataInitialConditionsUUID = 99f71bdf-e56d-4daf-88f6-1ecd988cbc9f
+
+Still to be done
+----------------
+
+ * Add UUID generation to ``inits`` store it in the HDF5 output.
+ * Preserve the UUID when using ``ring``.
+ * Have Enzo check for the UUID in both cases.
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/enzo/enzo-dev/commits/5795a56c9b22/
Changeset: 5795a56c9b22
Branch: week-of-code
User: pgrete
Date: 2017-08-22 16:00:54+00:00
Summary: Merged enzo/enzo-dev into week-of-code
Affected #: 44 files
diff -r 20cc80373d61 -r 5795a56c9b22 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -15,3 +15,4 @@
d073462b1884d9653fc6dc2511a8bcdbb281993f enzo-2.4
2984068d220f8fce3470447e9c26be383ba48c9f enzo-2.5
6300a72aca0a4d968bed49186e69f7cd5c7ce58c gold-standard-v1
+0537e499315c87bb50f48b976de8b49742d24d40 gold-standard-v2
diff -r 20cc80373d61 -r 5795a56c9b22 doc/manual/source/parameters/gravity.rst
--- a/doc/manual/source/parameters/gravity.rst
+++ b/doc/manual/source/parameters/gravity.rst
@@ -26,10 +26,8 @@
``MaximumGravityRefinementLevel`` (external)
This is the lowest (most refined) depth that a gravitational
acceleration field is computed. More refined levels interpolate
- from this level, provided a mechanism for instituting a minimum
+ from this level, providing a mechanism for instituting a minimum
gravitational smoothing length. Default: ``MaximumRefinementLevel``
- (unless ``HydroMethod`` is ZEUS and radiative cooling is on, in which
- case it is ``MaximumRefinementLevel`` - 3).
``MaximumParticleRefinementLevel`` (external)
This is the level at which the dark matter particle contribution to
the gravity is smoothed. This works in an inefficient way (it
@@ -84,7 +82,7 @@
2, then it takes the mass of the dark matter halo in CGS
units. ``ProblemType`` = 31 (galaxy disk simulation) automatically calculates
values for ``PointSourceGravityConstant`` and
- ``PointSourceGravityCoreRadius``. Default: 1
+ ``PointSourceGravityCoreRadius``. ``ProblemType`` = 108 (elliptical galaxy and galaxy cluster) also includes the gravity from the stellar component and the SMBH. Default: 1
``PointSourceGravityCoreRadius`` (external)
For ``PointSourceGravity`` = 1, this is the radius inside which
the acceleration field is smoothed in code units. With ``PointSourceGravity`` =
diff -r 20cc80373d61 -r 5795a56c9b22 doc/manual/source/parameters/hydro.rst
--- a/doc/manual/source/parameters/hydro.rst
+++ b/doc/manual/source/parameters/hydro.rst
@@ -30,17 +30,18 @@
More details on each of the above methods can be found at :ref:`hydro_methods`.
``FluxCorrection`` (external)
This flag indicates if the flux fix-up step should be carried out
- around the boundaries of the sub-grid to preserve conservation (1 -
- on, 0 - off). Strictly speaking this should always be used, but we
- have found it to lead to a less accurate solution for cosmological
- simulations because of the relatively sharp density gradients
- involved. However, it does appear to be important when radiative
- cooling is turned on and very dense structures are created.
- It does work with the ZEUS
- hydro method, but since velocity is face-centered, momentum flux is
- not corrected. Species quantities are not flux corrected directly
- but are modified to keep the fraction constant based on the density
- change. Default: 1
+ around the boundaries of the sub-grid to preserve conservation (0 -
+ off, 1 - on, 2 - direct correction for color fields). Strictly speaking
+ this should always be used, but we have found it to lead to a less
+ accurate solution for cosmological simulations because of the relatively
+ sharp density gradients involved. However, it does appear to be
+ important when radiative cooling is turned on and very dense structures
+ are created. It does work with the ZEUS hydro method, but since velocity
+ is face-centered, momentum flux is not corrected. If FluxCorrection = 1,
+ species quantities are not flux corrected directly but are modified to
+ keep the fraction constant based on the density change. If FluxCorrection
+ = 2, species quantities are flux corrected directly in the same way as
+ density and energy. Default: 1
``InterpolationMethod`` (external)
There should be a whole section devoted to the interpolation
method, which is used to generate new sub-grids and to fill in the
diff -r 20cc80373d61 -r 5795a56c9b22 doc/manual/source/parameters/problemtypes.rst
--- a/doc/manual/source/parameters/problemtypes.rst
+++ b/doc/manual/source/parameters/problemtypes.rst
@@ -1,5 +1,5 @@
Problem Type Parameters
---------------------
+-----------------------
``ProblemType`` (external)
This integer specifies the type of problem to be run. Its value
@@ -1245,7 +1245,7 @@
.. _stochastic_forcing_param:
Turbulence Simulation with Stochastic Forcing (59)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~
Typical quasi-isothermal "turbulence-in-a-box" problem with non-static driving field.
For details on stochastic forcing, see Schmidt et al. 2009 A&A 494, 127-145
http://dx.doi.org/10.1051/0004-6361:200809967
@@ -1529,7 +1529,7 @@
``ClusterSMBHJetPrecessionPeriod`` (external)
Unit: Myr. Default: 0.0 (not precessing)
``ClusterSMBHCalculateGasMass`` (external)
- Type: integer. 1--Calculate the amount of cold gas around the SMBH and remove it at the rate of 2*Mdot; 2--Calculate Mdot based on the amount of cold gas around the SMBH; 0--off (do not remove cold gas). Default: 1.
+ Type: integer. 1--Calculate the amount of cold gas around the SMBH and remove it at the rate of 2*Mdot; 2--Calculate Mdot based on the amount of cold gas around the SMBH; 3--Calculate Mdot similar to 2 but change ClusterSMBHJetDim periodically (period = ClusterSMBHJetPrecessionPeriod); 4--Calculate Mdot within Bondi radius (only use this when Bondi radius is resolved); 0--off (do not remove cold gas). Default: 1.
``ClusterSMBHFeedbackSwitch`` (external)
Boolean flag. When ClusterSMBHCalculateGasMass=1, ClusterSMBHFeedbackSwitch is turned on when there is enough cold gas (ClusterSMBHEnoughColdGas) around the SMBH. Default: FALSE
``ClusterSMBHEnoughColdGas`` (external)
@@ -1540,7 +1540,18 @@
0--x; 1--y; 2--z. Default: 2
``ClusterSMBHAccretionEpsilon`` (external)
Jet Edot = ClusterSMBHAccretionEpsilon * Mdot * c^2. Default: 0.001
-
+``ClusterSMBHDiskRadius`` (external)
+ The size of the accretion zone in kpc. Default: 0.5
+``ClusterSMBHBCG`` (external)
+ The stellar component of the Perseus BCG (in cluster simulations) or the elliptical galaxies (in simulations of isolated elliptical galaxies). Default: 1.0
+``ClusterSMBHMass`` (external)
+ The mass of the SMBH of the Perseus BCG (in cluster simulations) or the elliptical galaxies (in simulations of isolated elliptical galaxies). Default: 0
+``EllipticalGalaxyRe`` (external)
+	Re is the radius of the isophote enclosing half of the galaxy's light. In the Hernquist profile, a=Re/1.8153. Default: 0
+``OldStarFeedbackAlpha`` (external)
+ Mass ejection rate from evolved stars in the unit of 10^{-19} s^{-1}. It is typically within a factor of 2 of unity. Default: 0
+``SNIaFeedbackEnergy`` (external)
+ Energy feedback from evolved stars (Type Ia SN). Default: 1.0
.. _mhd1d_param:
@@ -1694,7 +1705,7 @@
.. _agndisk_param:
AGN Disk (207)
-~~~~~~~~~~~~
+~~~~~~~~~~~~~~
``DiskType`` (external)
Default: 1
@@ -1714,11 +1725,10 @@
Initial height of the disk. Default: 1
.. _poissonsolver_param:
-
.. _cr_shocktube_param:
CR Shock Tube (250: unigrid and AMR)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Very similar to normal shock tube (see problem 1) but includes CR
component. See Salem, Bryan & Hummels (2014) for discussion.
diff -r 20cc80373d61 -r 5795a56c9b22 run/Hydro/Hydro-1D/ShockInABox/ShockInABox.enzo
--- a/run/Hydro/Hydro-1D/ShockInABox/ShockInABox.enzo
+++ b/run/Hydro/Hydro-1D/ShockInABox/ShockInABox.enzo
@@ -1,5 +1,5 @@
#
-# ShockInABox Problem.
+# Custom ShockInABox Problem.
# Shock Mach Number: 3.000000
# PreShock Temperature: 2.727273e+06
# PostShock Temperature: 1.000000e+07
@@ -30,7 +30,6 @@
# set Hydro parameters
#
HydroMethod = 0
-Gamma = 1.66667
CourantSafetyNumber = 0.8
PPMDiffusionParameter = 1 // diffusion on
PPMFlatteningParameter = 1 // flattening on
@@ -62,10 +61,11 @@
DensityUnits = 1.67453400e-24
LengthUnits = 3.08567758e+24
TimeUnits = 3.15576000e+16
+Gamma = 1.666667
ShockInABoxLeftDensity = 1.00000000e+00
-ShockInABoxLeftVelocity = 6.90120768e-01
+ShockInABoxLeftVelocity = 7.66800854e-01
ShockInABoxLeftPressure = 3.91989033e-02
ShockInABoxRightDensity = 3.00000000e+00
-ShockInABoxRightVelocity = 1.78920199e-01
+ShockInABoxRightVelocity = 2.55600285e-01
ShockInABoxRightPressure = 4.31187936e-01
diff -r 20cc80373d61 -r 5795a56c9b22 run/Hydro/Hydro-1D/ShockInABox/make_plots.py
--- a/run/Hydro/Hydro-1D/ShockInABox/make_plots.py
+++ b/run/Hydro/Hydro-1D/ShockInABox/make_plots.py
@@ -1,46 +1,48 @@
-from yt.mods import *
+from __future__ import print_function
+import yt
import pylab
-
+import numpy as numpy
### define simulation output directory and filename base
output_dir_base = 'DD'
datafile_base = 'data'
### load data
-ts = TimeSeriesData.from_filenames("*/*.hierarchy")
+ts = yt.DatasetSeries.from_filenames("*/*.hierarchy")
for pf in ts:
pylab.clf()
print(pf.current_time)
### extract an ortho_ray (1D solution vector)
- ray = pf.h.ortho_ray(0, [0.5, 0.5])
-
+ ray = pf.ortho_ray(0, [0.5, 0.5])
+ ray_sort = numpy.argsort(ray["x"])
pylab.figure(1, figsize=(10,8))
# Density Plot
pylab.subplot(2,2,1)
- pylab.semilogy(ray['x'],ray['Density'], 'k')
+ pylab.semilogy(ray['x'][ray_sort],ray['density'][ray_sort])
pylab.xlabel('Position')
pylab.ylabel('Density')
# Temperature Plot
pylab.subplot(2,2,2)
- pylab.semilogy(ray['x'],ray['Temperature'], 'b')
+ pylab.semilogy(ray['x'][ray_sort],ray['Temperature'][ray_sort], 'b')
pylab.xlabel('Position')
pylab.ylabel('Temperature')
# Mach Plot
pylab.subplot(2,2,3)
- pylab.plot(ray['x'],ray['Mach'], 'k')
+ pylab.plot(ray['x'][ray_sort],ray['Mach'][ray_sort], 'k')
pylab.xlabel('x')
pylab.ylabel('Mach')
# Mach Plot
pylab.subplot(2,2,4)
- pylab.plot(ray['x'],ray['VelocityMagnitude'], 'k')
+ pylab.plot(ray['x'][ray_sort],ray[('gas','velocity_magnitude')][ray_sort], 'k')
pylab.xlabel('x')
pylab.ylabel('|v|')
### Save plot
+ pylab.tight_layout()
pylab.savefig('%s_thermal.png' % pf)
pylab.clf()
diff -r 20cc80373d61 -r 5795a56c9b22 src/enzo/CallProblemSpecificRoutines.C
--- a/src/enzo/CallProblemSpecificRoutines.C
+++ b/src/enzo/CallProblemSpecificRoutines.C
@@ -69,6 +69,9 @@
/* Add radio-mode jet feedback */
if (ClusterSMBHFeedback == TRUE)
ThisGrid->GridData->ClusterSMBHFeedback(level);
+ /* Add Feedback from evolved stars */
+ if (OldStarFeedbackAlpha > 0.0)
+ ThisGrid->GridData->OldStarFeedback();
return SUCCESS;
}
diff -r 20cc80373d61 -r 5795a56c9b22 src/enzo/ClusterSMBHSumGasMass.C
--- a/src/enzo/ClusterSMBHSumGasMass.C
+++ b/src/enzo/ClusterSMBHSumGasMass.C
@@ -79,19 +79,27 @@
fprintf(stderr, "Error in GetUnits.\n");
return FAIL;
}
- MassUnits = DensityUnits*pow(LengthUnits,3);
+ MassUnits = DensityUnits*POW(LengthUnits,3);
float ColdGasMassMsun=ClusterSMBHColdGasMass*MassUnits/SolarMass;
- if (ClusterSMBHCalculateGasMass == 2){
- if (ColdGasMassMsun > 0.001) {
+ if (ClusterSMBHCalculateGasMass >1 ){ //2-calculate&remove,3-calculate&remove&re-orient,4-Bondi
+ if (ClusterSMBHCalculateGasMass == 3){
+ ClusterSMBHJetDim = floor(Time*TimeUnits/(1.0e6*3.1557e7*ClusterSMBHJetPrecessionPeriod)); //ClusterSMBHJetPrecessionPeriod is now the Dim changing period.
+ }
+ if (ColdGasMassMsun > 0.000001) {
ClusterSMBHFeedbackSwitch = TRUE;
+ if (ClusterSMBHCalculateGasMass == 4){ // no matter how much cold gas there is; accretiontime is now =dtFixed
+	    ClusterSMBHJetMdot = (ColdGasMassMsun/(ClusterSMBHAccretionTime))/2.0;   // AccretionTime already in s; Mdot in Msun/s. Divide it by 2 because Mdot is for only one jet.
+ ClusterSMBHJetEdot = (ClusterSMBHAccretionEpsilon*ClusterSMBHJetMdot * SolarMass) * POW(clight,2)/1.0e44; //for one jet
+ }
+ else {
ClusterSMBHJetMdot = (ColdGasMassMsun/(ClusterSMBHAccretionTime*1e6))/2.0; // AccretionTime from Myr to yr; reset Mdot, still in Msun/yr. Devide it by 2 because Mdot is for only one jet.
- float epsilon=0.001;
- ClusterSMBHJetEdot = (epsilon*ClusterSMBHJetMdot * SolarMass/3.1557e7) * pow(clight,2)/1.0e44; //for one jet
+ ClusterSMBHJetEdot = (ClusterSMBHAccretionEpsilon*ClusterSMBHJetMdot * SolarMass/3.1557e7) * POW(clight,2)/1.0e44; //for one jet
}
+ }
else
ClusterSMBHFeedbackSwitch = FALSE; // if there is not enough ColdGas, then do not turn jet on.
- } // end if ClusterSMBHCalculateGasMass == 2
+ } // end if ClusterSMBHCalculateGasMass > 1
if (ClusterSMBHCalculateGasMass == 1) {
int LastClusterSMBHFeedbackSwitch = ClusterSMBHFeedbackSwitch;
if (ColdGasMassMsun < 1.0e5)
@@ -107,10 +115,11 @@
ClusterSMBHJetDim += 1;
}
} // end if ClusterSMBHCalculateGasMass == 1
+
if (MyProcessorNumber == ROOT_PROCESSOR) {
FILE *fptr=fopen("MT.out","a");
- fprintf(fptr,"Time, ClusterSMBHStartTime, Switch, and Total ClusterSMBHColdGasMass in Msun = %g %g %d %g \n", Time, ClusterSMBHStartTime, ClusterSMBHFeedbackSwitch, ColdGasMassMsun);
+ fprintf(fptr,"Time, ClusterSMBHJetMdot, ClusterSMBHAccretionTime, and Total ClusterSMBHColdGasMass in Msun = %"ESYM" %"ESYM" %"ESYM" %"ESYM"\n", Time, ClusterSMBHJetMdot, ClusterSMBHAccretionTime, ColdGasMassMsun);
fclose(fptr);
}
return SUCCESS;
diff -r 20cc80373d61 -r 5795a56c9b22 src/enzo/CreateSUBlingList.C
--- a/src/enzo/CreateSUBlingList.C
+++ b/src/enzo/CreateSUBlingList.C
@@ -76,7 +76,7 @@
NumberOfGrids = GenerateGridArray(LevelArray, level, &Grids);
NumberOfChildGrids = GenerateGridArray(LevelArray, level+1, &ChildGrids);
- if( FluxCorrection != TRUE ) {
+ if( FluxCorrection == 0 ) {
for (grid1 = 0; grid1 < NumberOfGrids; grid1++)
(*SUBlingList)[grid1] = NULL;
return SUCCESS;
diff -r 20cc80373d61 -r 5795a56c9b22 src/enzo/DeleteSUBlingList.C
--- a/src/enzo/DeleteSUBlingList.C
+++ b/src/enzo/DeleteSUBlingList.C
@@ -40,7 +40,7 @@
LevelHierarchyEntry **SUBlingList)
{
- if( FluxCorrection != TRUE )
+ if( FluxCorrection == 0 )
return SUCCESS;
LevelHierarchyEntry *LastEntry, *NextEntry;
diff -r 20cc80373d61 -r 5795a56c9b22 src/enzo/ExternalBoundary_SetExternalBoundary.C
--- a/src/enzo/ExternalBoundary_SetExternalBoundary.C
+++ /dev/null
@@ -1,294 +0,0 @@
-/***********************************************************************
-/
-/ EXTERNAL BOUNDARY CLASS (SET A GRID'S BOUNDARY)
-/
-/ written by: Greg Bryan
-/ date: November, 1994
-/ modified1:
-/
-/ PURPOSE:
-/
-/ RETURNS: SUCCESS or FAIL
-/
-************************************************************************/
-
-#include <stdio.h>
-#include "ErrorExceptions.h"
-#include "macros_and_parameters.h"
-#include "typedefs.h"
-#include "global_data.h"
-#include "Fluxes.h"
-#include "GridList.h"
-#include "ExternalBoundary.h"
-#include "Grid.h"
-
-// This is used to set the corners (which are not really used) of the
-// grid to something reasonable in the case of periodic B.C.'s
-
-//#define USE_PERIODIC
-
-//
-// Given a pointer to a field and its field type, find the equivalent
-// field type in the list of boundary's and apply that boundary value/type.
-// Returns: 0 on failure
-//
-int ExternalBoundary::SetExternalBoundary(int FieldRank, int GridDims[],
- int GridOffset[],
- int StartIndex[], int EndIndex[],
- float *Field, int FieldType)
-{
-
- /* declarations */
-
- int i, j, k, dim, Sign, bindex;
- float *index;
-
- /* error check: grid ranks */
-
- if (FieldRank != BoundaryRank) {
- ENZO_VFAIL("FieldRank(%"ISYM") != BoundaryRank(%"ISYM").\n",
- FieldRank, BoundaryRank)
- }
-
- /* find requested field type */
-
- int field;
- for (field = 0; field < NumberOfBaryonFields; field++)
- if (FieldType == BoundaryFieldType[field]) break;
- if (field == NumberOfBaryonFields) {
- ENZO_VFAIL("Field type (%"ISYM") not found in Boundary.\n", FieldType)
- }
-
- /* error check: make sure the boundary type array exists */
-
- for (dim = 0; dim < BoundaryRank; dim++)
- if (BoundaryDimension[dim] != 1) {
- if (BoundaryType[field][dim][0] == NULL) {
- ENZO_FAIL("BoundaryType not yet declared.\n");
- }
- }
-
-
-
- /* set Boundary conditions */
-
- Sign = 1;
- if (FieldType == Velocity1) Sign = -1;
-
- if (BoundaryDimension[0] > 1 && GridOffset[0] == 0) {
-
- /* set x inner (left) face */
-
- for (i = 0; i < StartIndex[0]; i++)
- for (j = 0; j < GridDims[1]; j++)
- for (k = 0; k < GridDims[2]; k++) {
- index = Field + i + j*GridDims[0] + k*GridDims[1]*GridDims[0];
- bindex = j+GridOffset[1] + (k+GridOffset[2])*BoundaryDimension[1];
- switch (BoundaryType[field][0][0][bindex]) {
- case reflecting:
- *index = Sign*(*(index + (2*StartIndex[0] - 1 - 2*i)));
- break;
- case outflow:
- *index = *(index + ( StartIndex[0] - i)) ;
- break;
- case inflow:
- *index = BoundaryValue[field][0][0][bindex];
- break;
- case periodic:
-#ifdef USE_PERIODIC
- *index = *(index + (EndIndex[0] - StartIndex[0] + 1));
-#endif /* USE_PERIODIC */
- break;
- case shearing:
- // *index = *(index + (EndIndex[0] - StartIndex[0] + 1));
- break;
- case BoundaryUndefined:
- default:
- ENZO_FAIL("BoundaryType not recognized (x-left).\n");
- }
- }
- }
-
- if (BoundaryDimension[0] > 1 && GridOffset[0]+GridDims[0] == BoundaryDimension[0]) {
-
- /* set x outer (right) face */
-
- for (i = 0; i < GridDims[0]-EndIndex[0]-1; i++)
- for (j = 0; j < GridDims[1]; j++)
- for (k = 0; k < GridDims[2]; k++) {
- index = Field + i + EndIndex[0]+1 +
- j*GridDims[0] + k*GridDims[1]*GridDims[0];
- bindex = j+GridOffset[1] + (k+GridOffset[2])*BoundaryDimension[1];
- switch (BoundaryType[field][0][1][bindex]) {
- case reflecting:
- *index = Sign*(*(index - (2*i + 1)));
- break;
- case outflow:
- *index = *(index + (-1 - i)) ;
- break;
- case inflow:
- *index = BoundaryValue[field][0][1][bindex];
- break;
- case periodic:
-#ifdef USE_PERIODIC
- *index = *(index - (EndIndex[0] - StartIndex[0] + 1));
-#endif /* USE_PERIODIC */
- break;
- case shearing:
- // *index = *(index - (EndIndex[0] - StartIndex[0] + 1));
- break;
- case BoundaryUndefined:
- default:
- ENZO_FAIL("BoundaryType not recognized (x-right).\n");
- }
- }
- }
-
- /* set y inner (left) face */
-
- Sign = 1;
- if (FieldType == Velocity2) Sign = -1;
-
- if (BoundaryDimension[1] > 1 && GridOffset[1] == 0) {
-
- for (j = 0; j < StartIndex[1]; j++)
- for (i = 0; i < GridDims[0]; i++)
- for (k = 0; k < GridDims[2]; k++) {
- index = Field + i + j*GridDims[0] + k*GridDims[1]*GridDims[0];
- bindex = i+GridOffset[0] + (k+GridOffset[2])*BoundaryDimension[0];
- switch (BoundaryType[field][1][0][bindex]) {
- case reflecting:
- *index = Sign*(*(index + (2*StartIndex[1] - 1 - 2*j)*GridDims[0]));
- break;
- case outflow:
- *index = *(index + ( StartIndex[1] - j)*GridDims[0]) ;
- break;
- case inflow:
- *index = BoundaryValue[field][1][0][bindex];
- break;
- case periodic:
-#ifdef USE_PERIODIC
- *index = *(index + (EndIndex[1] - StartIndex[1] + 1)*GridDims[0]);
-#endif /* USE_PERIODIC */
- break;
- case shearing:
-// *index = *(index + (EndIndex[1] - StartIndex[1] + 1)*GridDims[0]);
- break;
- case BoundaryUndefined:
- default:
- ENZO_FAIL("BoundaryType not recognized (y-left).\n");
- }
- }
- }
-
- if (BoundaryDimension[1] > 1 && GridOffset[1]+GridDims[1] == BoundaryDimension[1]) {
-
- /* set y outer (right) face */
-
- for (j = 0; j < GridDims[1]-EndIndex[1]-1; j++)
- for (i = 0; i < GridDims[0]; i++)
- for (k = 0; k < GridDims[2]; k++) {
- index = Field + i + (j + EndIndex[1]+1)*GridDims[0] +
- k*GridDims[1]*GridDims[0];
- bindex = i+GridOffset[0] + (k+GridOffset[2])*BoundaryDimension[0];
- switch (BoundaryType[field][1][1][bindex]) {
- case reflecting:
- *index = Sign*(*(index - (2*j + 1)*GridDims[0]));
- break;
- case outflow:
- *index = *(index + (-1 - j)*GridDims[0]) ;
- break;
- case inflow:
- *index = BoundaryValue[field][1][1][bindex];
- break;
- case periodic:
-#ifdef USE_PERIODIC
- *index = *(index - (EndIndex[1] - StartIndex[1] + 1)*GridDims[0]);
-#endif /* USE_PERIODIC */
- break;
- case shearing:
-// *index = *(index - (EndIndex[1] - StartIndex[1] + 1)*GridDims[0]);
- break;
- case BoundaryUndefined:
- default:
- ENZO_FAIL("BoundaryType not recognized (y-right).\n");
- }
- }
- }
-
- /* set z inner (left) face */
-
- Sign = 1;
- if (FieldType == Velocity3) Sign = -1;
-
- if (BoundaryDimension[2] > 1 && GridOffset[2] == 0) {
-
- for (k = 0; k < StartIndex[2]; k++)
- for (i = 0; i < GridDims[0]; i++)
- for (j = 0; j < GridDims[1]; j++) {
- index = Field + i + j*GridDims[0] + k*GridDims[1]*GridDims[0];
- bindex = i+GridOffset[0] + (j+GridOffset[1])*BoundaryDimension[0];
- switch (BoundaryType[field][2][0][bindex]) {
- case reflecting:
- *index = Sign*(*(index + (2*StartIndex[2]-1 - 2*k)*GridDims[0]*GridDims[1]));
- break;
- case outflow:
- *index = *(index + ( StartIndex[2] - k)*GridDims[0]*GridDims[1]) ;
- break;
- case inflow:
- *index = BoundaryValue[field][2][0][bindex];
- break;
- case periodic:
-#ifdef USE_PERIODIC
- *index = *(index + (EndIndex[2]-StartIndex[2]+1)*GridDims[0]*GridDims[1]);
-#endif /* USE_PERIODIC */
- break;
- case shearing:
-// *index = *(index + (EndIndex[2]-StartIndex[2]+1)*GridDims[0]*GridDims[1]);
- break;
- case BoundaryUndefined:
- default:
- ENZO_FAIL("BoundaryType not recognized (z-left).\n");
- }
- }
- }
-
- if (BoundaryDimension[2] > 1 && GridOffset[2]+GridDims[2] == BoundaryDimension[2]) {
-
- /* set z outer (right) face */
-
- for (k = 0; k < GridDims[2]-EndIndex[2]-1; k++)
- for (i = 0; i < GridDims[0]; i++)
- for (j = 0; j < GridDims[1]; j++) {
- index = Field + i + j*GridDims[0] +
- (k + EndIndex[2]+1)*GridDims[1]*GridDims[0];
- bindex = i+GridOffset[0] + (j+GridOffset[1])*BoundaryDimension[0];
- switch (BoundaryType[field][2][1][bindex]) {
- case reflecting:
- *index = Sign*(*(index - (2*k + 1)*GridDims[0]*GridDims[1]));
- break;
- case outflow:
- *index = *(index + (-1 - k)*GridDims[0]*GridDims[1]) ;
- break;
- case inflow:
- *index = BoundaryValue[field][2][1][bindex];
- break;
- case periodic:
-#ifdef USE_PERIODIC
- *index = *(index - (EndIndex[2]-StartIndex[2]+1)*GridDims[0]*GridDims[1]);
-#endif /* USE_PERIODIC */
- break;
- case shearing:
-// *index = *(index - (EndIndex[2]-StartIndex[2]+1)*GridDims[0]*GridDims[1]);
- break;
- case BoundaryUndefined:
- default:
- ENZO_FAIL("BoundaryType not recognized (z-right).\n");
-
- }
- }
- }
-
- return SUCCESS;
-
-}
diff -r 20cc80373d61 -r 5795a56c9b22 src/enzo/Grid.h
--- a/src/enzo/Grid.h
+++ b/src/enzo/Grid.h
@@ -2735,6 +2735,7 @@
int ClusterSMBHFeedback(int level);
int ClusterSMBHEachGridGasMass(int level);
+ int OldStarFeedback();
int SetNumberOfColours(void);
int SaveSubgridFluxes(fluxes *SubgridFluxes[], int NumberOfSubgrids,
float *Flux3D[], int flux, float fluxcoef, float dt);
diff -r 20cc80373d61 -r 5795a56c9b22 src/enzo/Grid_ClusterInitializeGrid.C
--- a/src/enzo/Grid_ClusterInitializeGrid.C
+++ b/src/enzo/Grid_ClusterInitializeGrid.C
@@ -106,10 +106,10 @@
if (ComovingCoordinates) {
CosmologyComputeExpansionFactor(Time, &a, &dadt);
ExpansionFactor = a/(1.0+InitialRedshift);
- CriticalDensity = 2.78e11*pow(HubbleConstantNow, 2); // in Msolar/Mpc^3
+ CriticalDensity = 2.78e11*POW(HubbleConstantNow, 2); // in Msolar/Mpc^3
BoxLength = ComovingBoxSize*ExpansionFactor/HubbleConstantNow; // in Mpc
} else {
- CriticalDensity = 2.78e11*pow(0.74,2); // in Msolar/Mpc^3 for h=0.74
+ CriticalDensity = 2.78e11*POW(0.74,2); // in Msolar/Mpc^3 for h=0.74
BoxLength = LengthUnits / 3.086e24;
HubbleConstantNow = 1.0;
OmegaMatterNow = 1.0;
@@ -127,10 +127,10 @@
PointSourceGravityCoreRadius = SphereCoreRadius[0]*LengthUnits; // in CGS
printf("begin calculating PointSourceGravityConstant");
PointSourceGravityConstant = 4.0*pi*SphereDensity[0]*DensityUnits *
- pow(SphereCoreRadius[0]*LengthUnits, 3) *
- (log(1.0+1.0) - 1.0/(1.0+1.0))/SolarMass;// + 2.43e11 + 3.4e8 //in Msolar + BCG mass + BH mass//Not Mvir, but Ms.
+ POW(SphereCoreRadius[0]*LengthUnits, 3) *
+ (log(1.0+1.0) - 1.0/(1.0+1.0))/SolarMass;// in Msun. Not Mvir, but Ms.
BaryonMeanDensity = 0.15; // 15% baryon fraction
-printf("PointSourceGravityConstant= %g\n", PointSourceGravityConstant);
+printf("PointSourceGravityConstant= %"GSYM"\n", PointSourceGravityConstant);
}
/* Return if this doesn't concern us. */
@@ -155,42 +155,42 @@
sphere = 0;
FILE *fptr = fopen("NFWProfile.out", "w");
+
for (i = 0; i < NFW_POINTS; i++) {
- NFWRadius[i] = SphereRadius[sphere]*pow(10, -5*(float(i)/NFW_POINTS));
+ NFWRadius[i] = SphereRadius[sphere]*POW(10, -5*(float(i)/NFW_POINTS));
x1 = NFWRadius[i]/SphereCoreRadius[sphere];
- if (SphereType[sphere]!=6) {
- NFWDensity[i] = SphereDensity[sphere]/(x1*(1.0+x1)*(1.0+x1));
-}
- if (SphereType[sphere]==6) {
- NFWDensity[i]=mh*(0.0192/(1.0+pow(NFWRadius[i]*LengthUnits/(18.0e-3*Mpc),3.0))+0.046/pow((1.0+pow(NFWRadius[i]*LengthUnits/(57.0e-3*Mpc), 2.0)), 1.8)+0.00563/pow((1.0+pow(NFWRadius[i]*LengthUnits/(200.0e-3*Mpc), 2.0)), 1.1))/DensityUnits/0.88;
- NFWTemp[i] = 8.12e7*(1.0+pow(NFWRadius[i]*LengthUnits/(1.0e-3*Mpc*71),3))/(2.3 + pow(NFWRadius[i]*LengthUnits/(1.0e-3*Mpc*71),3)); // in K
- NFWPressure[i] = (1.9*(0.0192/(1.0+pow(NFWRadius[i]*LengthUnits/(18.0e-3*Mpc),3.0))+0.046/pow((1.0+pow(NFWRadius[i]*LengthUnits/(57.0e-3*Mpc), 2.0)), 1.8)+0.00563/pow((1.0+pow(NFWRadius[i]*LengthUnits/(200.0e-3*Mpc), 2.0)), 1.1))/DensityUnits)* kboltz * NFWTemp[i];
- NFWMass[i] = 4.0*pi*1891.3*(CriticalDensity/pow(ExpansionFactor, 3))
- * pow(0.02446*BoxLength, 3) * (log(1.0+x1) - x1/(x1+1.0))+ 3.4e8; //DM mass + BH mass (3.4e8)
- NFWSigma[i] = sqrt(kboltz * NFWTemp[i] / (mu * mh)); // in cm/s
- float mean_overdensity = 3.0*SphereDensity[sphere] / (x1*x1*x1) *
- (log(1.0+x1) - x1/(x1+1.0));
- fprintf(fptr, "%d %"GOUTSYM" %g %g %g %g %g %g\n", i, NFWRadius[i],
- NFWDensity[i], NFWMass[i], NFWPressure[i], NFWTemp[i], NFWSigma[i],
- mean_overdensity);
- }
-
- if (SphereType[sphere]==7 || SphereType[sphere]==8 ) {
+ NFWDensity[i] = SphereDensity[sphere]/(x1*(1.0+x1)*(1.0+x1)); // DM Density
+ if (SphereType[sphere]>=6 && SphereType[sphere] <= 8) { //aka 6, 7, 8: Perseus Cluster
rkpc=NFWRadius[i]*LengthUnits/(1.0e-3*Mpc);
+ /* Initial Temperature */
if (rkpc > 300.0){
- NFWTemp[i]=7.0594*1.3*pow((1.0+1.5*NFWRadius[i]/SphereRadius[sphere]),-1.6)*keV; // in K
+ NFWTemp[i]=7.0594*1.3*POW((1.0+1.5*NFWRadius[i]/SphereRadius[sphere]),-1.6)*keV; // in K
} else{
- NFWTemp[i]=8.12e7*(1.0+pow(NFWRadius[i]*LengthUnits/(1.0e-3*Mpc*71),3))/(2.3 + pow(NFWRadius[i]*LengthUnits/(1.0e-3*Mpc*71),3));
+ NFWTemp[i]=8.12e7*(1.0+POW(NFWRadius[i]*LengthUnits/(1.0e-3*Mpc*71),3))/(2.3 + pow(NFWRadius[i]*LengthUnits/(1.0e-3*Mpc*71),3));
if (SphereType[sphere]==8)
NFWTemp[i]=8.12e7;
}
- Allg[i]=GravConst*PointSourceGravityConstant*SolarMass*
+ /* Set Gravity. NFW Dark Matter, BCG+BH */
+ Allg[i]=GravConst*PointSourceGravityConstant*SolarMass*
((log(1.0+x1)-x1/(1.0+x1)) /(log(1.0+1.0)-1.0/(1.0+1.0)))/POW(NFWRadius[i]*LengthUnits, 2.0) +
- POW((POW(POW(NFWRadius[i]*LengthUnits/(1.0e-3*Mpc),0.5975)/3.206e-7,0.9) +
+ ClusterSMBHBCG*POW((POW(POW(NFWRadius[i]*LengthUnits/(1.0e-3*Mpc),0.5975)/3.206e-7,0.9) +
POW(POW(NFWRadius[i]*LengthUnits/(1.0e-3*Mpc), 1.849)/1.861e-6, 0.9)), -1.0/0.9) +
- GravConst*SolarMass*3.4e8 / POW(NFWRadius[i]*LengthUnits, 2) ;
+ GravConst*SolarMass* ClusterSMBHMass / POW(NFWRadius[i]*LengthUnits, 2) ;
+ }//end Perseus
+ else{ //Elliptical galaxies
+ if (SphereType[sphere]==1) { //NGC 4472
+ NFWTemp[i]=1.16059e7*(1.17-(1.17-0.6)*exp(-NFWRadius[i]*LengthUnits/(2.0*5*1.0e-3*Mpc)));
+ }
+ if (SphereType[sphere]==2) { //NGC 6482
+ NFWTemp[i]=1.16059e7*(0.4+0.4*exp(-NFWRadius[i]*LengthUnits/(2.0*8.0*1.0e-3*Mpc)));
+ }
+ Allg[i]=GravConst*PointSourceGravityConstant*SolarMass*
+ ((log(1.0+x1)-x1/(1.0+x1)) /(log(1.0+1.0)-1.0/(1.0+1.0)))/POW(NFWRadius[i]*LengthUnits, 2.0) +
+ GravConst*(ClusterSMBHBCG*SolarMass*1.0e11)/POW(NFWRadius[i]*LengthUnits+EllipticalGalaxyRe*1.0e-3*Mpc/1.8153, 2) + //ClusterSMBHBCG is M_* here
+ GravConst*SolarMass* ClusterSMBHMass / POW(NFWRadius[i]*LengthUnits, 2) ;
+ }
+
if (i==0){
-// GasDensity[i]=NFWDensity[i]*(CriticalDensity/pow(ExpansionFactor, 3))*SolarMass/pow(Mpc,3)*0.15; // in cgs
GasDensity[i]=NFWDensity[i]*DensityUnits*0.15; // in cgs
NFWPressure[i]=kboltz*NFWTemp[i]*GasDensity[i]/ (mu * mh); // in cgs
dpdr = -Allg[i]*GasDensity[i];
@@ -201,11 +201,11 @@
GasDensity[i]=NFWPressure[i]/(kboltz * NFWTemp[i]/(mu * mh));
dpdr = -Allg[i]* GasDensity[i];
}
- }
- fprintf(fptr, "%d %g %g %g %g %g %g\n", i, NFWRadius[i],
+
+ fprintf(fptr, "%"ISYM" %"GSYM" %"GSYM" %"GSYM" %"GSYM" %"GSYM" %"GSYM" \n", i, NFWRadius[i],
NFWDensity[i], Allg[i], NFWPressure[i], NFWTemp[i], GasDensity[i]);
} //end for
- fprintf(fptr, "CriticalDensity = %g , DensityUnits = %g, TimeUnits=%g, LengthUnits= %g\n", CriticalDensity, DensityUnits, TimeUnits, LengthUnits);
+ fprintf(fptr, "CriticalDensity = %"GSYM" , DensityUnits = %"GSYM", TimeUnits=%"GSYM", LengthUnits= %"GSYM"\n", CriticalDensity, DensityUnits, TimeUnits, LengthUnits);
fclose(fptr);
/* Loop over the set-up twice, once to count the particles, the second
@@ -265,51 +265,31 @@
/* Find distance from center. */
- r = sqrt(pow(fabs(x-SpherePosition[sphere][0]), 2) +
- pow(fabs(y-SpherePosition[sphere][1]), 2) +
- pow(fabs(z-SpherePosition[sphere][2]), 2) );
+ r = sqrt(POW(fabs(x-SpherePosition[sphere][0]), 2) +
+ POW(fabs(y-SpherePosition[sphere][1]), 2) +
+ POW(fabs(z-SpherePosition[sphere][2]), 2) );
r = max(r, 0.1*CellWidth[0][0]);
if (r < SphereRadius[sphere]) {
- /* 3) NFW profile (use look-up table for temperature and
- velocity dispersion)*/
-
- if (SphereType[sphere] == 3) {
- x1 = r/SphereCoreRadius[sphere];
- dens1 = SphereDensity[sphere]/(x1*(1.0+x1)*(1.0+x1));
- for (m = 1; m < NFW_POINTS; m++)
- if (r > NFWRadius[m]) {
- temperature = NFWTemp[m] + (NFWTemp[m-1] - NFWTemp[m])*
- (r - NFWRadius[m])/(NFWRadius[m-1] - NFWRadius[m]);
- sigma1 = NFWSigma[m] + (NFWSigma[m-1] - NFWSigma[m])*
- (r - NFWRadius[m])/(NFWRadius[m-1] - NFWRadius[m]);
- break;
- }
- }
-
- /* 6-8) Perseus */
-
- if (SphereType[sphere] == 6 || SphereType[sphere] == 7 || SphereType[sphere] == 8) {
FLOAT xpos, ypos, zpos, vc, rz;
x1 = r/SphereCoreRadius[sphere];
dens1 = SphereDensity[sphere]/(x1*(1.0+x1)*(1.0+x1));
- /* 6) Perseus_old */
- if (SphereType[sphere] == 6) {
- gas_dens1 =mh*(0.0192/(1.0+pow(r*LengthUnits/(18.0e-3*Mpc),3.0))+0.046/pow((1.0+pow(r*LengthUnits/(57.0e-3*Mpc), 2.0)), 1.8)+0.00563/pow((1.0+pow(r*LengthUnits/(200.0e-3*Mpc), 2.0)), 1.1))/DensityUnits/0.88;
- temp1 = 8.12e7*(1.0+pow(r*LengthUnits/(1.0e-3*Mpc*71),3))/(2.3 + pow(r*LengthUnits/(1.0e-3*Mpc*71),3));// in K
- }
- /* 7) Perseus no gas self-gravity, force HSE */
-
- if (SphereType[sphere] == 7 || SphereType[sphere] == 8) {
- for (m = 1; m < NFW_POINTS; m++)
+ /* 6 Perseus with self-gravity */
+ /* 3, 7, 8 Perseus no gas self-gravity, force HSE */
+ for (m = 1; m < NFW_POINTS; m++)
if (r > NFWRadius[m]) {
temp1 = NFWTemp[m] + (NFWTemp[m-1] - NFWTemp[m])*
(r - NFWRadius[m])/(NFWRadius[m-1] - NFWRadius[m]); // in K
- gas_dens1 = (GasDensity[m] + (GasDensity[m-1] - GasDensity[m])*
- (r - NFWRadius[m])/(NFWRadius[m-1] - NFWRadius[m]))/DensityUnits; // in code unit
+ if (SphereType[sphere] == 6) {
+ gas_dens1 =mh*(0.0192/(1.0+POW(r*LengthUnits/(18.0e-3*Mpc),3.0))+0.046/pow((1.0+pow(r*LengthUnits/(57.0e-3*Mpc), 2.0)), 1.8)+
+ 0.0048/POW((1.0+pow(r*LengthUnits/(200.0e-3*Mpc), 2.0)), 1.1))/DensityUnits/0.88;
+ }
+ else{
+ gas_dens1 = (GasDensity[m] + (GasDensity[m-1] - GasDensity[m])*
+ (r - NFWRadius[m])/(NFWRadius[m-1] - NFWRadius[m]))/DensityUnits; // in code unit
+ }
break; // break when NFWRadius just drops below r
- }
}
/* Loop over dims if using Zeus (since vel's face-centered). */
@@ -325,7 +305,7 @@
vc = ClusterInitialSpinParameter*sqrt(GravConst*PointSourceGravityConstant*SolarMass/(PointSourceGravityCoreRadius)); /*in GCS unit*/
- rz = sqrt(pow(fabs(xpos), 2) + pow(fabs(ypos), 2));
+ rz = sqrt(POW(fabs(xpos), 2) + pow(fabs(ypos), 2));
rz = max(rz, 0.1*CellWidth[0][0]);
if (r > 6.25e-4) { //10kpc
@@ -345,7 +325,6 @@
Velocity[2] = 0;
}
} // end: loop over dims
- } // end: Perseus
/* If the density is larger than the background (or the previous
@@ -383,15 +362,12 @@
if (HydroMethod != Zeus_Hydro)
for (dim = 0; dim < GridRank; dim++)
- BaryonField[1][n] += 0.5*pow(BaryonField[ivel+dim][n], 2);
+ BaryonField[1][n] += 0.5*POW(BaryonField[ivel+dim][n], 2);
} // end loop over grid
} // end loop SetupLoopCount
- if (SphereUseParticles && debug)
- printf("ClusterInitialize: NumberOfParticles = %d\n",
- NumberOfParticles);
return SUCCESS;
}
diff -r 20cc80373d61 -r 5795a56c9b22 src/enzo/Grid_ClusterSMBHEachGridGasMass.C
--- a/src/enzo/Grid_ClusterSMBHEachGridGasMass.C
+++ b/src/enzo/Grid_ClusterSMBHEachGridGasMass.C
@@ -64,15 +64,13 @@
}
int dim = 0;
- float DiskRadius, ClusterSMBHDiskRadius = 0.5; //ClusterSMBHDiskRadiu make parameter?
+ float DiskRadius; //ClusterSMBHDiskRadius = 0.5; ClusterSMBHDiskRadius is now a parameter.
DiskRadius = ClusterSMBHDiskRadius*kpc/LengthUnits; //from kpc to codeunits
for (dim = 0; dim < GridRank; dim++) {
DiskCenter[dim] = PointSourceGravityPosition[dim];
DiskLeftCorner[dim] = PointSourceGravityPosition[dim]- DiskRadius;
DiskRightCorner[dim] = PointSourceGravityPosition[dim] + DiskRadius;
}
-// printf("DiskLeftCorner = %g %g %g\n", DiskLeftCorner[0],DiskLeftCorner[1],DiskLeftCorner[2]);
-// printf("DiskRightCorner = %g %g %g\n", DiskRightCorner[0],DiskRightCorner[1],DiskRightCorner[2]);
/* Compute indices of disk region. */
@@ -93,11 +91,12 @@
} // end: loop over dim
-// printf("DiskStartIndex = %d %d %d\n", DiskStartIndex[0],DiskStartIndex[1],DiskStartIndex[2]);
-// printf("DiskEndIndex = %d %d %d\n", DiskEndIndex[0],DiskEndIndex[1],DiskEndIndex[2]);
int i, j, k, size = GridDimension[0]*GridDimension[1]*GridDimension[2];
float ColdGasTemperature = 3.0e4; //in K--parameter?
+ if (ClusterSMBHCalculateGasMass == 4){
+ ColdGasTemperature = 3.0e8; //basically whatever--everything gets accreted
+ }
float *BaryonFieldTemperature = new float[size]; // i.e. temperature
if (BaryonFieldTemperature == NULL)
ENZO_FAIL("Unable to allocate Temperature field in Grid_ClusterSMBHEachGridGasMass.");
@@ -105,15 +104,14 @@
for (k = DiskStartIndex[2]; k <= DiskEndIndex[2]; k++) {
for (j = DiskStartIndex[1]; j <= DiskEndIndex[1]; j++) {
for (i = DiskStartIndex[0]; i <= DiskEndIndex[0]; i++) {
-// printf("BaryonFieldTemperature[GRIDINDEX_NOGHOST(i,j,k) = %g \n", BaryonFieldTemperature[GRIDINDEX_NOGHOST(i,j,k)]);
if (BaryonFieldTemperature[GRIDINDEX_NOGHOST(i,j,k)] < ColdGasTemperature)
- ClusterSMBHColdGasMass += BaryonField[DensNum][GRIDINDEX_NOGHOST(i,j,k)]*pow(CellWidth[0][0],3); //Assuming it is refined to the highest refinement level (otherwise we should use the CellWidth at the exact position.)
-// printf("BaryonField[DensNum][GRIDINDEX_NOGHOST(i,j,k)] and ClusterSMBHColdGasMass = %g %g \n", BaryonField[DensNum][GRIDINDEX_NOGHOST(i,j,k)], ClusterSMBHColdGasMass);
-//take out part of the mass in ClusterSMBHFeedback?
+ ClusterSMBHColdGasMass += BaryonField[DensNum][GRIDINDEX_NOGHOST(i,j,k)]*POW(CellWidth[0][0],3); //Assuming it is refined to the highest refinement level (otherwise we should use the CellWidth at the exact position.)
}
}
}
-// printf("Each Grid ClusterSMBHColdGasMass = %g \n", ClusterSMBHColdGasMass);
+ if (ClusterSMBHCalculateGasMass == 4){
+ ClusterSMBHAccretionTime=dtFixed*TimeUnits; /*now in s*/
+ }
delete [] BaryonFieldTemperature;
return SUCCESS;
diff -r 20cc80373d61 -r 5795a56c9b22 src/enzo/Grid_ClusterSMBHFeedback.C
--- a/src/enzo/Grid_ClusterSMBHFeedback.C
+++ b/src/enzo/Grid_ClusterSMBHFeedback.C
@@ -40,16 +40,14 @@
if (MyProcessorNumber != ProcessorNumber)
return SUCCESS;
- /* Return if not on most-refined level. */
- if (level != MaximumRefinementLevel)
- return SUCCESS;
- //printf("in Feedback ClusterSMBHFeedbackSwitch = %d \n", ClusterSMBHFeedbackSwitch);
+ /* Return if not on most-refined level. */
+ if (ClusterSMBHCalculateGasMass != 4 && level != MaximumRefinementLevel)
+ return SUCCESS; /*jet is not on the most-refined level only for Bondi*/
- /* Return if using method 1 and Switch is off. */
+ /* Return if using method 1 or 2 and Switch is off. */
if (ClusterSMBHCalculateGasMass != 0 && ClusterSMBHFeedbackSwitch == FALSE)
return SUCCESS;
- //printf("starting feedback");
int DensNum, GENum, TENum, Vel1Num, Vel2Num, Vel3Num;
if (this->IdentifyPhysicalQuantities(DensNum, GENum, Vel1Num, Vel2Num,
Vel3Num, TENum) == FAIL) ///this or thisgrid
@@ -74,21 +72,19 @@
fprintf(stderr, "Error in GetUnits.\n");
return FAIL;
}
- MassUnits = DensityUnits*pow(LengthUnits,3);
+ MassUnits = DensityUnits*POW(LengthUnits,3);
/* If Time is earlier than ClusterSMBHStartTime, return. */
if (Time-ClusterSMBHStartTime < 0.0)
return SUCCESS;
int i, j, k, dim = 0;
int jet_dim; // z-axis (should make parameter?)
-// jet_dim = (int(ClusterSMBHJetAnglePhi/2.0)+2) % 3;
jet_dim = ClusterSMBHJetDim % 3;
-// printf("jet_dim= %d \n",jet_dim);
float JetScaleRadius; // cellwidths
float JetMdot; // Jet mass flow in SolarMass/year (need to convert units)-- gets value from parameter ClusterSMBHJetMdot
float JetVelocity, FastJetVelocity; // Jet Velocity in km/s (should make parameter)-- gets value from parameter ClusterSMBHJetVelocity
JetScaleRadius = ClusterSMBHJetRadius/2.0; //JetScaleRadius is half the radius of the jet launch region in cellwidths
- float DiskRadius, ClusterSMBHDiskRadius = 0.5; //ClusterSMBHDiskRadiu make parameter?
+ float DiskRadius; //ClusterSMBHDiskRadius = 0.5; //ClusterSMBHDiskRadius is now a parameter
DiskRadius = ClusterSMBHDiskRadius*kpc/LengthUnits; //from kpc to codeunits
for (dim = 0; dim < GridRank; dim++) {
@@ -104,8 +100,9 @@
DiskRightCorner[dim] = PointSourceGravityPosition[dim] + DiskRadius;
}
- JetLeftCorner[jet_dim] -= ClusterSMBHJetLaunchOffset*CellWidth[jet_dim][0];
- JetRightCorner[jet_dim] += ClusterSMBHJetLaunchOffset*CellWidth[jet_dim][0];
+ JetLeftCorner[jet_dim] -= ClusterSMBHJetLaunchOffset*kpc/LengthUnits;
+ JetRightCorner[jet_dim] += ClusterSMBHJetLaunchOffset*kpc/LengthUnits; //from kpc to codeunits
+
/* Compute indices of jet launch region. */
@@ -125,6 +122,14 @@
if (JetStartIndex[dim] > GridDimension[dim]-1 || JetEndIndex[dim] < 0)
JetOnGrid = false;
+ /*For Bondi*/
+ if (ClusterSMBHCalculateGasMass == 4 && level != MultiRefineRegionMaximumOuterLevel)
+ JetOnGrid = false;
+
+ /*When not Bondi*/
+ if (ClusterSMBHCalculateGasMass != 4 && level != MaximumRefinementLevel)
+ JetOnGrid = false;
+
DiskStartIndex[dim] = nint((DiskLeftCorner[dim] - CellLeftEdge[dim][0] - 0.5*CellWidth[dim][0])/CellWidth
[dim][0]);
DiskEndIndex[dim] = nint((DiskRightCorner[dim] - CellLeftEdge[dim][0] - 0.5*CellWidth[dim][0])/CellWidth[
@@ -144,44 +149,40 @@
if (jet_dim == 2){
for (j = JetStartIndex[1]; j <= JetEndIndex[1]; j++) {
for (i = JetStartIndex[0]; i <= JetEndIndex[0]; i++) {
- radius = sqrt(pow((CellLeftEdge[0][0] + (i+0.5)*CellWidth[0][0] - JetCenter[0]), 2) +
- pow((CellLeftEdge[1][0] + (j+0.5)*CellWidth[1][0] - JetCenter[1]), 2) )/
+ radius = sqrt(POW((CellLeftEdge[0][0] + (i+0.5)*CellWidth[0][0] - JetCenter[0]), 2) +
+ POW((CellLeftEdge[1][0] + (j+0.5)*CellWidth[1][0] - JetCenter[1]), 2) )/
CellWidth[0][0];
- JetNormalization += exp(-pow(radius/JetScaleRadius,2)/2.0); //add 2!!!!!!!!!!!!!!, print stqtement
+ JetNormalization += exp(-POW(radius/JetScaleRadius,2)/2.0); //add 2!!!!!!!!!!!!!!, print statement
}
}
}
else if(jet_dim == 0){
for (j = JetStartIndex[1]; j <= JetEndIndex[1]; j++) {
for (k = JetStartIndex[2]; k <= JetEndIndex[2]; k++) {
- radius = sqrt(pow((CellLeftEdge[2][0] + (k+0.5)*CellWidth[2][0] - JetCenter[2]), 2) +
- pow((CellLeftEdge[1][0] + (j+0.5)*CellWidth[1][0] - JetCenter[1]), 2) )/
+ radius = sqrt(POW((CellLeftEdge[2][0] + (k+0.5)*CellWidth[2][0] - JetCenter[2]), 2) +
+ POW((CellLeftEdge[1][0] + (j+0.5)*CellWidth[1][0] - JetCenter[1]), 2) )/
CellWidth[0][0];
- JetNormalization += exp(-pow(radius/JetScaleRadius,2)/2.0); //add 2!!!!!!!!!!!!!!, print stqtement
+ JetNormalization += exp(-POW(radius/JetScaleRadius,2)/2.0); //add 2!!!!!!!!!!!!!!, print statement
}
}
}
if (jet_dim == 1){
for (k = JetStartIndex[2]; k <= JetEndIndex[2]; k++) {
for (i = JetStartIndex[0]; i <= JetEndIndex[0]; i++) {
- radius = sqrt(pow((CellLeftEdge[0][0] + (i+0.5)*CellWidth[0][0] - JetCenter[0]), 2) +
- pow((CellLeftEdge[2][0] + (k+0.5)*CellWidth[2][0] - JetCenter[2]), 2) )/
+ radius = sqrt(POW((CellLeftEdge[0][0] + (i+0.5)*CellWidth[0][0] - JetCenter[0]), 2) +
+ POW((CellLeftEdge[2][0] + (k+0.5)*CellWidth[2][0] - JetCenter[2]), 2) )/
CellWidth[0][0];
- JetNormalization += exp(-pow(radius/JetScaleRadius,2)/2.0); //add 2!!!!!!!!!!!!!!, print stqtement
+ JetNormalization += exp(-POW(radius/JetScaleRadius,2)/2.0); //add 2!!!!!!!!!!!!!!, print statement
}
}
}
/* Convert to code units. */
- density_normalization = (JetMdot/JetNormalization)*dtFixed/pow(CellWidth[0][0], 3);
+ density_normalization = (JetMdot/JetNormalization)*dtFixed/POW(CellWidth[0][0], 3);
Tramp = ClusterSMBHTramp*1.0e6*3.1557e7/TimeUnits; // from Myr to code units
-// SlowJetVelocity = ClusterSMBHJetVelocity*1.0e5/VelocityUnits; //from km/s to code units //
JetVelocity = sqrt((ClusterSMBHJetEdot*1.0e44*ClusterSMBHKineticFraction*2)/(ClusterSMBHJetMdot*SolarMass/3.1557e7))/VelocityUnits;
JetVelocity *= min((Time-ClusterSMBHStartTime)/Tramp, 1.0); //linear ramp
-// FastJetVelocity = ClusterSMBHFastJetVelocity*1.0e5/VelocityUnits; //from km/s to code units
-// FastJetVelocity *= min((Time-ClusterSMBHStartTime)/Tramp, 1.0); //linear ramp
-// JetVelocity *= 0.5*tanh(5.0*((Time-ClusterSMBHStartTime)/Tramp-0.5)+1.0); // tanh ramp
/* Clip edge of jet launching disk so we don't set cell off the edge of the grid. */
@@ -202,9 +203,9 @@
for (i = JetStartIndex[0]; i <= JetEndIndex[0]; i++) {
xpos = CellLeftEdge[0][i] + 0.5*CellWidth[0][i] - JetCenter[0]; //in the cell surface center
ypos = CellLeftEdge[1][j] + 0.5*CellWidth[1][j] - JetCenter[1]; //not in cellwidth
- radius = sqrt(pow(xpos,2) + pow(ypos, 2))/CellWidth[0][0]; //in cell width
- density_add = density_normalization*exp(-pow(radius/JetScaleRadius,2)/2.0);
- energy_add = ((1.0-ClusterSMBHKineticFraction)/ClusterSMBHKineticFraction)*0.5*density_add*pow(JetVelocity, 2.0);
+ radius = sqrt(POW(xpos,2) + POW(ypos, 2))/CellWidth[0][0]; //in cell width
+ density_add = density_normalization*exp(-POW(radius/JetScaleRadius,2)/2.0);
+ energy_add = ((1.0-ClusterSMBHKineticFraction)/ClusterSMBHKineticFraction)*0.5*density_add*POW(JetVelocity, 2.0);
//JetVelocity = (radius > ClusterSMBHFastJetRadius) ? SlowJetVelocity : FastJetVelocity;
if (ClusterSMBHJetOpenAngleRadius < 0.00001) { // if jet openning angle = 0, set ClusterSMBHJetOpenAngleRadius=0
JetVelocity_z = JetVelocity*cos(ClusterSMBHJetAngleTheta*pi);
@@ -214,8 +215,8 @@
ClusterSMBHJetAnglePhi = Time*2.0/(ClusterSMBHJetPrecessionPeriod*1.0e6*3.1557e7/TimeUnits); // ClusterSMBHJetPrecessionPeriod from Myr to codeunit; *2.0 instead of 2*pi because pi is used later
}
else {
- JetVelocity_z = JetVelocity * ClusterSMBHJetOpenAngleRadius / sqrt(pow(ClusterSMBHJetOpenAngleRadius, 2) + pow(radius, 2));
- JetVelocity_xy = JetVelocity * radius / sqrt(pow(ClusterSMBHJetOpenAngleRadius, 2) + pow(radius, 2));
+ JetVelocity_z = JetVelocity * ClusterSMBHJetOpenAngleRadius / sqrt(POW(ClusterSMBHJetOpenAngleRadius, 2) + POW(radius, 2));
+ JetVelocity_xy = JetVelocity * radius / sqrt(POW(ClusterSMBHJetOpenAngleRadius, 2) + POW(radius, 2));
JetVelocity_x = JetVelocity_xy * (xpos/CellWidth[0][0]) / radius;
JetVelocity_y = JetVelocity_xy * (ypos/CellWidth[0][0]) / radius;
}
@@ -252,7 +253,6 @@
BaryonField[Vel2Num][GRIDINDEX_NOGHOST(i,j,k)] = density_ratio * JetVelocity_y*sin(ClusterSMBHJetAngleTheta*pi)*sin(ClusterSMBHJetAnglePhi*pi) + (1.0-density_ratio)*BaryonField[Vel2Num][GRIDINDEX_NOGHOST(i,j,k)];
BaryonField[Vel3Num][GRIDINDEX_NOGHOST(i,j,k)] = density_ratio*JetVelocity_z + (1.0-density_ratio)*BaryonField[Vel3Num][GRIDINDEX_NOGHOST(i,j,k)];
BaryonField[TENum][GRIDINDEX_NOGHOST(i,j,k)] = BaryonField[TENum][GRIDINDEX_NOGHOST(i,j,k)]*(1.0-density_ratio) + energy_add/BaryonField[DensNum][GRIDINDEX_NOGHOST(i,j,k)];
- //printf("upper jet BaryonField[Vel2Num][GRIDINDEX_NOGHOST(i,j,k)] = %g \n", BaryonField[Vel2Num][GRIDINDEX_NOGHOST(i,j,k)]);
} //end top jet
}
}
@@ -265,9 +265,9 @@
for (k = JetStartIndex[2]; k <= JetEndIndex[2]; k++) {
zpos = CellLeftEdge[2][k] + 0.5*CellWidth[2][k] - JetCenter[2]; //in the cell surface center
ypos = CellLeftEdge[1][j] + 0.5*CellWidth[1][j] - JetCenter[1]; //not in cellwidth
- radius = sqrt(pow(zpos,2) + pow(ypos, 2))/CellWidth[0][0]; //in cell width
- density_add = density_normalization*exp(-pow(radius/JetScaleRadius,2)/2.0);
- energy_add = ((1.0-ClusterSMBHKineticFraction)/ClusterSMBHKineticFraction)*0.5*density_add*pow(JetVelocity, 2.0);
+ radius = sqrt(POW(zpos,2) + POW(ypos, 2))/CellWidth[0][0]; //in cell width
+ density_add = density_normalization*exp(-POW(radius/JetScaleRadius,2)/2.0);
+ energy_add = ((1.0-ClusterSMBHKineticFraction)/ClusterSMBHKineticFraction)*0.5*density_add*POW(JetVelocity, 2.0);
if (ClusterSMBHJetOpenAngleRadius < 0.00001) { // if jet openning angle = 0, set ClusterSMBHJetOpenAngleRadius=0
JetVelocity_x = JetVelocity*cos(ClusterSMBHJetAngleTheta*pi);
JetVelocity_z = JetVelocity; // mutiplied by sincos later
@@ -276,8 +276,8 @@
ClusterSMBHJetAnglePhi = Time*2.0/(ClusterSMBHJetPrecessionPeriod*1.0e6*3.1557e7/TimeUnits); // ClusterSMBHJetPrecessionPeriod from Myr to codeunit; *2.0 instead of 2*pi because pi is used later
}
else {
- JetVelocity_x = JetVelocity * ClusterSMBHJetOpenAngleRadius / sqrt(pow(ClusterSMBHJetOpenAngleRadius, 2) + pow(radius, 2));
- JetVelocity_zy = JetVelocity * radius / sqrt(pow(ClusterSMBHJetOpenAngleRadius, 2) + pow(radius, 2));
+ JetVelocity_x = JetVelocity * ClusterSMBHJetOpenAngleRadius / sqrt(POW(ClusterSMBHJetOpenAngleRadius, 2) + POW(radius, 2));
+ JetVelocity_zy = JetVelocity * radius / sqrt(POW(ClusterSMBHJetOpenAngleRadius, 2) + POW(radius, 2));
JetVelocity_z = JetVelocity_zy * (zpos/CellWidth[0][0]) / radius;
JetVelocity_y = JetVelocity_zy * (ypos/CellWidth[0][0]) / radius;
}
@@ -327,9 +327,9 @@
for (k = JetStartIndex[2]; k <= JetEndIndex[2]; k++) {
zpos = CellLeftEdge[2][k] + 0.5*CellWidth[2][k] - JetCenter[2]; //in the cell surface center
xpos = CellLeftEdge[0][i] + 0.5*CellWidth[0][i] - JetCenter[0]; //not in cellwidth
- radius = sqrt(pow(zpos,2) + pow(xpos, 2))/CellWidth[0][0]; //in cell width
- density_add = density_normalization*exp(-pow(radius/JetScaleRadius,2)/2.0);
- energy_add = ((1.0-ClusterSMBHKineticFraction)/ClusterSMBHKineticFraction)*0.5*density_add*pow(JetVelocity, 2.0);
+ radius = sqrt(POW(zpos,2) + POW(xpos, 2))/CellWidth[0][0]; //in cell width
+ density_add = density_normalization*exp(-POW(radius/JetScaleRadius,2)/2.0);
+ energy_add = ((1.0-ClusterSMBHKineticFraction)/ClusterSMBHKineticFraction)*0.5*density_add*POW(JetVelocity, 2.0);
if (ClusterSMBHJetOpenAngleRadius < 0.00001) { // if jet openning angle = 0, set ClusterSMBHJetOpenAngleRadius=0
JetVelocity_y = JetVelocity*cos(ClusterSMBHJetAngleTheta*pi);
JetVelocity_z = JetVelocity; // mutiplied by sincos later
@@ -338,8 +338,8 @@
ClusterSMBHJetAnglePhi = Time*2.0/(ClusterSMBHJetPrecessionPeriod*1.0e6*3.1557e7/TimeUnits); // ClusterSMBHJetPrecessionPeriod from Myr to codeunit; *2.0 instead of 2*pi because pi is used later
}
else {
- JetVelocity_y = JetVelocity * ClusterSMBHJetOpenAngleRadius / sqrt(pow(ClusterSMBHJetOpenAngleRadius, 2) + pow(radius, 2));
- JetVelocity_zx = JetVelocity * radius / sqrt(pow(ClusterSMBHJetOpenAngleRadius, 2) + pow(radius, 2));
+ JetVelocity_y = JetVelocity * ClusterSMBHJetOpenAngleRadius / sqrt(POW(ClusterSMBHJetOpenAngleRadius, 2) + POW(radius, 2));
+ JetVelocity_zx = JetVelocity * radius / sqrt(POW(ClusterSMBHJetOpenAngleRadius, 2) + POW(radius, 2));
JetVelocity_z = JetVelocity_zx * (zpos/CellWidth[0][0]) / radius;
JetVelocity_x = JetVelocity_zx * (ypos/CellWidth[0][0]) / radius;
}
@@ -380,16 +380,21 @@
}
}
} //end if jet_dim == 1
-}
+} // end JetOnGrid==true
/* loop over cells of disk, remove mass. */
+ /* Return if not on most-refined level. */
+ if (level != MaximumRefinementLevel)
+ return SUCCESS;
+
if (DiskOnGrid == true && ClusterSMBHCalculateGasMass != 0){
- if (ClusterSMBHColdGasMass < 0.0001)
- return SUCCESS;
float AccretionRate = JetMdot*2.0; // in codeunit *2 because Mdot is Mdot of one jet. There are two jets!
int size = GridDimension[0]*GridDimension[1]*GridDimension[2];
- float ColdGasTemperature = 3.0e4;
+ float ColdGasTemperature = 3.0e4; //in K--parameter?
+ if (ClusterSMBHCalculateGasMass == 4){
+ ColdGasTemperature = 3.0e8; //basically whatever--everything gets accreted
+ }
float *BaryonFieldTemperature = new float[size]; // i.e. temperature
if (BaryonFieldTemperature == NULL)
ENZO_FAIL("Unable to allocate Temperature field in Grid_ClusterSMBHEachGridGasMass.");
@@ -398,7 +403,13 @@
for (j = DiskStartIndex[1]; j <= DiskEndIndex[1]; j++) {
for (i = DiskStartIndex[0]; i <= DiskEndIndex[0]; i++) {
if (BaryonFieldTemperature[GRIDINDEX_NOGHOST(i,j,k)] < ColdGasTemperature)
- BaryonField[DensNum][GRIDINDEX_NOGHOST(i,j,k)] *= 1.0 - AccretionRate*dtFixed/ClusterSMBHColdGasMass; //take out part of the mass
+ {if (ClusterSMBHCalculateGasMass == 4){
+ BaryonField[DensNum][GRIDINDEX_NOGHOST(i,j,k)]=1.0e-35;
+ }
+ else{
+ BaryonField[DensNum][GRIDINDEX_NOGHOST(i,j,k)] *= 1.0 - AccretionRate*dtFixed/ClusterSMBHColdGasMass; //take out part of the mass
+ }
+ }
}
}
}
diff -r 20cc80373d61 -r 5795a56c9b22 src/enzo/Grid_ComputeAccelerationFieldExternal.C
--- a/src/enzo/Grid_ComputeAccelerationFieldExternal.C
+++ b/src/enzo/Grid_ComputeAccelerationFieldExternal.C
@@ -181,18 +181,34 @@
// and 1.0 otherwise.
//Yuan, Aug 2011: Add BCG and SMBH potential if ProblemType == 108
if(ProblemType == 108){
- accel = GravConst*PointSourceGravityConstant*SolarMass*
+ accel = GravConst*PointSourceGravityConstant*SolarMass*
+ ((log(1.0+x )-x /(1.0+x )) /
+ (log(1.0+1.0)-1.0/(1.0+1.0))) /
+ POW(radius*LengthUnits, 2.0) / AccelUnits +
+ ClusterSMBHBCG*POW((POW(POW(radius*LengthUnits/(1.0e-3*Mpc),0.5975)/3.206e-7,0.9) +
+ POW(POW(radius*LengthUnits/(1.0e-3*Mpc), 1.849)/1.861e-6, 0.9)), -1.0/0.9) / AccelUnits +
+ GravConst*SolarMass*ClusterSMBHMass / POW(radius*LengthUnits, 2) / AccelUnits; // + BCG + BH mass
+ /*Bondi*/
+ if(ClusterSMBHCalculateGasMass == 4){
+ accel = GravConst*PointSourceGravityConstant*SolarMass*
((log(1.0+x )-x /(1.0+x )) /
(log(1.0+1.0)-1.0/(1.0+1.0))) /
POW(radius*LengthUnits, 2.0) / AccelUnits +
- POW((POW(POW(radius*LengthUnits/(1.0e-3*Mpc),0.5975)/3.206e-7,0.9) + POW(POW(radius*LengthUnits/(1.0e-3*Mpc), 1.849)/1.861e-6, 0.9)), -1.0/0.9) / AccelUnits + GravConst*SolarMass*3.4e8 / POW(radius*LengthUnits, 2) / AccelUnits ; // + BCG + BH mass for Perseus;
- } else {
- accel = GravConst*PointSourceGravityConstant*MassUnitsDouble*
- ((log(1.0+x )-x /(1.0+x )) /
- (log(1.0+1.0)-1.0/(1.0+1.0))) /
- POW(radius*LengthUnits, 2.0) / AccelUnits;
+ ClusterSMBHBCG*POW((POW(POW(radius*LengthUnits/(1.0e-3*Mpc),0.5975)/3.206e-7,0.9) +
+ POW(POW(radius*LengthUnits/(1.0e-3*Mpc), 1.849)/1.861e-6, 0.9)), -1.0/0.9) / AccelUnits +
+ GravConst*SolarMass*ClusterSMBHMass/POW(radius*LengthUnits - 2.0*GravConst*SolarMass/POW(clight,2), 2)/ AccelUnits;
+ }
+ /*Elliptical Galaxy Fixed Gravity*/
+ if(EllipticalGalaxyRe > 0.001){
+ accel = GravConst*PointSourceGravityConstant*SolarMass*
+ ((log(1.0+x )-x /(1.0+x )) /
+ (log(1.0+1.0)-1.0/(1.0+1.0))) /
+ POW(radius*LengthUnits, 2.0) / AccelUnits +
+ GravConst*(ClusterSMBHBCG*SolarMass*1.0e11)/POW(radius*LengthUnits+EllipticalGalaxyRe*1.0e-3*Mpc/1.8153, 2)/AccelUnits +
+ GravConst*SolarMass*ClusterSMBHMass/POW(radius*LengthUnits - 2.0*GravConst*SolarMass/POW(clight,2), 2)/ AccelUnits;
+ }
}
- accel = accel/radius; // this radius normalizes the multiplication by
+ accel = accel/radius; // this radius normalizes the multiplication by
// xpos,ypos,zpos done below
} else if (PointSourceGravity == 3) {
@@ -282,6 +298,35 @@
((log(1.0+x )-x /(1.0+x )) /
(log(1.0+1.0)-1.0/(1.0+1.0))) /
POW(radius*LengthUnits, 2) / AccelUnits;
+ /*Yuan, Aug 2011: Add BCG and SMBH potential if ProblemType == 108*/
+ if(ProblemType == 108){
+ accel = GravConst*PointSourceGravityConstant*SolarMass*
+ ((log(1.0+x )-x /(1.0+x )) /
+ (log(1.0+1.0)-1.0/(1.0+1.0))) /
+ POW(radius*LengthUnits, 2.0) / AccelUnits +
+ ClusterSMBHBCG*POW((POW(POW(radius*LengthUnits/(1.0e-3*Mpc),0.5975)/3.206e-7,0.9) +
+ POW(POW(radius*LengthUnits/(1.0e-3*Mpc), 1.849)/1.861e-6, 0.9)), -1.0/0.9) / AccelUnits +
+ GravConst*SolarMass*ClusterSMBHMass / POW(radius*LengthUnits, 2) / AccelUnits; // + BCG + BH mass
+ /*Bondi*/
+ if(ClusterSMBHCalculateGasMass == 4){
+ accel = GravConst*PointSourceGravityConstant*SolarMass*
+ ((log(1.0+x )-x /(1.0+x )) /
+ (log(1.0+1.0)-1.0/(1.0+1.0))) /
+ POW(radius*LengthUnits, 2.0) / AccelUnits +
+ ClusterSMBHBCG*POW((POW(POW(radius*LengthUnits/(1.0e-3*Mpc),0.5975)/3.206e-7,0.9) +
+ POW(POW(radius*LengthUnits/(1.0e-3*Mpc), 1.849)/1.861e-6, 0.9)), -1.0/0.9) / AccelUnits +
+ GravConst*SolarMass*ClusterSMBHMass/POW(radius*LengthUnits - 2.0*GravConst*SolarMass/POW(clight,2), 2)/ AccelUnits;
+ }
+ /*Elliptical Galaxy Fixed Gravity*/
+ if(EllipticalGalaxyRe > 0.001){
+ accel = GravConst*PointSourceGravityConstant*SolarMass*
+ ((log(1.0+x )-x /(1.0+x )) /
+ (log(1.0+1.0)-1.0/(1.0+1.0))) /
+ POW(radius*LengthUnits, 2.0) / AccelUnits +
+ GravConst*(ClusterSMBHBCG*SolarMass*1.0e11)/POW(radius*LengthUnits+EllipticalGalaxyRe*1.0e-3*Mpc/1.8153, 2)/AccelUnits +
+ GravConst*SolarMass*ClusterSMBHMass/POW(radius*LengthUnits - 2.0*GravConst*SolarMass/POW(clight,2), 2)/ AccelUnits;
+ }
+ }
accel = accel/radius; // this radius normalizes the multiplication by xpos,ypos,zpos done below
diff -r 20cc80373d61 -r 5795a56c9b22 src/enzo/Grid_ComputeExternalNohBoundary2D.C
--- a/src/enzo/Grid_ComputeExternalNohBoundary2D.C
+++ /dev/null
@@ -1,183 +0,0 @@
-/***********************************************************************
-/
-/ GRID CLASS (COMPUTE RIGHT BOUNDARIES FOR 2D NOH PROBLEM BASED ON
-/ THE EXACT ANALYTICAL SOLUTION)
-/
-/ written by: Greg Bryan
-/ date: November, 1994
-/ modified1: Alexei Kritsuk, May 2005 - Nov 2005; Feb 2007
-/
-/ PURPOSE:
-/ This function computes boundary values for the external boundaries
-/ using analytical formulae:
-/ density = (1 + t/sqrt(x^2 + y^2)) 2D-case
-/ pressure = 1e-6
-/ velocity = 1 (radial)
-/
-/ 2D test is the only only supported.
-/
-/ RETURNS: FAIL or SUCCESS
-/
-************************************************************************/
-
-#include <stdio.h>
-#include <math.h>
-#include "ErrorExceptions.h"
-#include "macros_and_parameters.h"
-#include "typedefs.h"
-#include "global_data.h"
-#include "Fluxes.h"
-#include "GridList.h"
-#include "ExternalBoundary.h"
-#include "Grid.h"
-
-/* ComputeExternalNohBoundary function. It is assumed that subgrids do not
- touch the time-dependent external boundary, so this function will not
- operate on AMR subgrids. */
-
-int grid::ComputeExternalNohBoundary()
-{
-
- float d0 = 1.0, p0 = 1.0e-6, u0 = -1.0;
-
- /* Return if this doesn't involve us. */
-
- if (MyProcessorNumber != ProcessorNumber)
- return SUCCESS;
- // ERROR_MESSAGE; // THIS ROUTINE MUST(?) BE MODIFIED TO SUPPORT DOMAIN DECOMPOSITION
-
- if (NumberOfBaryonFields > 0) {
-
-
- /* Compute offsets from right and left domain edges. */
-
- int dim, GridOffsetLeft[MAX_DIMENSION], GridOffsetRight[MAX_DIMENSION];
- for (dim = 0; dim < MAX_DIMENSION; dim++)
- if (dim < GridRank) {
- GridOffsetLeft[dim] = nint(( GridLeftEdge[dim] - DomainLeftEdge[dim])/
- CellWidth[dim][0]);
- GridOffsetRight[dim] = nint((GridRightEdge[dim] - DomainRightEdge[dim])/
- CellWidth[dim][0]);
- }
- else {
- GridOffsetLeft[dim] = 0;
- GridOffsetRight[dim] = 0;
- }
-
- /* Compute needed portion of the field on current grid,
- only if this grid is in contact with the domain boundary.
- Do NOT include (0,1) and (1,0)-corners of the domain (if
- only the upper right quadrant is used, not the full box), but
- include corners for those subgrids which do not cover any
- of those domain corners.
- */
-
- int i, j, index,
- ishift = GridStartIndex[0],
- jshift = GridStartIndex[1];
-
- if (NohProblemFullBox == 1) {
- if ( GridDimension[0]%2 != 0 ||
- (GridDimension[1]%2 != 0 && GridRank > 1))
- ERROR_MESSAGE; // this must be eventually redone; works for serial runs only
- ishift += nint((DomainRightEdge[0] - DomainLeftEdge[0])/CellWidth[0][0])/2;
- jshift += nint((DomainRightEdge[1] - DomainLeftEdge[1])/CellWidth[1][0])/2;
- }
-
- int istart = 0, jstart = 0;
- if (GridOffsetLeft[0] == 0 && NohProblemFullBox == 0)
- istart = GridStartIndex[0];
- else ishift -= GridOffsetLeft[0];
-
- if (GridOffsetLeft[1] == 0 && NohProblemFullBox == 0)
- jstart = GridStartIndex[1];
- else jshift -= GridOffsetLeft[1];
-
- float radius, xx, yy;
- float time = Time;
-
- /* j-sweeps, LEFT boundary. */
-
- if (GridOffsetLeft[0] == 0 && NohProblemFullBox == 1)
- for (j = jstart; j < GridDimension[1]; j++) {
- index = j*GridDimension[0];
- yy = j + 0.5 - jshift;
- for (i = 0; i < GridStartIndex[0]; i++) {
- xx = i + 0.5 - ishift;
- radius = max(tiny_number, sqrt(xx*xx + yy*yy));
- BaryonField[0][index+i] = d0 + time/radius/CellWidth[0][0];
- BaryonField[1][index+i] = p0/(Gamma-1.0)/d0;
- BaryonField[2][index+i] = u0*xx/radius;
- BaryonField[3][index+i] = u0*yy/radius;
- if (HydroMethod != Zeus_Hydro)
- BaryonField[1][index+i] += 0.5*u0*u0;
- }
- }
-
- /* i-sweeps, LEFT boundary. */
-
- if (GridOffsetLeft[1] == 0 && NohProblemFullBox == 1)
- for (j = 0; j < GridStartIndex[1]; j++) {
- index = j*GridDimension[0];
- yy = j + 0.5 - jshift;
- for (i = istart; i < GridDimension[0]; i++) {
- xx = i + 0.5 - ishift;
- radius = max(tiny_number, sqrt(xx*xx + yy*yy));
- BaryonField[0][index+i] = d0 + time/radius/CellWidth[0][0];
- BaryonField[1][index+i] = p0/(Gamma-1.0)/d0;
- BaryonField[2][index+i] = u0*xx/radius;
- BaryonField[3][index+i] = u0*yy/radius;
- if (HydroMethod != Zeus_Hydro)
- BaryonField[1][index+i] += 0.5*u0*u0;
- }
- }
-
- /* j-sweeps, only RIGHT boundary. */
-
- if (GridOffsetRight[0] == 0)
- for (j = jstart; j < GridDimension[1]; j++) {
- index = j*GridDimension[0];
- yy = j + 0.5 - jshift;
- for (i = GridEndIndex[0]+1; i < GridDimension[0]; i++) {
- xx = i + 0.5 - ishift;
- radius = max(tiny_number, sqrt(xx*xx + yy*yy));
- BaryonField[0][index+i] = d0 + time/radius/CellWidth[0][0];
- BaryonField[1][index+i] = p0/(Gamma-1.0)/d0;
- BaryonField[2][index+i] = u0*xx/radius;
- BaryonField[3][index+i] = u0*yy/radius;
- if (HydroMethod != Zeus_Hydro)
- BaryonField[1][index+i] += 0.5*u0*u0;
- }
- }
-
-
- /* i-sweeps, only RIGHT boundary. */
-
- if (GridOffsetRight[1] == 0)
- for (j = GridEndIndex[1]+1; j < GridDimension[1]; j++) {
- index = j*GridDimension[0];
- yy = j + 0.5 - jshift;
- for (i = istart; i < GridDimension[0]; i++) {
- xx = i + 0.5 - ishift;
- radius = max(tiny_number, sqrt(xx*xx + yy*yy));
- BaryonField[0][index+i] = d0 + time/radius/CellWidth[0][0];
- BaryonField[1][index+i] = p0/(Gamma-1.0)/d0;
- BaryonField[2][index+i] = u0*xx/radius;
- BaryonField[3][index+i] = u0*yy/radius;
- if (HydroMethod != Zeus_Hydro)
- BaryonField[1][index+i] += 0.5*u0*u0;
- }
- }
-
- /* DualEnergyFormalism not supported. */
-
- if (DualEnergyFormalism)
- ERROR_MESSAGE;
-
- } // end: if (NumberOfBaryonFields > 0)
-
- this->DebugCheck("ComputeExternalNohBoundary (after)");
-
- return SUCCESS;
-
-}
diff -r 20cc80373d61 -r 5795a56c9b22 src/enzo/Grid_CorrectForRefinedFluxes.C
--- a/src/enzo/Grid_CorrectForRefinedFluxes.C
+++ b/src/enzo/Grid_CorrectForRefinedFluxes.C
@@ -361,6 +361,16 @@
fieldNumberList.push_back(GENum);
}
}
+ // When FluxCorrection is set to 2, species will be directly corrected
+ // like density, total energy, and internal energy
+ if (FluxCorrection == 2){
+ for (field = 0; field < NumberOfBaryonFields; field++) {
+ if (FieldType[field] >= ElectronDensity &&
+ FieldType[field] <= ExtraType1) {
+ fieldNumberList.push_back(field);
+ }
+ }
+ }
for (field = 0; field < NumberOfBaryonFields; field++) {
// Multiply faces by density to get conserved quantities
@@ -388,6 +398,7 @@
// them by the new density (species are not otherwise modified --
// see the next comment). This ensures that the species are changed
// to keep the same fractional density.
+ if (FluxCorrection == 1) { // Skip this routine when FluxCorrection = 2
if (
(
(FieldType[field] >= ElectronDensity
@@ -410,6 +421,7 @@
}
}
}
+ }
if (FieldTypeNoInterpolate(FieldType[field]) == FALSE
// Density, total energy, and internal energy were already
@@ -532,7 +544,10 @@
if ((FieldTypeIsDensity(FieldType[field]) == TRUE ||
FieldType[field] == TotalEnergy ||
- FieldType[field] == InternalEnergy)) {
+ FieldType[field] == InternalEnergy ||
+ ( FieldType[field] >= ElectronDensity &&
+ FieldType[field] <= ExtraType1 )
+ )) {
/* If new density & energy is < 0 then undo the
flux correction. */
@@ -704,7 +719,10 @@
/* Check for positivity and undo flux correction if negative */
if ((FieldType[field] == Density ||
FieldType[field] == TotalEnergy ||
- FieldType[field] == InternalEnergy) &&
+ FieldType[field] == InternalEnergy ||
+ ( FieldType[field] >= ElectronDensity &&
+ FieldType[field] <= ExtraType1 )
+ ) &&
BaryonField[field][FieldIndex] <= 0) {
/*if (debug) {
printf("CFRFl warn: %e %e %e %d %d %d %d [%d]\n",
@@ -728,7 +746,10 @@
}
if ((FieldType[field] == Density ||
FieldType[field] == TotalEnergy ||
- FieldType[field] == InternalEnergy) &&
+ FieldType[field] == InternalEnergy ||
+ ( FieldType[field] >= ElectronDensity &&
+ FieldType[field] <= ExtraType1 )
+ ) &&
BaryonField[field][FieldIndex + Offset] <= 0.0) {
/*if (debug) {
printf("CFRFr warn: %e %e %e %d %d %d %d (%d) [%d]\n",
@@ -828,6 +849,7 @@
/* Multiply species by density to return from fractional to real
density. (see comments above regarding species). */
+ if (FluxCorrection == 1) {
for (field = 0; field < NumberOfBaryonFields; field++)
if ( ((FieldType[field] >= ElectronDensity &&
FieldType[field] <= ExtraType1) ||
@@ -844,6 +866,7 @@
BaryonField[DensNum][index+Offset];
}
}
+ }
} // if( CorrectLeftBaryonField || CorrectRightBaryonField)
diff -r 20cc80373d61 -r 5795a56c9b22 src/enzo/Grid_OldStarFeedback.C
--- /dev/null
+++ b/src/enzo/Grid_OldStarFeedback.C
@@ -0,0 +1,97 @@
+/***********************************************************************
+/
+/ GRID: ADD Stellar Feedback to Elliptical Galaxies (Hernquist profile)
+/
+/ written by: Yuan Li
+/ date: Aug, 2015
+/ modified1:
+/
+/ PURPOSE: Stellar wind adds mass, and SN Ia adds heat
+/
+************************************************************************/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <math.h>
+#include <time.h>
+#include "ErrorExceptions.h"
+#include "macros_and_parameters.h"
+#include "typedefs.h"
+#include "global_data.h"
+#include "Fluxes.h"
+#include "GridList.h"
+#include "ExternalBoundary.h"
+#include "Grid.h"
+#include "Hierarchy.h"
+#include "CosmologyParameters.h"
+#include "phys_constants.h"
+int GetUnits(float *DensityUnits, float *LengthUnits,
+ float *TemperatureUnits, float *TimeUnits,
+ float *VelocityUnits, double *MassUnits, FLOAT Time);
+
+int grid::OldStarFeedback()
+{
+ if (MyProcessorNumber != ProcessorNumber)
+ return SUCCESS;
+
+ int DensNum, GENum, TENum, Vel1Num, Vel2Num, Vel3Num;
+ if (this->IdentifyPhysicalQuantities(DensNum, GENum, Vel1Num, Vel2Num,
+ Vel3Num, TENum) == FAIL) ///this or thisgrid
+ ENZO_FAIL("Error in IdentifyPhysicalQuantities.");
+
+
+//ClusterSMBHBCG is M_* here
+//EllipticalGalaxyRe is Re in kpc
+//OldStarFeedbackAlpha alpha -19
+
+
+ int i, j, k;
+ float a=0;
+
+ float DensityUnits = 1.0, LengthUnits = 1.0, TemperatureUnits = 1,
+ TimeUnits = 1.0, VelocityUnits = 1.0;
+ double MassUnits=1.0 ;
+
+ if (GetUnits(&DensityUnits, &LengthUnits, &TemperatureUnits,
+ &TimeUnits, &VelocityUnits, &MassUnits, Time) == FAIL) {
+ fprintf(stderr, "Error in GetUnits.\n");
+ return FAIL;
+ }
+
+ float EnergyUnits;
+ EnergyUnits = MassUnits* POW(VelocityUnits, 2.0);
+
+ a=(EllipticalGalaxyRe*3.0856e21/1.8153)/LengthUnits; // in code unit
+
+
+ FLOAT r, x, y = 0, z = 0, rho_star=0;
+
+ for (k = 0; k < GridDimension[2]; k++)
+ for (j = 0; j < GridDimension[1]; j++)
+ for (i = 0; i < GridDimension[0]; i++) {
+
+ /* Compute position */
+
+ x = CellLeftEdge[0][i] + 0.5*CellWidth[0][i];
+ if (GridRank > 1)
+ y = CellLeftEdge[1][j] + 0.5*CellWidth[1][j];
+ if (GridRank > 2)
+ z = CellLeftEdge[2][k] + 0.5*CellWidth[2][k];
+
+ /* Find distance from center (0.5, 0.5, 0.5). */
+
+ r = sqrt(POW(fabs(x-0.5), 2) +
+ POW(fabs(y-0.5), 2) +
+ POW(fabs(z-0.5), 2) );
+ r = max(r, 0.1*CellWidth[0][0]);
+ rho_star=ClusterSMBHBCG*1.989e33*1.0e11*a/(2.0*pi*r*POW((r+a)*LengthUnits,3)); //both r and a are in code units
+ BaryonField[TENum][GRIDINDEX_NOGHOST(i,j,k)] += (0.000475*rho_star*(dtFixed*TimeUnits)*SNIaFeedbackEnergy //SNIa
+ +rho_star*(dtFixed*TimeUnits)*OldStarFeedbackAlpha*1.0e-19*POW(300.0*1.0e5,2)) //Stellar
+ /(EnergyUnits*BaryonField[DensNum][GRIDINDEX_NOGHOST(i,j,k)]);
+ BaryonField[DensNum][GRIDINDEX_NOGHOST(i,j,k)] += rho_star*(dtFixed*TimeUnits)*OldStarFeedbackAlpha*1.0e-19/DensityUnits;
+ }
+
+ return SUCCESS;
+
+}
+
diff -r 20cc80373d61 -r 5795a56c9b22 src/enzo/Grid_SetFlaggingFieldStaticRegions.C
--- a/src/enzo/Grid_SetFlaggingFieldStaticRegions.C
+++ b/src/enzo/Grid_SetFlaggingFieldStaticRegions.C
@@ -49,7 +49,7 @@
/* Loop over static regions. */
for (region = 0; region < MAX_STATIC_REGIONS; region++)
- if (StaticRefineRegionLevel[region] == level) {
+ if (StaticRefineRegionLevel[region] >= level) {
/* Check if there is any overlap. */
diff -r 20cc80373d61 -r 5795a56c9b22 src/enzo/Grid_SolvePrimordialChemistryCVODE.C
--- a/src/enzo/Grid_SolvePrimordialChemistryCVODE.C
+++ /dev/null
@@ -1,1 +0,0 @@
-
diff -r 20cc80373d61 -r 5795a56c9b22 src/enzo/Make.config.objects
--- a/src/enzo/Make.config.objects
+++ b/src/enzo/Make.config.objects
@@ -497,6 +497,7 @@
Grid_MustRefineParticlesFlagInRegion.o \
Grid_NestedCosmologySimulationInitializeGrid.o \
Grid_NohInitializeGrid.o \
+ Grid_OldStarFeedback.o \
Grid_OneZoneFreefallTestInitializeGrid.o \
Grid_OutputAsParticleData.o \
Grid_OutputStarParticleInformation.o \
diff -r 20cc80373d61 -r 5795a56c9b22 src/enzo/Mpich_V2_Dims_create.c
--- a/src/enzo/Mpich_V2_Dims_create.c
+++ /dev/null
@@ -1,341 +0,0 @@
-#include<stdio.h>
-
-/* Because we store factors with their multiplicities, a small array can
- store all of the factors for a large number (grows *faster* than n
- factorial). */
-
-#define MAX_FACTORS 10
-/* 2^20 is a millon */
-#define MAX_DIMS 20
-
-typedef struct Factors { int val, cnt; } Factors;
-
-/* This routine may be global if we are not using weak symbols */
-int MPIR_Factor( int, Factors [], int * );
-int MPIR_ChooseFactors( int, Factors [], int, int, int [] );
-
-/* Return the factors of n and their multiplicity in factors; the number of
- distinct factors is the return value and the total number of factors,
- including multiplicities, is returned in ndivisors */
-
-#define NUM_PRIMES 168
- static int primes[NUM_PRIMES] =
- {2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
- 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
- 73, 79, 83, 89, 97, 101, 103, 107, 109, 113,
- 127, 131, 137, 139, 149, 151, 157, 163, 167, 173,
- 179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
- 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
- 283, 293, 307, 311, 313, 317, 331, 337, 347, 349,
- 353, 359, 367, 373, 379, 383, 389, 397, 401, 409,
- 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
- 467, 479, 487, 491, 499, 503, 509, 521, 523, 541,
- 547, 557, 563, 569, 571, 577, 587, 593, 599, 601,
- 607, 613, 617, 619, 631, 641, 643, 647, 653, 659,
- 661, 673, 677, 683, 691, 701, 709, 719, 727, 733,
- 739, 743, 751, 757, 761, 769, 773, 787, 797, 809,
- 811, 821, 823, 827, 829, 839, 853, 857, 859, 863,
- 877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
- 947, 953, 967, 971, 977, 983, 991, 997};
-
-int MPIR_Factor( int n, Factors factors[], int *ndivisors )
-{
- int n_tmp, n_root;
- int i, nfactors=0, nall=0;
- int cnt;
-
- /* Start from an approximate of the square root of n, by first finding
- the power of 2 at least as large as n. The approximate root is then
- 2 to the 1/2 this power */
- n_tmp = n;
- n_root = 0;
- while (n_tmp) {
- n_root ++;
- n_tmp >>= 1;
- }
- n_root = 1 << (n_root / 2);
-
- /* Find the prime number that less than that value and try dividing
- out the primes. */
- for (i=0; i<NUM_PRIMES; i++) {
- if (primes[i] > n_root) break;
- }
-
- /* For each prime, divide out as many as possible */
- for (;i>=0;i--) {
- cnt = 0;
- while ( (n % primes[i]) == 0) {
- cnt ++;
- n = n / primes[i];
- }
- if (cnt > 0) {
- /* --BEGIN ERROR HANDLING-- */
- if (nfactors + 1 == MAX_FACTORS) {
- /* Time to panic. This should not happen, since the
- smallest number that could exceed this would
- be the product of the first 10 primes that are
- greater than one, which is 6469693230 */
- return nfactors;
- }
- /* --END ERROR HANDLING-- */
- factors[nfactors].val = primes[i];
- factors[nfactors++].cnt = cnt;
- nall += cnt;
- }
- }
- /* If nfactors == 0, n was a prime, so return that */
- if (nfactors == 0) {
- nfactors = 1;
- nall = 1;
- factors[0].val = n;
- factors[0].cnt = 1;
- }
- else if (n > 1) {
- /* We need one more factor (a single prime > n_root) */
- factors[nfactors].val = n;
- factors[nfactors++].cnt = 1;
- nall++;
- }
- *ndivisors = nall;
- return nfactors;
-}
-
-/*
- Given a collection of factors from the factors routine and a number of
- required values, combine the elements in factors into "needed" elements
- of the array chosen. These are non-increasing and so can be used directly
- in setting values in the dims array in MPIR_Dims_create.
-
- Algorithm (very simple)
-
- target_size = nnodes / ndims needed.
- Accumulate factors, starting from the bottom,
- until the target size is met or exceeded.
- Put all of the remaining factors into the last dimension
- (recompute target_size with each step, since we may
- miss the target by a wide margin.
-
- A much more sophisticated code would try to balance
- the number of nodes assigned to each dimension, possibly
- in concert with guidelines from the device about "good"
- sizes
-
- */
-
-int MPIR_ChooseFactors( int nfactors, Factors factors[],
- int nnodes, int needed, int chosen[] )
-{
- int nodes_needed = nnodes;
- int target_size = nodes_needed / needed;
- int factor;
- int i, j;
-
- /* First, distribute the factors into the chosen array */
- j = 0;
- for (i=0; i<needed; i++) {
- if (j >= nfactors) break;
- if (i == needed-1) {
- /* Dump all of the remaining factors into this
- entry */
- factor = 1;
- while (j < nfactors) {
- factor *= factors[j].val;
- if (--factors[j].cnt == 0) j++;
- }
- }
- else {
- /* Get the current target size */
- factor = 1;
- while (j < nfactors && factor < target_size) {
- factor *= factors[j].val;
- if (--factors[j].cnt == 0) j++;
- }
- }
- chosen[i] = factor;
- nodes_needed /= factor;
- target_size = nodes_needed / (needed - i);
- }
- /* finish up */
- for (; i<needed; i++) chosen[i] = 1;
-
- /* Second, sort the chosen array in non-increasing order. Use
- a simple bubble sort because the number of elements is always small */
- for (i=0; i<needed-1; i++) {
- for (j=i+1; j<needed; j++) {
- if (chosen[j] > chosen[i]) {
- int tmp = chosen[i];
- chosen[i] = chosen[j];
- chosen[j] = tmp;
- }
- }
- }
- return 0;
-}
-
-int XXMPI_Dims_create( int nnodes, int ndims, int *dims )
-{
- Factors factors[MAX_FACTORS];
- int chosen[MAX_DIMS];
- int i, j, mpi_errno;
- int dims_needed, dims_product, nfactors, ndivisors;
-
- /* Find the number of unspecified dimensions in dims and the product
- of the positive values in dims */
- dims_needed = 0;
- dims_product = 1;
- for (i=0; i<ndims; i++) {
- if (dims[i] < 0) {
-
-/*
- mpi_errno = MPIR_Err_create_code( MPI_SUCCESS,
- MPIR_ERR_RECOVERABLE,
- "MPIR_Dims_create", __LINE__,
- MPI_ERR_DIMS,
- "**argarrayneg",
- "**argarrayneg %s %d %d", "dims", i, dims[i]);
- return mpi_errno;
-*/
- fprintf(stderr, "FAILED1\n");
- return -1;
- }
- if (dims[i] == 0) dims_needed ++;
- else dims_product *= dims[i];
- }
-
- /* Can we factor nnodes by dims_product? */
- if ((nnodes / dims_product ) * dims_product != nnodes ) {
-/*
- mpi_errno = MPIR_Err_create_code( MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
- "MPIR_Dims_create", __LINE__,
- MPI_ERR_DIMS, "**dimspartition", 0);
- return mpi_errno;
-*/
- fprintf(stderr, "FAILED2\n");
- return -2;
- }
-
- if (!dims_needed) {
- /* Special case - all dimensions provided */
-/*
- return MPI_SUCCESS;
-*/
- return 0;
- }
-
- if (dims_needed > MAX_DIMS) {
- /* --BEGIN ERROR HANDLING-- */
-/*
- mpi_errno = MPIR_Err_create_code( MPI_SUCCESS,
- MPIR_ERR_RECOVERABLE,
- "MPIR_Dims_create", __LINE__, MPI_ERR_DIMS,
- "**dimsmany", "**dimsmany %d %d", dims_needed, MAX_DIMS );
- return mpi_errno;
-*/
- /* --END ERROR HANDLING-- */
- fprintf(stderr, "FAILED3\n");
- return -3;
- }
-
- nnodes /= dims_product;
-
- /* Now, factor nnodes into dims_needed components. We'd like these
- to match the underlying machine topology as much as possible. In the
- absence of information about the machine topology, we can try to
- make the factors a close to each other as possible.
-
- The MPICH 1 version used donated code that was quite sophisticated
- and complex. However, since it didn't take the system topology
- into account, it was more sophisticated that was perhaps warranted.
- In addition, useful values of nnodes for most MPI programs will be
- of the order 10-10000, and powers of two will be common.
- */
-
- /* Get the factors */
- nfactors = MPIR_Factor( nnodes, factors, &ndivisors );
-
- /* Divide into 3 major cases:
- 1. Fewer divisors than needed dimensions. Just use all of the
- factors up, setting the remaining dimensions to 1
- 2. Only one distinct factor (typically 2) but with greater
- multiplicity. Give each dimension a nearly equal size
- 3. Other. There are enough factors to divide among the dimensions.
- This is done in an ad hoc fashion
- */
-
-/* DEBUG
- printf( "factors are (%d of them) with %d divisors\n", nfactors, ndivisors );
- for (j=0; j<nfactors; j++) {
- printf( "val = %d repeated %d\n", factors[j].val, factors[j].cnt );
- }
-*/
- /* The MPI spec requires that the values that are set be in nonincreasing
- order (MPI-1, section 6.5). */
-
- /* Distribute the factors among the dimensions */
- if (ndivisors <= dims_needed) {
- /* Just use the factors as needed. */
- MPIR_ChooseFactors( nfactors, factors, nnodes, dims_needed, chosen );
- j = 0;
- for (i=0; i<ndims; i++) {
- if (dims[i] == 0) {
- dims[i] = chosen[j++];
- }
- }
-#if 0
- /* Any remaining unset dims are set to one */
- for (i++;i<ndims; i++) {
- if (dims[i] == 0)
- dims[i] = 1;
- }
-#endif
- }
- else {
- /* We must combine some of the factors */
- /* This is what the fancy code is for in the MPICH-1 code.
- If the number of distinct factors is 1 (e.g., a power of 2),
- then this code can be much simpler */
- /* NOT DONE */
- /* FIXME */
- if (nfactors == 1) {
- /* Special case for k**n, such as powers of 2 */
- int factor = factors[0].val;
- int cnt = factors[0].cnt; /* Numver of factors left */
- int cnteach = ( cnt + dims_needed - 1 ) / dims_needed;
- int factor_each;
-
- factor_each = factor;
- for (i=1; i<cnteach; i++) factor_each *= factor;
-
- for (i=0; i<ndims; i++) {
- if (dims[i] == 0) {
- if (cnt > cnteach) {
- dims[i] = factor_each;
- cnt -= cnteach;
- }
- else if (cnt > 0) {
- factor_each = factor;
- for (j=1; j<cnt; j++)
- factor_each *= factor;
- dims[i] = factor_each;
- cnt = 0;
- }
- else {
- dims[i] = 1;
- }
- }
- }
- }
- else {
- /* Here is the general case. */
- MPIR_ChooseFactors( nfactors, factors, nnodes, dims_needed,
- chosen );
- j = 0;
- for (i=0; i<ndims; i++) {
- if (dims[i] == 0) {
- dims[i] = chosen[j++];
- }
- }
- }
- }
- return 0;
-}
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/enzo/enzo-dev/commits/c8ef2deea76f/
Changeset: c8ef2deea76f
Branch: week-of-code
User: pgrete
Date: 2018-01-03 17:36:22+00:00
Summary: Merged enzo/enzo-dev into week-of-code
Affected #: 13 files
diff -r 5795a56c9b22 -r c8ef2deea76f doc/manual/source/parameters/problemtypes.rst
--- a/doc/manual/source/parameters/problemtypes.rst
+++ b/doc/manual/source/parameters/problemtypes.rst
@@ -847,7 +847,11 @@
Initial H2II fraction of the sphere. Default: 3e-14
``CollapseTestSphereInitialLevel`` (external)
Failed experiment to try to force refinement to a specified level.
- Not working. Default: 0.
+ Not working. Default: 0
+``CollapseTestWind`` (external)
+ Boolean flag. Type: integer. This parameter decides if there is wind (inflow boundary). Default: 0 (FALSE)
+``CollapseTestWindVelocity`` (external)
+ When using inflow boundary, this is the inflow velocity. Default: 0.
.. _testgravitymotion_param:
diff -r 5795a56c9b22 -r c8ef2deea76f src/enzo/CallProblemSpecificRoutines.C
--- a/src/enzo/CallProblemSpecificRoutines.C
+++ b/src/enzo/CallProblemSpecificRoutines.C
@@ -42,7 +42,7 @@
ThisGrid->GridData->SphericalInfallGetProfile(level, 1);
if (ProblemType == 30)
ThisGrid->GridData->AnalyzeTrackPeaks(level, 0);
- if (ProblemType == 27)
+ if (ProblemType == 27){
if (ThisGrid->GridData->ReturnProcessorNumber()==MyProcessorNumber){
float AM[3], MeanVelocity[3], DMVelocity[3];
FLOAT Center[] = {0,0,0}, CenterOfMass[3], DMCofM[3];
@@ -60,6 +60,9 @@
-CenterOfMass[0], -CenterOfMass[1], -CenterOfMass[2],
DMCofM[0], DMCofM[1], DMCofM[2]);
}
+ if (StellarWindSpeed > 0)
+ ThisGrid->GridData->AddStellarWind();
+ }
/* Solve analytical free-fall */
if (ProblemType == 63) {
diff -r 5795a56c9b22 -r c8ef2deea76f src/enzo/CollapseTestInitialize.C
--- a/src/enzo/CollapseTestInitialize.C
+++ b/src/enzo/CollapseTestInitialize.C
@@ -31,12 +31,16 @@
#include "Hierarchy.h"
#include "LevelHierarchy.h"
#include "TopGridData.h"
+#include "phys_constants.h"
void WriteListOfFloats(FILE *fptr, int N, float floats[]);
void WriteListOfFloats(FILE *fptr, int N, FLOAT floats[]);
void AddLevel(LevelHierarchyEntry *Array[], HierarchyEntry *Grid, int level);
int RebuildHierarchy(TopGridData *MetaData,
LevelHierarchyEntry *LevelArray[], int level);
+int GetUnits(float *DensityUnits, float *LengthUnits,
+ float *TemperatureUnits, float *TimeUnits,
+ float *VelocityUnits, double *MassUnits, FLOAT Time);
static float CollapseTestInitialFractionHII = 1.2e-5;
static float CollapseTestInitialFractionHeII = 1.0e-14;
@@ -46,7 +50,7 @@
static float CollapseTestInitialFractionH2II = 3.0e-14;
int CollapseTestInitialize(FILE *fptr, FILE *Outfptr,
- HierarchyEntry &TopGrid, TopGridData &MetaData)
+ HierarchyEntry &TopGrid, TopGridData &MetaData, ExternalBoundary &Exterior)
{
const char *DensName = "Density";
const char *TEName = "TotalEnergy";
@@ -82,6 +86,7 @@
float CollapseTestParticleMeanDensity = FLOAT_UNDEFINED;
int CollapseTestUseColour = FALSE;
int CollapseTestUseMetals = FALSE;
+ int CollapseTestWind = FALSE;
float CollapseTestInitialTemperature = 1000;
float CollapseTestInitialDensity = 1.0;
float CollapseTestSphereDensity[MAX_SPHERES],
@@ -99,7 +104,8 @@
CollapseTestSphereHIIFraction[MAX_SPHERES],
CollapseTestSphereHeIIFraction[MAX_SPHERES],
CollapseTestSphereHeIIIFraction[MAX_SPHERES],
- CollapseTestSphereH2IFraction[MAX_SPHERES];
+ CollapseTestSphereH2IFraction[MAX_SPHERES],
+ CollapseTestWindVelocity[MAX_DIMENSION];
int CollapseTestSphereNumShells[MAX_SPHERES],
CollapseTestSphereInitialLevel[MAX_SPHERES],
CollapseTestSphereType[MAX_SPHERES],
@@ -140,6 +146,7 @@
}
for (dim = 0; dim < MAX_DIMENSION; dim++) {
CollapseTestUniformVelocity[dim] = 0;
+ CollapseTestWindVelocity[dim] = 0;
+ }
/* read input from file */
@@ -161,6 +168,8 @@
&CollapseTestUseColour);
ret += sscanf(line, "CollapseTestUseMetals = %"ISYM,
&CollapseTestUseMetals);
+ ret += sscanf(line, "CollapseTestWind = %"ISYM,
+ &CollapseTestWind);
ret += sscanf(line, "CollapseTestInitialTemperature = %"FSYM,
&CollapseTestInitialTemperature);
ret += sscanf(line, "CollapseTestInitialDensity = %"FSYM,
@@ -255,6 +264,10 @@
ret += sscanf(line, "CollapseTestInitialFractionH2II = %"FSYM,
&CollapseTestInitialFractionH2II);
+ ret += sscanf(line, "CollapseTestWindVelocity = %"FSYM" %"FSYM" %"FSYM,
+ &CollapseTestWindVelocity[0],&CollapseTestWindVelocity[1],&CollapseTestWindVelocity[2]);
+
+
/* if the line is suspicious, issue a warning */
if (ret == 0 && strstr(line, "=") && strstr(line, "CollapseTest")
@@ -280,7 +293,7 @@
CollapseTestSphereHeIIIFraction, CollapseTestSphereH2IFraction,
CollapseTestUseParticles, CollapseTestParticleMeanDensity,
CollapseTestUniformVelocity, CollapseTestUseColour,
- CollapseTestUseMetals,
+ CollapseTestUseMetals,
CollapseTestInitialTemperature, CollapseTestInitialDensity,
0,
CollapseTestInitialFractionHII, CollapseTestInitialFractionHeII,
@@ -467,6 +480,56 @@
} // end: if (CollapseTestRefineAtStart)
+ /* If there is wind, initialize the exterior */
+
+ if (CollapseTestWind) {
+ Exterior.Prepare(TopGrid.GridData);
+
+ const int MAX_BNDRY_VARS = 6;
+ Mu=0.6;
+ float InflowValue[MAX_BNDRY_VARS], Dummy[MAX_BNDRY_VARS];
+ float DensityUnits, LengthUnits, TemperatureUnits, TimeUnits, VelocityUnits;
+ double MassUnits;
+ if (GetUnits(&DensityUnits, &LengthUnits,&TemperatureUnits, &TimeUnits,
+ &VelocityUnits, &MassUnits, MetaData.Time) == FAIL){
+ fprintf(stderr, "Error in GetUnits.\n");
+ return FAIL;
+ }
+ float EnergyUnits;
+ float TempToEnergyConversion;
+ EnergyUnits = POW(LengthUnits, 2.0) / POW(TimeUnits, 2.0);
+ TempToEnergyConversion = kboltz/((Gamma - 1.0)*Mu*mh);
+ TempToEnergyConversion /= EnergyUnits; // this times temperature gives you energy units in ENZO UNITS (K -> Enzo)
+
+ InflowValue[0] = CollapseTestInitialDensity;
+ InflowValue[1] = CollapseTestInitialTemperature*TempToEnergyConversion;
+ if (HydroMethod != 2) {
+ InflowValue[1] = InflowValue[1] + 0.5*(POW(CollapseTestWindVelocity[0]/VelocityUnits,2)
+ + POW(CollapseTestWindVelocity[1]/VelocityUnits,2)
+ + POW(CollapseTestWindVelocity[2]/VelocityUnits,2));
+ }
+ InflowValue[2] = CollapseTestWindVelocity[0]/VelocityUnits;
+ InflowValue[3] = CollapseTestWindVelocity[1]/VelocityUnits;
+ InflowValue[4] = CollapseTestWindVelocity[2]/VelocityUnits;
+ if (CollapseTestUseMetals)
+ InflowValue[5] = 1.0e-10; ///need to be changed to the wind metallicity
+
+ if (Exterior.InitializeExternalBoundaryFace(0, inflow, outflow, InflowValue,
+ Dummy) == FAIL) {
+ fprintf(stderr, "Error in InitializeExternalBoundaryFace.\n");
+ return FAIL;
+ }
+ if (MetaData.TopGridRank > 1)
+ Exterior.InitializeExternalBoundaryFace(1, periodic, periodic,
+ Dummy, Dummy);
+ if (MetaData.TopGridRank > 2)
+ Exterior.InitializeExternalBoundaryFace(2, periodic, periodic,
+ Dummy, Dummy);
+ }
+
+
+
+
/* set up field names and units */
int count = 0;
@@ -516,6 +579,8 @@
CollapseTestUseColour);
fprintf(Outfptr, "CollapseTestUseMetals = %"ISYM"\n",
CollapseTestUseMetals);
+ fprintf(Outfptr, "CollapseTestWind = %"ISYM"\n",
+ CollapseTestWind);
fprintf(Outfptr, "CollapseTestInitialTemperature = %"FSYM"\n",
CollapseTestInitialTemperature);
fprintf(Outfptr, "CollapseTestInitialDensity = %"FSYM"\n",
@@ -523,6 +588,9 @@
fprintf(Outfptr, "CollapseTestUniformVelocity = %"FSYM" %"FSYM" %"FSYM"\n",
CollapseTestUniformVelocity[0], CollapseTestUniformVelocity[1],
CollapseTestUniformVelocity[2]);
+ fprintf(Outfptr, "CollapseTestWindVelocity = %"FSYM" %"FSYM" %"FSYM"\n",
+ CollapseTestWindVelocity[0], CollapseTestWindVelocity[1],
+ CollapseTestWindVelocity[2]);
for (sphere = 0; sphere < CollapseTestNumberOfSpheres; sphere++) {
fprintf(Outfptr, "CollapseTestSphereType[%"ISYM"] = %"ISYM"\n", sphere,
CollapseTestSphereType[sphere]);
diff -r 5795a56c9b22 -r c8ef2deea76f src/enzo/Grid.h
--- a/src/enzo/Grid.h
+++ b/src/enzo/Grid.h
@@ -2736,6 +2736,7 @@
int ClusterSMBHFeedback(int level);
int ClusterSMBHEachGridGasMass(int level);
int OldStarFeedback();
+ int AddStellarWind();
int SetNumberOfColours(void);
int SaveSubgridFluxes(fluxes *SubgridFluxes[], int NumberOfSubgrids,
float *Flux3D[], int flux, float fluxcoef, float dt);
diff -r 5795a56c9b22 -r c8ef2deea76f src/enzo/Grid_AddStellarWind.C
--- /dev/null
+++ b/src/enzo/Grid_AddStellarWind.C
@@ -0,0 +1,98 @@
+/***********************************************************************
+/
+/ GRID: ADD Stellar Wind from a fixed point (a single star)
+/
+/ written by: Yuan Li
+/ date: Aug, 2017
+/ modified1:
+/
+/ PURPOSE: Simulate AGB Wind from a star like Mira
+/
+************************************************************************/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <math.h>
+#include <time.h>
+#include "ErrorExceptions.h"
+#include "macros_and_parameters.h"
+#include "typedefs.h"
+#include "global_data.h"
+#include "Fluxes.h"
+#include "GridList.h"
+#include "ExternalBoundary.h"
+#include "Grid.h"
+#include "Hierarchy.h"
+#include "CosmologyParameters.h"
+#include "phys_constants.h"
+int GetUnits(float *DensityUnits, float *LengthUnits,
+ float *TemperatureUnits, float *TimeUnits,
+ float *VelocityUnits, double *MassUnits, FLOAT Time);
+
+int grid::AddStellarWind()
+{
+ if (MyProcessorNumber != ProcessorNumber)
+ return SUCCESS;
+
+ int DensNum, GENum, TENum, Vel1Num, Vel2Num, Vel3Num;
+ if (this->IdentifyPhysicalQuantities(DensNum, GENum, Vel1Num, Vel2Num,
+ Vel3Num, TENum) == FAIL) ///this or thisgrid
+ ENZO_FAIL("Error in IdentifyPhysicalQuantities.");
+
+
+ int i, j, k;
+ Mu = 0.6;
+
+ float DensityUnits = 1.0, LengthUnits = 1.0, TemperatureUnits = 1,
+ TimeUnits = 1.0, VelocityUnits = 1.0;
+ double MassUnits=1.0 ;
+
+ if (GetUnits(&DensityUnits, &LengthUnits, &TemperatureUnits,
+ &TimeUnits, &VelocityUnits, &MassUnits, Time) == FAIL) {
+ fprintf(stderr, "Error in GetUnits.\n");
+ return FAIL;
+ }
+
+
+
+ FLOAT r, x, y, z = 0;
+
+ for (k = 0; k < GridDimension[2]; k++)
+ for (j = 0; j < GridDimension[1]; j++)
+ for (i = 0; i < GridDimension[0]; i++) {
+
+ /* Compute position */
+
+ x = CellLeftEdge[0][i] + 0.5*CellWidth[0][i];
+ if (GridRank > 1)
+ y = CellLeftEdge[1][j] + 0.5*CellWidth[1][j];
+ if (GridRank > 2)
+ z = CellLeftEdge[2][k] + 0.5*CellWidth[2][k];
+
+ /* Find distance from center of star (0.125, 0.5, 0.5). */
+
+ FLOAT xpos, ypos, zpos;
+ xpos = x-StellarWindCenterPosition[0];
+ ypos = y-StellarWindCenterPosition[1];
+ zpos = z-StellarWindCenterPosition[2];
+ r = sqrt(xpos*xpos + ypos*ypos + zpos*zpos);
+ r = max(r, 0.1*CellWidth[0][0]);
+ if (r<StellarWindRadius){
+ BaryonField[DensNum][GRIDINDEX_NOGHOST(i,j,k)] = StellarWindDensity*POW(r/StellarWindRadius, -2);
+ BaryonField[Vel1Num][GRIDINDEX_NOGHOST(i,j,k)] = StellarWindSpeed * (xpos/r)/VelocityUnits;
+ BaryonField[Vel2Num][GRIDINDEX_NOGHOST(i,j,k)] = StellarWindSpeed * (ypos/r)/VelocityUnits;
+ BaryonField[Vel3Num][GRIDINDEX_NOGHOST(i,j,k)] = StellarWindSpeed * (zpos/r)/VelocityUnits;
+ BaryonField[TENum][GRIDINDEX_NOGHOST(i,j,k)] = StellarWindTemperature/TemperatureUnits/ ((Gamma-1.0)*Mu);
+ if (HydroMethod != Zeus_Hydro){
+ BaryonField[TENum][GRIDINDEX_NOGHOST(i,j,k)] += 0.5*POW(BaryonField[Vel1Num][GRIDINDEX_NOGHOST(i,j,k)], 2.0);
+ if(GridRank > 1)
+ BaryonField[TENum][GRIDINDEX_NOGHOST(i,j,k)] += 0.5*POW(BaryonField[Vel2Num][GRIDINDEX_NOGHOST(i,j,k)], 2.0);
+ if(GridRank > 2)
+ BaryonField[TENum][GRIDINDEX_NOGHOST(i,j,k)] += 0.5*POW(BaryonField[Vel3Num][GRIDINDEX_NOGHOST(i,j,k)], 2.0);
+ }
+ }
+ }
+ return SUCCESS;
+
+}
+
diff -r 5795a56c9b22 -r c8ef2deea76f src/enzo/Grid_CollapseTestInitializeGrid.C
--- a/src/enzo/Grid_CollapseTestInitializeGrid.C
+++ b/src/enzo/Grid_CollapseTestInitializeGrid.C
@@ -756,6 +756,15 @@
} // end: loop over dims
} // end: disk
+
+ /* 11) stellar wind, with r^-2 power law density*/
+ if (SphereType[sphere] == 11) {
+ radial_velocity = StellarWindSpeed/VelocityUnits;
+ dens1 = StellarWindDensity*pow(r/StellarWindRadius, -2);
+ Velocity[0] += radial_velocity * xpos / r;
+ Velocity[1] += radial_velocity * ypos / r;
+ Velocity[2] += radial_velocity * zpos / r;
+ } // end stellar wind
/* If the density is larger than the background (or the previous
sphere), then set the velocity. */
diff -r 5795a56c9b22 -r c8ef2deea76f src/enzo/InitializeNew.C
--- a/src/enzo/InitializeNew.C
+++ b/src/enzo/InitializeNew.C
@@ -107,7 +107,7 @@
int GravityEquilibriumTestInitialize(FILE *fptr, FILE *Outfptr,
HierarchyEntry &TopGrid, TopGridData &MetaData);
int CollapseTestInitialize(FILE *fptr, FILE *Outfptr,
- HierarchyEntry &TopGrid, TopGridData &MetaData);
+ HierarchyEntry &TopGrid, TopGridData &MetaData, ExternalBoundary &Exterior);
int ClusterInitialize(FILE *fptr, FILE *Outfptr,
HierarchyEntry &TopGrid, TopGridData &MetaData, ExternalBoundary &Exterior);
int TestGravityMotion(FILE *fptr, FILE *Outfptr, HierarchyEntry &TopGrid,
@@ -491,7 +491,7 @@
// 27) CollapseTest
if (ProblemType == 27)
- ret = CollapseTestInitialize(fptr, Outfptr, TopGrid, MetaData);
+ ret = CollapseTestInitialize(fptr, Outfptr, TopGrid, MetaData, Exterior);
// 28) TestGravityMotion
diff -r 5795a56c9b22 -r c8ef2deea76f src/enzo/Make.config.objects
--- a/src/enzo/Make.config.objects
+++ b/src/enzo/Make.config.objects
@@ -498,6 +498,7 @@
Grid_NestedCosmologySimulationInitializeGrid.o \
Grid_NohInitializeGrid.o \
Grid_OldStarFeedback.o \
+ Grid_AddStellarWind.o \
Grid_OneZoneFreefallTestInitializeGrid.o \
Grid_OutputAsParticleData.o \
Grid_OutputStarParticleInformation.o \
diff -r 5795a56c9b22 -r c8ef2deea76f src/enzo/ReadParameterFile.C
--- a/src/enzo/ReadParameterFile.C
+++ b/src/enzo/ReadParameterFile.C
@@ -1221,6 +1221,12 @@
ret += sscanf(line, "EllipticalGalaxyRe = %"FSYM, &EllipticalGalaxyRe);
ret += sscanf(line, "OldStarFeedbackAlpha = %"FSYM, &OldStarFeedbackAlpha);
ret += sscanf(line, "SNIaFeedbackEnergy = %"FSYM, &SNIaFeedbackEnergy);
+ ret += sscanf(line, "StellarWindSpeed = %"FSYM, &StellarWindSpeed);
+ ret += sscanf(line, "StellarWindDensity = %"FSYM, &StellarWindDensity);
+ ret += sscanf(line, "StellarWindRadius = %"FSYM, &StellarWindRadius);
+ ret += sscanf(line, "StellarWindTemperature = %"FSYM, &StellarWindTemperature);
+ ret += sscanf(line, "StellarWindCenterPosition = %"PSYM" %"PSYM" %"PSYM,
+ StellarWindCenterPosition, StellarWindCenterPosition+1, StellarWindCenterPosition+2);
ret += sscanf(line, "ExtraOutputs = %"ISYM" %"ISYM" %"ISYM" %"ISYM" %"ISYM" %"ISYM" %"ISYM" %"ISYM" %"ISYM" %"ISYM"", ExtraOutputs,
ExtraOutputs +1,ExtraOutputs +2,ExtraOutputs +3,
diff -r 5795a56c9b22 -r c8ef2deea76f src/enzo/SetDefaultGlobalValues.C
--- a/src/enzo/SetDefaultGlobalValues.C
+++ b/src/enzo/SetDefaultGlobalValues.C
@@ -249,6 +249,7 @@
DiskGravityAngularMomentum[dim] = 0.0;
GalaxySimulationRPSWindVelocity[dim] = 0.0;
GalaxySimulationPreWindVelocity[dim] = 0.0;
+ StellarWindCenterPosition[dim] = 0.5;
}
if( MAX_DIMENSION > 0 ) DiskGravityAngularMomentum[MAX_DIMENSION-1] = 1.0;
@@ -631,6 +632,11 @@
OldStarFeedbackAlpha = 0.0;
SNIaFeedbackEnergy = 1.0;
+ StellarWindSpeed = 0.0; // in cgs
+ StellarWindDensity = 1.0; // in code unit
+ StellarWindRadius = 0.01; // in code unit
+ StellarWindTemperature = 100.0; // in K
+
PythonTopGridSkip = 0;
PythonSubcycleSkip = 1;
PythonReloadScript = FALSE;
diff -r 5795a56c9b22 -r c8ef2deea76f src/enzo/ShockInABoxInitialize.C
--- a/src/enzo/ShockInABoxInitialize.C
+++ b/src/enzo/ShockInABoxInitialize.C
@@ -222,6 +222,9 @@
}
InflowValue[DensNum] = ShockInABoxDensity[0];
InflowValue[Vel1Num] = ShockInABoxVelocity[0];
+ if (HydroMethod == 2)
+ InflowValue[TENum] = ShockInABoxPressure[0]/(Gamma-1.0)/ShockInABoxDensity[0];
+ else
InflowValue[TENum] = ShockInABoxPressure[0]/(Gamma-1.0)/ShockInABoxDensity[0]
+ 0.5*POW(ShockInABoxVelocity[0], 2);
diff -r 5795a56c9b22 -r c8ef2deea76f src/enzo/WriteParameterFile.C
--- a/src/enzo/WriteParameterFile.C
+++ b/src/enzo/WriteParameterFile.C
@@ -1040,6 +1040,13 @@
fprintf(fptr, "EllipticalGalaxyRe = %"FSYM"\n", EllipticalGalaxyRe);
fprintf(fptr, "OldStarFeedbackAlpha = %"FSYM"\n", OldStarFeedbackAlpha);
fprintf(fptr, "SNIaFeedbackEnergy = %"FSYM"\n", SNIaFeedbackEnergy);
+ fprintf(fptr, "StellarWindSpeed = %"FSYM"\n", StellarWindSpeed);
+ fprintf(fptr, "StellarWindDensity = %"FSYM"\n", StellarWindDensity);
+ fprintf(fptr, "StellarWindRadius = %"FSYM"\n", StellarWindRadius);
+ fprintf(fptr, "StellarWindTemperature = %"FSYM"\n", StellarWindTemperature);
+ fprintf(fptr, "StellarWindCenterPosition = ");
+ WriteListOfFloats(fptr, MetaData.TopGridRank, StellarWindCenterPosition);
+
fprintf(fptr, "H2StarMakerEfficiency = %"GSYM"\n", H2StarMakerEfficiency);
fprintf(fptr, "H2StarMakerNumberDensityThreshold = %"GSYM"\n", H2StarMakerNumberDensityThreshold);
fprintf(fptr, "H2StarMakerMinimumMass = %"GSYM"\n", H2StarMakerMinimumMass);
diff -r 5795a56c9b22 -r c8ef2deea76f src/enzo/global_data.h
--- a/src/enzo/global_data.h
+++ b/src/enzo/global_data.h
@@ -1075,6 +1075,13 @@
EXTERN float OldStarFeedbackAlpha;
EXTERN float SNIaFeedbackEnergy;
+/* Stellar Wind from a single AGB star */
+EXTERN float StellarWindRadius;
+EXTERN float StellarWindDensity;
+EXTERN float StellarWindSpeed;
+EXTERN float StellarWindTemperature;
+EXTERN FLOAT StellarWindCenterPosition[3];
+
EXTERN int MHDCTSlopeLimiter;
EXTERN int MHDCTDualEnergyMethod;
EXTERN int MHDCTPowellSource;
Repository URL: https://bitbucket.org/enzo/enzo-dev/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.