Merge branch 'development' into neoSkeleton

Jeffrey Garretson
2024-05-09 14:24:21 -07:00
203 changed files with 17911 additions and 9632 deletions

.gitignore vendored

@@ -13,6 +13,9 @@ tests/*/*.F90
!tests/helperCode/*.F90
!tests/helperCode_mpi/*.F90
# Pre-compile generated files
src/base/git_info.F90
# Compiled Object files
*.slo
*.lo
@@ -63,4 +66,7 @@ analysis/notebooks/Tutorial/.ipynb_checkpoints
.env
# ifort cruft
*.i90
# Emacs backup files
*~


@@ -23,9 +23,13 @@ execute_process(COMMAND git log -1 --format=%h WORKING_DIRECTORY ${CMAKE_SOURCE_
OUTPUT_VARIABLE GIT_COMMIT_HASH
OUTPUT_STRIP_TRAILING_WHITESPACE)
#Add defines for commit hash/branch
add_compile_definitions(GITCOMMITHASH=${GIT_COMMIT_HASH})
add_compile_definitions(GITBRANCH=${GIT_BRANCH})
# Generate git_info.F90 with GITBRANCH value
set(GIT_INFO_FILE "${CMAKE_SOURCE_DIR}/src/base/git_info.F90")
file(WRITE ${GIT_INFO_FILE} "module git_info\n")
file(APPEND ${GIT_INFO_FILE} " implicit none\n")
file(APPEND ${GIT_INFO_FILE} " character(len=*), parameter :: gitB = '${GIT_BRANCH}'\n")
file(APPEND ${GIT_INFO_FILE} " character(len=*), parameter :: gitH = '${GIT_COMMIT_HASH}'\n")
file(APPEND ${GIT_INFO_FILE} "end module git_info\n")
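# For reference, with a hypothetical branch "development" and hash "abc1234",
# the file() commands above emit a src/base/git_info.F90 of the form:
#   module git_info
#     implicit none
#     character(len=*), parameter :: gitB = 'development'
#     character(len=*), parameter :: gitH = 'abc1234'
#   end module git_info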
#Set some configuration defaults
option(ENABLE_OMP "Enable OMP parallelization" ON )
@@ -196,6 +200,13 @@ target_link_libraries(remix2remix.x remixlib baselib)
target_link_libraries(remix2rcm.x remixlib baselib)
add_dependencies(remix remix.x remix2remix.x remix2rcm.x baselib)
#-------------
# ShellGrid Tester
message("\tAdding executable testNewRM.x")
add_executable(testNewRMS.x src/drivers/testNewRMS.F90)
target_link_libraries(testNewRMS.x baselib)
add_dependencies(testNewRMS.x baselib)
#-------------
#Kaiju: RCM
message("Adding RCM module ...")
@@ -258,8 +269,7 @@ if(ENABLE_MPI)
add_executable(gamhelio_mpi.x src/drivers/gamera_mpix.F90 ${GAMHELIC})
target_link_libraries(gamhelio_mpi.x baselib gamlib basempilib gammpilib)
add_dependencies(gamhelio_mpi gamhelio_mpi.x)
endif()
if(PFUNIT_FOUND)


@@ -57,10 +57,10 @@ if(CMAKE_Fortran_COMPILER_ID MATCHES Intel)
elseif( (CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER_EQUAL 17.0) AND (CMAKE_Fortran_COMPILER_VERSION VERSION_LESS 18.0) )
message(WARNING "Compiler has incomplete F2008 features, Git hash/compiler information won't be saved to H5 files")
add_compile_definitions(__INTEL_COMPILER_OLD)
elseif( (CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER_EQUAL 20.0) )
message(WARNING "Setting default optimization to O2 to avoid certain Intel compiler bugs")
set(CMAKE_DEFOPT "-O2")
endif()
elseif(CMAKE_Fortran_COMPILER_ID MATCHES GNU)
if(CMAKE_Fortran_COMPILER_VERSION VERSION_LESS 8.0)
message("Fortran compiler too old! What, were you gonna use punch cards?")
@@ -68,6 +68,11 @@ elseif(CMAKE_Fortran_COMPILER_ID MATCHES GNU)
endif()
endif()
if(FAST_YOLO)
message("*** RUNNING WITH IMPRECISE HIGH SPEED OPTIONS ***")
set(CMAKE_DEFOPT "-O3") #over-ride default optimization to maximum
endif()
set(CMAKE_Fortran_FLAGS_DEBUG "-O0 -g")
set(CMAKE_Fortran_FLAGS_RELEASE "${CMAKE_DEFOPT}")
set(CMAKE_Fortran_FLAGS_RELWITHDEBINFO "${CMAKE_DEFOPT} -g")
@@ -76,9 +81,9 @@ set(CMAKE_Fortran_FLAGS_RELWITHDEBINFO "${CMAKE_DEFOPT} -g")
if(CMAKE_Fortran_COMPILER_ID MATCHES Intel)
set(dialect "-free -implicitnone")
#Base
string(APPEND CMAKE_Fortran_FLAGS " -fPIC")
string(APPEND CMAKE_Fortran_FLAGS " -fPIC -fpconstant")
#Production
set(PROD "-align array64byte -align rec32byte -no-prec-div -fast-transcendentals")
#Production with Debug Info
set(PRODWITHDEBUGINFO "-traceback -debug all -align array64byte -align rec32byte -no-prec-div -fast-transcendentals")
#Debug


@@ -432,10 +432,10 @@ def Aug2D(XX,YY,doEps=False,TINY=1.0e-8,KeepOut=True,Rpx=1.15):
#Just reflect about X-axis
for i in range(0,Ng):
ip = i+1
xxG[:,jS-ip] = xxG[:,jS+ip]
yyG[:,jS-ip] = -yyG[:,jS+ip]
xxG[:,jE+i] = xxG[:,jE-i-2]
yyG[:,jE+i] = -yyG[:,jE-i-2]
xxG[:,jS-ip] = xxG[:,jS+ip]
yyG[:,jS-ip] = -yyG[:,jS+ip]
xxG[:,jE-1+ip] = xxG[:,jE-1-ip]
yyG[:,jE-1+ip] = -yyG[:,jE-1-ip]
return xxG,yyG
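# Note: with ip = i+1, the rewritten upper-ghost indices above are
# algebraically identical to the old ones (jE+i == jE-1+ip and
# jE-i-2 == jE-1-ip); the new form just mirrors the jS/jE symmetry.
# Quick check with hypothetical values:
#   i, jE = 3, 10; ip = i + 1
#   assert jE + i == jE - 1 + ip and jE - i - 2 == jE - 1 - ip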
@@ -549,25 +549,60 @@ def Aug3D(xxG,yyG,Nk=32,TINY=1.0e-8):
nFj = Nj+1+2*Ng
nFk = Nk+1+2*Ng
iS = Ng
iE = Ng+Ni+1
jS = Ng
jE = Ng+Nj+1
kS = Ng
kE = Ng+Nk+1
X3 = np.zeros((nFi,nFj,nFk))
Y3 = np.zeros((nFi,nFj,nFk))
Z3 = np.zeros((nFi,nFj,nFk))
#Angle about axis including ghosts
dA = 2*np.pi/Nk
A = np.linspace(0-dA*Ng,2*np.pi+dA*Ng,nFk)
A = np.linspace(0,2*np.pi,Nk+1)
for n in range(nFk):
X3[:,:,n] = xxG
Y3[:,:,n] = yyG*np.cos(A[n])
Z3[:,:,n] = yyG*np.sin(A[n])
for n in range(Nk+1):
X3[:,:,kS+n] = xxG
Y3[:,:,kS+n] = yyG*np.cos(A[n])
Z3[:,:,kS+n] = yyG*np.sin(A[n])
# K is periodic so ensure the last entry equals the first
X3[:,:,kE-1] = X3[:,:,kS]
Y3[:,:,kE-1] = Y3[:,:,kS]
Z3[:,:,kE-1] = Z3[:,:,kS]
# correct K periodic ghosts
for i in range(0,Ng):
ip=i+1
X3[:,:,kS-ip] = X3[:,:,kE-1-ip]
Y3[:,:,kS-ip] = Y3[:,:,kE-1-ip]
Z3[:,:,kS-ip] = Z3[:,:,kE-1-ip]
X3[:,:,kE-1+ip] = X3[:,:,kS+ip]
Y3[:,:,kE-1+ip] = Y3[:,:,kS+ip]
Z3[:,:,kE-1+ip] = Z3[:,:,kS+ip]
# correct J reflective ghosts
for k in range(0,nFk):
kp = k+Nk//2
if kp >= kE-1:
kp = k-Nk//2
for i in range(0,Ng):
ip = i+1
X3[:,jS-ip,k] = X3[:,jS+ip,kp]
Y3[:,jS-ip,k] = Y3[:,jS+ip,kp]
Z3[:,jS-ip,k] = Z3[:,jS+ip,kp]
X3[:,jE-1+ip,k] = X3[:,jE-1-ip,kp]
Y3[:,jE-1+ip,k] = Y3[:,jE-1-ip,kp]
Z3[:,jE-1+ip,k] = Z3[:,jE-1-ip,kp]
#Force points to plane
Z3[:,:,Ng] = 0.0
Z3[:,:,-Ng-1] = 0.0
Z3[:,:,Ng+Nk//2] = 0.0
Y3[:,:,Ng+Nk//4]
Y3[:,:,Ng+Nk//4] = 0.0
Y3[:,:,-Ng-Nk//4-1] = 0.0
x0 = X3[:,Ng,Ng]
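# A minimal 1-D sketch of the K-periodic ghost fill above, using a
# hypothetical stand-in array q (Nk and Ng chosen arbitrarily):
import numpy as np
Nk, Ng = 8, 2
q = np.zeros(Nk + 1 + 2*Ng)
kS, kE = Ng, Ng + Nk + 1
q[kS:kE] = np.arange(Nk + 1)      # physical points
q[kE-1] = q[kS]                   # periodic: last physical point equals first
for ip in range(1, Ng + 1):
    q[kS - ip] = q[kE - 1 - ip]   # left ghosts wrap from the right end
    q[kE - 1 + ip] = q[kS + ip]   # right ghosts wrap from the left end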


@@ -317,7 +317,7 @@ class GameraPipe(object):
NrX = max(self.Nr,1)
datasets = []
with alive_bar(NrX,title=titStr.ljust(kdefs.barLab),length=kdefs.barLen,bar=kdefs.barDef) as bar, \
with alive_bar(NrX,title=titStr.ljust(kdefs.barLab),length=kdefs.barLen,bar=kdefs.barDef,disable=not doVerb) as bar, \
ProcessPoolExecutor(max_workers=self.nWorkers) as executor:
futures = [executor.submit(kh5.PullVarLoc, fIn, vID, sID, loc=loc) for fIn, loc in files]
for future in as_completed(futures):
@@ -343,7 +343,7 @@ class GameraPipe(object):
if (vScl is not None):
V = vScl*V
return V
#Get 3D variable "vID" from Step# sID
def GetVar(self,vID,sID=None,vScl=None,doVerb=True):
''' Read Var with name vID
@@ -369,7 +369,7 @@ class GameraPipe(object):
else:
titStr = ''
NrX = max(self.Nr,1)
with alive_bar(NrX,title=titStr.ljust(kdefs.barLab),length=kdefs.barLen) as bar:
with alive_bar(NrX,title=titStr.ljust(kdefs.barLab),length=kdefs.barLen,disable=not doVerb) as bar:
for (i,j,k) in itertools.product(range(self.Ri),range(self.Rj),range(self.Rk)):
iS = i*self.dNi
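# Both bars now pass disable=not doVerb, so quiet runs suppress the bar in
# one place instead of branching at every call site. A hedged sketch,
# assuming alive-progress 2.x provides the disable keyword:
#   from alive_progress import alive_bar
#   with alive_bar(100, title="Reading", disable=True) as bar:  # renders nothing
#       for _ in range(100):
#           bar()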


@@ -72,16 +72,18 @@ class GamsphPipe(GameraPipe):
self.yyc = np.zeros((Nr ,Np ))
self.BzD = np.zeros((Nr ,Np ))
#Create corners for stretched polar grid
#Use halfway k since it is agnostic to ghosts
eqK = self.Nk//2
#Upper half plane
for j in range(self.Nj):
self.xxi[:,j] = self.X[:,j,0]
self.yyi[:,j] = self.Y[:,j,0]
self.xxi[:,j] = self.X[:,j,eqK]
self.yyi[:,j] = -self.Y[:,j,eqK]
#Lower half plane
for j in range(self.Nj,Np+1):
jp = Np-j
self.xxi[:,j] = self.X[:,jp,0]
self.yyi[:,j] = -self.Y[:,jp,0]
self.xxi[:,j] = self.X[:,jp,eqK]
self.yyi[:,j] = self.Y[:,jp,eqK]
#Get centers for stretched polar grid & BzD
self.xxc = 0.25*(self.xxi[:-1,:-1] + self.xxi[1:,:-1] + self.xxi[:-1,1:] + self.xxi[1:,1:])
@@ -149,19 +151,23 @@ class GamsphPipe(GameraPipe):
#Get "egg" slice, variable matched to stretched polar grid
#Either equatorial or meridional
def EggSlice(self,vID,sID=None,vScl=None,doEq=True,doVerb=True):
def EggSlice(self,vID,sID=None,vScl=None,doEq=True,doVerb=True,numGhost=0):
#Get full 3D variable first
Q = self.GetVar(vID,sID,vScl,doVerb)
#For upper/lower half planes, average above/below
Nk2 = self.Nk//2
Nk4 = self.Nk//4
Nk2 = (self.Nk-2*numGhost)//2
Nk4 = (self.Nk-2*numGhost)//4
if (doEq):
ku = -1
kl = Nk2-1
ku1 = self.Nk - numGhost - 1
ku2 = numGhost
kl1 = numGhost + Nk2-1
kl2 = numGhost + Nk2
else:
ku = Nk4 - 1
kl = 3*Nk4-1
ku1 = numGhost + Nk4 - 1
ku2 = numGhost + Nk4
kl1 = numGhost + 3*Nk4 - 1
kl2 = numGhost + 3*Nk4
Nr = self.Ni
Np = 2*self.Nj
Qk = np.zeros((Nr,Np))
@@ -169,10 +175,10 @@ class GamsphPipe(GameraPipe):
if (j>=self.Nj):
#Lower half plane
jp = Np-j-1
Qk[:,j] = 0.5*( Q[:,jp,kl] + Q[:,jp,kl+1] )
Qk[:,j] = 0.5*( Q[:,jp,kl1] + Q[:,jp,kl2] )
else:
jp = j
Qk[:,j] = 0.5*( Q[:,jp,ku] + Q[:,jp,ku+1] )
Qk[:,j] = 0.5*( Q[:,jp,ku1] + Q[:,jp,ku2] )
return Qk
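# The numGhost offsets keep the averaged k-planes inside the physical region
# when the stored data retain ghost cells. A hedged usage sketch (pipe
# object, variable name, and step number are hypothetical):
#   dbz = gsph.EggSlice("dBz", sID=100, doEq=True, numGhost=2)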
#Standard equatorial dbz (in nT)


@@ -276,18 +276,34 @@ def downVolt(G):
#Upscale gBAvg variable (G) on grid X,Y,Z to doubled grid with simple linear interpolation.
#Typical size at quad for example: gBAvg Dataset {3, 128, 96, 1}
def upVolt(G):
Nd,Nk,Nj,Ni = G.shape
Gu = np.zeros((Nd,2*Nk,2*Nj,Ni))
print("Upscaling volt variables gBAvg etc...")
#Loop over coarse grid
for d in range(Nd):
for i in range(Ni):
for k in range(Nk):
for j in range(Nj):
Gjk = G[d,k,j,i]
j0 = 2*j
k0 = 2*k
Gu[d,k0:k0+2,j0:j0+2,i] = Gjk
try:
Nd,Nk,Nj,Ni = G.shape
Gu = np.zeros((Nd,2*Nk,2*Nj,Ni))
print("Upscaling volt variables gBAvg etc...")
#Loop over coarse grid
for d in range(Nd):
for i in range(Ni):
for k in range(Nk):
for j in range(Nj):
Gjk = G[d,k,j,i]
j0 = 2*j
k0 = 2*k
Gu[d,k0:k0+2,j0:j0+2,i] = Gjk
except ValueError: #4-D unpack failed, fall back to the 5-D gBHist layout
Nd,Nk,Nj,Ni,Nh = G.shape
Gu = np.zeros((Nd,2*Nk,2*Nj,Ni,Nh))
print("Upscaling volt variables gBHist etc...")
print(G.shape)
#Loop over coarse grid
for h in range(Nh):
for d in range(Nd):
for i in range(Ni):
for k in range(Nk):
for j in range(Nj):
Gjk = G[d,k,j,i,h]
j0 = 2*j
k0 = 2*k
Gu[d,k0:k0+2,j0:j0+2,i,h] = Gjk
return Gu
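# The quadruple loop above is a nearest-neighbour doubling in K and J; an
# equivalent vectorized sketch (hypothetical helper; it covers both the 4-D
# and 5-D layouts, since any trailing axes pass through unchanged):
import numpy as np
def upVoltFast(G):
    return np.repeat(np.repeat(G, 2, axis=1), 2, axis=2)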
#Downscale gas variable (G) on grid X,Y,Z (w/ ghosts) to halved grid


@@ -23,11 +23,12 @@ eMax = 5.0 #Max current for contours
#Default pressure colorbar
vP = kv.genNorm(vMin=1.0e-2,vMax=10.0,doLog=True)
szStrs = ['small','std','big','dm']
szStrs = ['small','std','big','bigger','fullD','dm']
szBds = {}
szBds["std"] = [-40.0 ,20.0,2.0]
szBds["big"] = [-100.0,20.0,2.0]
szBds["bigger"] = [-200.0,25.0,2.0]
szBds["fullD"] = [-300.0,30.0,3.0] # full domain for double res
szBds["small"] = [-10.0 , 5.0,2.0]
szBds["dm"] = [-30.0 ,10.0,40.0/15.0]
@@ -48,6 +49,209 @@ def GetSizeBds(args):
return xyBds
#Plot absolute error in the requested, or given, equatorial field
def PlotEqErrAbs(gsphP,gsphO,nStp,xyBds,Ax,fieldNames,AxCB=None,doClear=True,doDeco=True,vMin=1e-9,vMax=1e-4,doLog=True,doVerb=True):
#specify two gsph objects as gsphPredicted (gsphP) and gsphObserved (gsphO)
#
normAbs = kv.genNorm(vMin,vMax,doLog=doLog)
cmapAbs = "PRGn"
if doLog:
cmapAbs = "magma"
if (AxCB is not None):
#Add the colorbar to AxCB
AxCB.clear()
cbString = ""
for fn in fieldNames:
cbString = cbString + "'" + fn + "', "
cbString = cbString + " Absolute Error"
kv.genCB(AxCB,normAbs,cbString,cM=cmapAbs)
#Now do main plotting
if (doClear):
Ax.clear()
dataAbs = None
for fn in fieldNames:
dataP = gsphP.EggSlice(fn,nStp,doEq=True,doVerb=doVerb)
dataO = gsphO.EggSlice(fn,nStp,doEq=True,doVerb=doVerb)
if dataAbs is None:
dataAbs = np.square(dataO - dataP)
else:
dataAbs = dataAbs + np.square(dataO - dataP)
dataAbs = np.sqrt(dataAbs)
Ax.pcolormesh(gsphP.xxi,gsphP.yyi,dataAbs,cmap=cmapAbs,norm=normAbs)
kv.SetAx(xyBds,Ax)
if (doDeco):
kv.addEarth2D(ax=Ax)
Ax.set_xlabel('SM-X [Re]')
Ax.set_ylabel('SM-Y [Re]')
return dataAbs
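# Note: across fieldNames the plotted quantity is the pointwise
# root-sum-square error sqrt(sum_f (O_f - P_f)**2), e.g. the magnitude of
# the vector error when fieldNames = ["Bx","By","Bz"].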
#Plot relative error in the requested, or given, equatorial field
def PlotEqErrRel(gsphP,gsphO,nStp,xyBds,Ax,fieldNames,AxCB=None,doClear=True,doDeco=True,vMin=1e-16,vMax=1,doLog=True,doVerb=True):
#specify two gsph objects as gsphPredicted (gsphP) and gsphObserved (gsphO)
#
normRel = kv.genNorm(vMin,vMax,doLog=doLog)
cmapRel = "PRGn"
if doLog:
cmapRel = "magma"
if (AxCB is not None):
#Add the colorbar to AxCB
AxCB.clear()
cbString = ""
for fn in fieldNames:
cbString = cbString + "'" + fn + "', "
cbString = cbString + " Relative Error"
kv.genCB(AxCB,normRel,cbString,cM=cmapRel)
#Now do main plotting
if (doClear):
Ax.clear()
dataAbs = None
dataBase = None
for fn in fieldNames:
dataP = gsphP.EggSlice(fn,nStp,doEq=True,doVerb=doVerb)
dataO = gsphO.EggSlice(fn,nStp,doEq=True,doVerb=doVerb)
if dataAbs is None:
dataBase = np.square(dataP)
dataAbs = np.square(dataO - dataP)
else:
dataBase = dataBase + np.square(dataP)
dataAbs = dataAbs + np.square(dataO - dataP)
dataBase = np.sqrt(dataBase)
dataAbs = np.sqrt(dataAbs)
# math breaks when Base field is exactly 0
dataBase[dataBase == 0] = np.finfo(np.float32).tiny
dataRel = np.absolute(dataAbs/dataBase)
Ax.pcolormesh(gsphP.xxi,gsphP.yyi,dataRel,cmap=cmapRel,norm=normRel)
kv.SetAx(xyBds,Ax)
if (doDeco):
kv.addEarth2D(ax=Ax)
Ax.set_xlabel('SM-X [Re]')
Ax.set_ylabel('SM-Y [Re]')
return dataRel
#Plot absolute error along the requested logical axis
def PlotLogicalErrAbs(gsphP,gsphO,nStp,Ax,fieldNames,meanAxis,AxCB=None,doClear=True,doDeco=True,vMin=1e-16,vMax=1,doLog=True,doVerb=True):
#specify two gsph objects as gsphPredicted (gsphP) and gsphObserved (gsphO)
#
normAbs = kv.genNorm(vMin,vMax,doLog=doLog)
cmapAbs = "PRGn"
if doLog:
cmapAbs = "magma"
if (AxCB is not None):
#Add the colorbar to AxCB
AxCB.clear()
cbString = ""
for fn in fieldNames:
cbString = cbString + "'" + fn + "', "
cbString = cbString + " Absolute Error along "
if meanAxis == 0:
cbString = cbString + "I axis"
elif meanAxis == 1:
cbString = cbString + "J axis"
elif meanAxis == 2:
cbString = cbString + "K axis"
kv.genCB(AxCB,normAbs,cbString,cM=cmapAbs)
#Now do main plotting
if (doClear):
Ax.clear()
dataAbs = CalcTotalErrAbs(gsphP,gsphO,nStp,fieldNames,doVerb=doVerb,meanAxis=meanAxis)
dataAbs = np.transpose(dataAbs) # transpose to put I/J on the horizontal axis
Ax.pcolormesh(dataAbs,cmap=cmapAbs,norm=normAbs)
if (doDeco):
if meanAxis == 0:
Ax.set_xlabel('J Indices')
Ax.set_ylabel('K Indices')
elif meanAxis == 1:
Ax.set_xlabel('I Indices')
Ax.set_ylabel('K Indices')
elif meanAxis == 2:
Ax.set_xlabel('I Indices')
Ax.set_ylabel('J Indices')
return dataAbs
#Plot relative error along the requested logical axis
def PlotLogicalErrRel(gsphP,gsphO,nStp,Ax,fieldNames,meanAxis,AxCB=None,doClear=True,doDeco=True,vMin=1e-16,vMax=1,doLog=True,doVerb=True):
#specify two gsph objects as gsphPredicted (gsphP) and gsphObserved (gsphO)
#
normRel = kv.genNorm(vMin,vMax,doLog=doLog)
cmapRel = "PRGn"
if doLog:
cmapRel = "magma"
if (AxCB is not None):
#Add the colorbar to AxCB
AxCB.clear()
cbString = ""
for fn in fieldNames:
cbString = cbString + "'" + fn + "', "
cbString = cbString + " Relative Error along "
if meanAxis == 0:
cbString = cbString + "I axis"
elif meanAxis == 1:
cbString = cbString + "J axis"
elif meanAxis == 2:
cbString = cbString + "K axis"
kv.genCB(AxCB,normRel,cbString,cM=cmapRel)
#Now do main plotting
if (doClear):
Ax.clear()
dataRel = CalcTotalErrRel(gsphP,gsphO,nStp,fieldNames,doVerb=doVerb,meanAxis=meanAxis)
dataRel = np.transpose(dataRel) # transpose to put I/J on the horizontal axis
Ax.pcolormesh(dataRel,cmap=cmapRel,norm=normRel)
if (doDeco):
if meanAxis == 0:
Ax.set_xlabel('J Indices')
Ax.set_ylabel('K Indices')
elif meanAxis == 1:
Ax.set_xlabel('I Indices')
Ax.set_ylabel('K Indices')
elif meanAxis == 2:
Ax.set_xlabel('I Indices')
Ax.set_ylabel('J Indices')
return dataRel
#Calculate total cumulative absolute error between two cases
def CalcTotalErrAbs(gsphP,gsphO,nStp,fieldNames,doVerb=True,meanAxis=None):
dataAbs = None
for fn in fieldNames:
dataP = gsphP.GetVar(fn,nStp,doVerb=doVerb)
dataO = gsphO.GetVar(fn,nStp,doVerb=doVerb)
if dataAbs is None:
dataAbs = np.square(dataO - dataP)
else:
dataAbs = dataAbs + np.square(dataO - dataP)
dataAbs = np.sqrt(dataAbs)
return np.mean(dataAbs,axis=meanAxis)
#Calculate total cumulative relative error between two cases
def CalcTotalErrRel(gsphP,gsphO,nStp,fieldNames,doVerb=True,meanAxis=None):
dataAbs = None
dataBase = None
for fn in fieldNames:
dataP = gsphP.GetVar(fn,nStp,doVerb=doVerb)
dataO = gsphO.GetVar(fn,nStp,doVerb=doVerb)
if dataAbs is None:
dataBase = np.square(dataP)
dataAbs = np.square(dataO - dataP)
else:
dataBase = dataBase + np.square(dataP)
dataAbs = dataAbs + np.square(dataO - dataP)
dataBase = np.sqrt(dataBase)
dataAbs = np.sqrt(dataAbs)
# math breaks when Base field is exactly 0
dataBase[dataBase == 0] = np.finfo(np.float32).tiny
dataRel = np.absolute(dataAbs/dataBase)
return np.mean(dataRel,axis=meanAxis)
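# A hedged usage sketch of the two reducers (pipe objects, step number, and
# field list are hypothetical):
#   errMap = CalcTotalErrAbs(gsphP,gsphO,100,["Bx","By","Bz"],meanAxis=2) #(Ni,Nj) map, averaged over K
#   errAll = CalcTotalErrRel(gsphP,gsphO,100,["Bx","By","Bz"])            #meanAxis=None -> single scalar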
#Plot equatorial field
def PlotEqB(gsph,nStp,xyBds,Ax,AxCB=None,doClear=True,doDeco=True,doBz=False):
vBZ = kv.genNorm(dbMax)

View File

@@ -36,12 +36,12 @@ DenPP = 50.0
ppCol = "orange"
#Get equatorial coordinates, masked if asked to
def RCMEq(rcmdata,nStp,doMask=False,doXYZ=doXYZ):
def RCMEq(rcmdata,nStp,doMask=False,doXYZ=doXYZ, doVerb=True):
bmX = rcmdata.GetVar("xMin",nStp)
bmY = rcmdata.GetVar("yMin",nStp)
bmX = rcmdata.GetVar("xMin",nStp, doVerb=doVerb)
bmY = rcmdata.GetVar("yMin",nStp, doVerb=doVerb)
if (doXYZ):
bmZ = rcmdata.GetVar("zMin",nStp)
bmZ = rcmdata.GetVar("zMin",nStp, doVerb=doVerb)
bmP = np.arctan2(bmY,bmX)
bmR = np.sqrt(bmX*bmX + bmY*bmY + bmZ*bmZ)
bmX = bmR*np.cos(bmP)
@@ -52,22 +52,22 @@ def RCMEq(rcmdata,nStp,doMask=False,doXYZ=doXYZ):
bmY = ma.masked_array(bmY,mask=I)
return bmX,bmY
def GetVarMask(rcmdata,nStp,Qid="P",I=None):
def GetVarMask(rcmdata,nStp,Qid="P",I=None, doVerb=True):
if (I is None):
I = GetMask(rcmdata,nStp)
Q = rcmdata.GetVar(Qid,nStp)
Q = rcmdata.GetVar(Qid,nStp, doVerb=doVerb)
Q = ma.masked_array(Q,mask=I)
return Q
def GetPotential(rcmdata,nStp,I=None,NumCP=25):
def GetPotential(rcmdata,nStp,I=None,NumCP=25, doVerb=True):
if (I is None):
I = GetMask(rcmdata,nStp)
pot = (1.0e-3)*rcmdata.GetVar("pot",nStp)
pot = (1.0e-3)*rcmdata.GetVar("pot",nStp, doVerb=doVerb)
if (doCorot):
#Add corotation potential
colat = GetVarMask(rcmdata,nStp,"colat" ,I)
colat = GetVarMask(rcmdata,nStp,"colat" ,I, doVerb=doVerb)
pcorot = -Psi0*(RioRe)*(np.sin(colat)**2.0)
pot = pot + pcorot
pMag = np.abs(pot).max()
@@ -77,20 +77,20 @@ def GetPotential(rcmdata,nStp,I=None,NumCP=25):
#Calculate mask
#doRCM: Do RCM domain or full closed region
def GetMask(rcmdata,nStp):
IOpen = rcmdata.GetVar("IOpen",nStp)
def GetMask(rcmdata,nStp, doVerb=True):
IOpen = rcmdata.GetVar("IOpen",nStp, doVerb=doVerb)
if (doEll):
ioCut = -0.5
else:
ioCut = 0.5
bmX = rcmdata.GetVar("xMin",nStp)
bmY = rcmdata.GetVar("yMin",nStp)
bmX = rcmdata.GetVar("xMin",nStp, doVerb=doVerb)
bmY = rcmdata.GetVar("yMin",nStp, doVerb=doVerb)
bmR = np.sqrt(bmX*bmX + bmY*bmY)
Ir = (bmR<rMin) | (bmR>rMax)
if (doCut):
Prcm = rcmdata.GetVar("P",nStp)
Prcm = rcmdata.GetVar("P",nStp, doVerb=doVerb)
I = Ir | (IOpen > ioCut) | (Prcm<pCut)
else:
I = Ir | (IOpen > ioCut)
@@ -115,7 +115,7 @@ def RCMInset(AxRCM,rcmdata,nStp,vP,pCol="k",doPP=True):
Npp = GetVarMask(rcmdata,nStp,"Npsph" ,I)
#Start plotting
AxRCM.pcolor(bmX,bmY,Prcm,norm=vP,cmap=pCMap)
AxRCM.pcolor(bmX,bmY,Prcm,norm=vP,cmap=pCMap,shading='auto')
AxRCM.plot(bmX,bmY,color=eCol,linewidth=eLW)
AxRCM.plot(bmX.T,bmY.T,color=eCol,linewidth=eLW)
if (pCol is not None):

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,669 +0,0 @@
#Various tools to post-process and analyze Gamera heliosphere runs
from kaipy.kdefs import *
import kaipy.gamera.gampp
from kaipy.gamera.gampp import GameraPipe
import numpy as np
import glob
import kaipy.kaiH5 as kh5
import timeit
#Object to pull from MPI/Serial heliosphere runs (H5 data), extends base
ffam = "monospace"
dLabC = "black" #Default label color
dLabFS = "medium" #Default label size
dBoxC = "lightgrey" #Default box color
TINY = 1.0e-8
MK = 1.e6 #MegaKelvin
#Adapted to helio grid
class GamsphPipe(GameraPipe):
#Initialize object, rely on base class, take optional unit identifier
def __init__(self,fdir,ftag,doFast=False,uID="Inner",doParallel=False,nWorkers=4):
print("Initializing %s heliosphere"%(uID))
#units for inner helio
self.bScl = 100. #->nT
self.vScl = 150. #-> km/s
self.tScl = 4637. #->seconds
self.dScl = 200. #cm-3
self.TScl = 1.e-6/4/np.pi/200./kbltz/MK #in MK
# units for OHelio
#self.bScl = 5. #->nT
#self.vScl = 34.5 #-> km/s
#self.tScl = 1.4e8/34.5
#self.dScl = 10. #cm-3
#self.TScl = 0.144 #in MK
#2D equatorial grid
self.xxi = [] ; self.yyi = [] #corners
self.xxc = [] ; self.yyc = [] #centers
#base class, will use OpenPipe below
GameraPipe.__init__(self,fdir,ftag,doFast=doFast,doParallel=doParallel,nWorkers=nWorkers)
#inner boundary distance
self.R0 = self.xxc[0,0]
#j and k for radial profile
self.jRad = self.Nj//2
self.kRad = self.Nk//4
def OpenPipe(self,doVerbose=True):
GameraPipe.OpenPipe(self,doVerbose)
if (self.UnitsID != "CODE"):
self.bScl = 1.0 #->nT
self.vScl = 1.0 #-> km/s
self.tScl = 1.0 #-> Seconds
self.dScl = 1.0 #-> cm-3
self.TScl = 1.0/kbltz/MK #-> MKelvin
#Rescale time
self.T = self.tScl*self.T
Neq_a = self.Nj//2 #cell above eq plane
Nr = self.Ni
Np = self.Nk
#corners in eq XY plane
self.xxi = np.zeros((Nr+1,Np+1))
self.yyi = np.zeros((Nr+1,Np+1))
#centers
self.xxc = np.zeros((Nr ,Np ))
self.yyc = np.zeros((Nr ,Np ))
#Grid for equatorial plane. Should probably be done as a separate function
#equatorial plane
#corners i,k in eq plane, j index is Neq_a
self.xxi[:,:] = self.X[:,Neq_a,:]
self.yyi[:,:] = self.Y[:,Neq_a,:]
#centers i,k
self.xxc = 0.25*(self.xxi[:-1,:-1] + self.xxi[1:,:-1] + self.xxi[:-1,1:] + self.xxi[1:,1:])
self.yyc = 0.25*(self.yyi[:-1,:-1] + self.yyi[1:,:-1] + self.yyi[:-1,1:] + self.yyi[1:,1:])
r = np.sqrt(self.xxc**2.0 + self.yyc**2.0)
if (self.hasMJD):
print("Found MJD data")
print("\tTime (Min/Max) = %f/%f"%(self.MJDs.min(),self.MJDs.max()))
#Var eq slice
def EqSlice(self,vID,sID=None,vScl=None,doEq=True,doVerb=True):
#Get full 3D variable first
Q = self.GetVar(vID,sID,vScl,doVerb)
Nj2 = self.Nj//2
#above and below the eq plane
ja = Nj2 - 1
jb = ja + 1
Nr = self.Ni
Np = self.Nk
#equatorial j-slice of var
Qj = np.zeros((Nr,Np))
#taking average above/below eq plane
Qj[:,:] = 0.5*( Q[:,ja,:] + Q[:,jb,:] )
return Qj
#Var theta slice
def jSlice(self,vID,sID=None,vScl=None,doEq=True,doVerb=True,jidx=-1):
#Get full 3D variable first
Q = self.GetVar(vID,sID,vScl,doVerb)
if(jidx == -1):
Nj2 = self.Nj//2
else:
Nj2 = jidx
#above and below the j plane
ja = Nj2 - 1
jb = ja + 1
Nr = self.Ni
Np = self.Nk
#equatorial j-slice of var
Qj = np.zeros((Nr,Np))
#taking average above/below eq plane
Qj[:,:] = 0.5*( Q[:,ja,:] + Q[:,jb,:] )
return Qj
#Radial profile thru cell centers
def RadialProfileGrid(self):
self.GetGrid(doVerbose=True)
#cell corners
x = self.X [:,:,:]
y = self.Y [:,:,:]
z = self.Z [:,:,:]
#cell centers
x_c = 0.125*(x[:-1,:-1,:-1]+x[:-1,:-1,1:]+x[:-1,1:,:-1]+x[:-1,1:,1:]+
x[1:,:-1,:-1]+x[1:,:-1,1:]+x[1:,1:,:-1]+x[1:,1:,1:])
y_c = 0.125*(y[:-1,:-1,:-1]+y[:-1,:-1,1:]+y[:-1,1:,:-1]+y[:-1,1:,1:]+
y[1:,:-1,:-1]+y[1:,:-1,1:]+y[1:,1:,:-1]+y[1:,1:,1:])
z_c = 0.125*(z[:-1,:-1,:-1]+z[:-1,:-1,1:]+z[:-1,1:,:-1]+z[:-1,1:,1:]+
z[1:,:-1,:-1]+z[1:,:-1,1:]+z[1:,1:,:-1]+z[1:,1:,1:])
#radius of cell centers
jR = self.jRad
kR = self.kRad
r = np.sqrt(x_c[:,jR,kR]**2.0 + y_c[:,jR,kR]**2.0 + z_c[:,jR,kR]**2.)
return r
#NOT USED merid plane Y=0
def MeridGrid(self):
#Get Grid
self.GetGrid(doVerbose=True)
Nk2 = self.Nk//2
Nt = self.Nj
#looking from -Y to XZ plane
xright = self.X[:,:,0] #corners
xleft = self.X [:,:,Nk2]
zright = self.Z[:,:,0] #corners
zleft = self.Z[:,:,Nk2]
#stack right and left together
xmer = np.hstack( (xright, xleft[:,::-1]) ) #reverse j
zmer = np.hstack( (zright, zleft[:,::-1]) ) #reverse j
#cell centers
xmer_c = 0.25*( xmer[:-1,:-1]+xmer[:-1,1:]+xmer[1:,:-1]+xmer[1:,1:] )
xmer_c = np.delete(xmer_c, Nt, axis = 1)
zmer_c = 0.25*( zmer[:-1,:-1]+zmer[:-1,1:]+zmer[1:,:-1]+zmer[1:,1:] )
zmer_c = np.delete(zmer_c, Nt, axis = 1)
return xmer_c, zmer_c
#merid plane from kidx from two halfs
def MeridGridHalfs(self,kidx=None,phi=None):
self.GetGrid(doVerbose=True)
if(kidx):
Nk1 = kidx
Nk2 = self.Nk//2 + Nk1
elif(phi):
Nk1 = int(phi/(2*np.pi)*self.Nk)
Nk2 = self.Nk//2 + Nk1
else:
Nk1 = 0
Nk2 = self.Nk//2
#looking from -Y to XZ plane
xright = self.X[:,:,Nk1] #corners
yright = self.Y[:,:,Nk1] #corners
zright = self.Z[:,:,Nk1] #corners
xleft = self.X[:,:,Nk2]
yleft = self.Y[:,:,Nk2]
zleft = self.Z[:,:,Nk2]
rleft = -np.sqrt(xleft**2. + yleft**2)
xright_c = 0.25*( xright[:-1,:-1]+xright[:-1,1:]+xright[1:,:-1]+xright[1:,1:] )
yright_c = 0.25*( yright[:-1,:-1]+yright[:-1,1:]+yright[1:,:-1]+yright[1:,1:] )
zright_c = 0.25*( zright[:-1,:-1]+zright[:-1,1:]+zright[1:,:-1]+zright[1:,1:] )
r = np.sqrt(xright_c**2 + zright_c**2 + yright_c**2)
#centers: right plane, left plane, radius
return xright, yright, zright, xleft, yleft, zleft, r
#Grid at 1 AU lat lon
def iSliceGrid(self,idx=-1):
#Get Grid
self.GetGrid(doVerbose=True)
rxy = np.sqrt(self.X**2 + self.Y**2)
theta = np.arctan2(rxy,self.Z)
phi = np.arctan2(self.Y,self.X)
#theta [theta < 0] += np.pi/2.
theta += -np.pi/2.
theta = theta*180./np.pi
phi [phi < 0] += 2*np.pi
phi = phi*180./np.pi
#last i-index == face of the last cell
lat = theta[idx,::-1,:]
lon = phi[idx,:,:]
#these are corners
return lat, lon
#Vars at Y=0
def MeridSlice(self,vID,sID=None,vScl=None,doVerb=True,indx=(None,None)):
#Get full 3D variable first
Q = self.GetVar(vID,sID,vScl,doVerb)
Nk2 = self.Nk//2
kidx, phi = indx
if(kidx):
Nk1 = kidx
Nk2 = self.Nk//2 + Nk1
Np = Nk1 - 1
elif(phi):
Nk1 = int(phi/(2*np.pi)*self.Nk)
Nk2 = self.Nk//2 + Nk1
Np = Nk1 - 1
else:
Nk1 = 0
Nk2 = self.Nk//2
Np = self.Nk
#Nr = self.Ni
#Nt = 2*self.Nj
#XZ meridional slice (k=0) of var
#Qj = np.zeros((Nr,Nt))
Qright = 0.5*( Q[:,:,Nk1] + Q[:,:,Np-1] )
Qleft = 0.5*( Q[:,:,Nk2-1] + Q[:,:,Nk2] )
#print (Qright.shape, Qleft.shape)
#Qj = np.hstack( (Qright, Qleft[:,::-1]) ) #reverse in j
#print (Qj.shape)
return Qright, Qleft
#Var at 1 AU
def iSliceVar(self,vID,sID=None,vScl=None,doVerb=True,idx=-1):
#Get full 3D variable first
Q = self.GetVar(vID,sID,vScl,doVerb)
#cell centered values from the last cell
Qi = Q[idx,:,:]
#cell centered values from the first cell
#Qi = Q[0,:,:]
#jd_c = self.MJDs[sID]
#print ('jd_c = ', jd_c)
return Qi
#Var along 1D radial line
def RadialProfileVar(self,vID,sID=None,vScl=None,doVerb=True):
#Get full 3D variable first
Q = self.GetVar(vID,sID,vScl,doVerb)
#set j and k for a radial profile
jR = self.jRad
kR = self.kRad
Nr = self.Ni
Qi = np.zeros(Nr)
#variable in a cell center
Qi = Q[:,jR,kR]
return Qi
#Radial Profile: Normalized Density
def RadProfDen(self,s0=0):
D = self.RadialProfileVar("D", s0)
r = self.RadialProfileGrid()
Norm = r**2./r[0]/r[0]
D = D*Norm*self.dScl
return D
#Radial Profile: Speed
def RadProfSpeed(self,s0=0):
Vx = self.RadialProfileVar("Vx", s0)
Vy = self.RadialProfileVar("Vy", s0)
Vz = self.RadialProfileVar("Vz", s0)
MagV = self.vScl*np.sqrt(Vx**2.0+Vy**2.0+Vz**2.0)
return MagV
#Radial Profile: Normalized Flux rho*V*r^2
def RadProfFlux(self,s0=0):
D = self.RadialProfileVar("D", s0)
Vx = self.RadialProfileVar("Vx", s0)
Vy = self.RadialProfileVar("Vy", s0)
Vz = self.RadialProfileVar("Vz", s0)
r = self.RadialProfileGrid()
Norm = r[:]**2./r[0]/r[0]
Flux = D*Norm*self.dScl*self.vScl*np.sqrt(Vx**2.0+Vy**2.0+Vz**2.0)
return Flux
#Speed at 1 AU
def iSliceMagV(self,s0=0,idx=-1):
Vx = self.iSliceVar("Vx",s0,idx=idx) #Unscaled
Vy = self.iSliceVar("Vy",s0,idx=idx) #Unscaled
Vz = self.iSliceVar("Vz",s0,idx=idx) #Unscaled
Vi = self.vScl*np.sqrt(Vx**2.0+Vy**2.0+Vz**2.0)
return Vi
#Density at 1 AU
def iSliceD(self,s0=0,idx=-1):
Di = self.iSliceVar("D",s0,idx=idx) #Unscaled
Di = Di*self.dScl
return Di
#Br at 1 AU
def iSliceBr(self,s0=0,idx=-1):
Bx = self.iSliceVar("Bx",s0,idx=idx) #Unscaled
By = self.iSliceVar("By",s0,idx=idx) #Unscaled
Bz = self.iSliceVar("Bz",s0,idx=idx) #Unscaled
self.GetGrid(doVerbose=True)
x = self.X[-1,:,:]
y = self.Y[-1,:,:]
z = self.Z[-1,:,:]
#centers
x_c = 0.25*( x[:-1,:-1]+x[:-1,1:]+x[1:,:-1]+x[1:,1:] )
y_c = 0.25*( y[:-1,:-1]+y[:-1,1:]+y[1:,:-1]+y[1:,1:] )
z_c = 0.25*( z[:-1,:-1]+z[:-1,1:]+z[1:,:-1]+z[1:,1:] )
Br = self.bScl*(Bx*x_c + By*y_c + Bz*z_c)/np.sqrt(x_c**2.+y_c**2.+z_c**2.)
return Br
#Bx at 1AU
def iSliceBx(self,s0=0,idx=-1):
Bx = self.iSliceVar("Bx",s0,idx=idx) #Unscaled
self.GetGrid(doVerbose=True)
x = self.X[-1,:,:]
#centers
x_c = 0.25*( x[:-1,:-1]+x[:-1,1:]+x[1:,:-1]+x[1:,1:] )
BxScl = self.bScl*Bx*x_c
return BxScl
#By at 1AU
def iSliceBy(self,s0=0,idx=-1):
By = self.iSliceVar("By",s0,idx=idx) #Unscaled
self.GetGrid(doVerbose=True)
y = self.Y[-1,:,:]
#centers
y_c = 0.25*( y[:-1,:-1]+y[:-1,1:]+y[1:,:-1]+y[1:,1:] )
ByScl = self.bScl*By*y_c
return ByScl
#Bz at 1AU
def iSliceBz(self,s0=0,idx=-1):
Bz = self.iSliceVar("Bz",s0,idx=idx) #Unscaled
self.GetGrid(doVerbose=True)
z = self.Z[-1,:,:]
#centers
z_c = 0.25*( z[:-1,:-1]+z[:-1,1:]+z[1:,:-1]+z[1:,1:] )
BzScl = self.bScl*Bz*z_c
return BzScl
#Br at first cell
def iSliceBrBound(self,s0=0,idx=-1):
Bx = self.iSliceVar("Bx",s0,idx=idx) #Unscaled
By = self.iSliceVar("By",s0,idx=idx) #Unscaled
Bz = self.iSliceVar("Bz",s0,idx=idx) #Unscaled
self.GetGrid(doVerbose=True)
x = self.X[0,:,:]
y = self.Y[0,:,:]
z = self.Z[0,:,:]
#centers
x_c = 0.25*( x[:-1,:-1]+x[:-1,1:]+x[1:,:-1]+x[1:,1:] )
y_c = 0.25*( y[:-1,:-1]+y[:-1,1:]+y[1:,:-1]+y[1:,1:] )
z_c = 0.25*( z[:-1,:-1]+z[:-1,1:]+z[1:,:-1]+z[1:,1:] )
Br = self.bScl*(Bx*x_c + By*y_c + Bz*z_c)/np.sqrt(x_c**2.+y_c**2.+z_c**2.)
return Br
#temperature at 1 AU
def iSliceT(self,s0=0,idx=-1):
Pi = self.iSliceVar("P",s0,idx=idx) #Unscaled
Di = self.iSliceVar("D",s0,idx=idx) #Unscaled
Temp = Pi/Di*self.TScl
return Temp
#Equatorial speed (in km/s) in eq plane
def eqMagV(self,s0=0):
Vx = self.EqSlice("Vx",s0) #Unscaled
Vy = self.EqSlice("Vy",s0) #Unscaled
Vz = self.EqSlice("Vz",s0) #Unscaled
Veq = self.vScl*np.sqrt(Vx**2.0+Vy**2.0+Vz**2.0)
return Veq
#Equatorial speed (in km/s) in eq plane
def jMagV(self,s0=0,jidx=-1):
Vx = self.jSlice("Vx",s0,jidx=jidx) #Unscaled
Vy = self.jSlice("Vy",s0,jidx=jidx) #Unscaled
Vz = self.jSlice("Vz",s0,jidx=jidx) #Unscaled
Veq = self.vScl*np.sqrt(Vx**2.0+Vy**2.0+Vz**2.0)
return Veq
#Normalized density (D*r*r/21.5/21.5 in cm-3) in eq plane
def eqNormD (self,s0=0):
D = self.EqSlice("D",s0) #Unscaled
Norm = (self.xxc**2.0 + self.yyc**2.0)/self.R0/self.R0
NormDeq = self.dScl*D*Norm
return NormDeq
#Normalized density (D*r*r/21.5/21.5 in cm-3) in eq plane
def jNormD (self,s0=0,jidx=-1):
D = self.jSlice("D",s0,jidx=jidx) #Unscaled
Norm = (self.xxc**2.0 + self.yyc**2.0)/self.R0/self.R0
NormDeq = self.dScl*D*Norm
return NormDeq
#Normalized Br (Br*r*r/21.5/21.5) in eq plane
def eqNormBr (self,s0=0):
Bx = self.EqSlice("Bx",s0) #Unscaled
By = self.EqSlice("By",s0) #Unscaled
Bz = self.EqSlice("Bz",s0) #Unscaled
Br = (Bx*self.xxc + By*self.yyc)*np.sqrt(self.xxc**2.0 + self.yyc**2.0)/self.R0/self.R0
NormBreq = self.bScl*Br
return NormBreq
#Normalized Br (Br*r*r/21.5/21.5) in eq plane
def jNormBr (self,s0=0,jidx=-1):
Bx = self.jSlice("Bx",s0,jidx=jidx) #Unscaled
By = self.jSlice("By",s0,jidx=jidx) #Unscaled
Bz = self.jSlice("Bz",s0,jidx=jidx) #Unscaled
Br = (Bx*self.xxc + By*self.yyc)*np.sqrt(self.xxc**2.0 + self.yyc**2.0)/self.R0/self.R0
NormBreq = self.bScl*Br
return NormBreq
#Normalized Bx in eq plane
def eqBx (self,s0=0):
Bx = self.EqSlice("Bx",s0) #Unscaled
BxScl = (Bx*self.xxc + Bx*self.yyc)*np.sqrt(self.xxc**2.0 + self.yyc**2.0)/self.R0/self.R0
BxScl = self.bScl*BxScl
return BxScl
#Normalized By in eq plane
def eqBy (self,s0=0):
By = self.EqSlice("By",s0) #Unscaled
ByScl = (By*self.xxc + By*self.yyc)*np.sqrt(self.xxc**2.0 + self.yyc**2.0)/self.R0/self.R0
ByScl= self.bScl*ByScl
return ByScl
#Scaled Bz in eq plane
def eqBz (self,s0=0):
Bz = self.EqSlice("Bz",s0) #Unscaled
BzScl = Bz
BzScl = self.bScl*BzScl
return BzScl
#Temperature T(r/r0) in eq plane
def eqTemp (self,s0=0):
Pres = self.EqSlice("P",s0)
D = self.EqSlice("D",s0)
#T(r/r0)
Temp = Pres/D*self.TScl*np.sqrt(self.xxc**2.0 + self.yyc**2.0)/self.R0
return Temp
#Temperature T(r/r0) in eq plane
def jTemp (self,s0=0,jidx=-1):
Pres = self.jSlice("P",s0,jidx=jidx)
D = self.jSlice("D",s0,jidx=jidx)
#T(r/r0)
Temp = Pres/D*self.TScl*np.sqrt(self.xxc**2.0 + self.yyc**2.0)/self.R0
return Temp
#Meridional speed (in km/s) in Y=indx plane
def MerMagV(self,s0=0,indx=(None,None)):
Vxr, Vxl = self.MeridSlice("Vx",s0,indx=indx) #Unscaled
Vyr, Vyl = self.MeridSlice("Vy",s0,indx=indx) #Unscaled
Vzr, Vzl = self.MeridSlice("Vz",s0,indx=indx) #Unscaled
MagVr = self.vScl*np.sqrt(Vxr**2.0+Vyr**2.0+Vzr**2.0)
MagVl = self.vScl*np.sqrt(Vxl**2.0+Vyl**2.0+Vzl**2.0)
return MagVr, MagVl
#Normalized D in Y=indx plane
def MerDNrm(self,s0=0,indx=(None,None)):
xr, yr, zr, xl, yl, zl, r = self.MeridGridHalfs(*indx)
Dr, Dl = self.MeridSlice("D",s0,indx=indx) #Unscaled
Drn = Dr*self.dScl*r*r/self.R0/self.R0
Dln = Dl*self.dScl*r*r/self.R0/self.R0
return Drn, Dln
#Normalized Br in Y=indx plane
def MerBrNrm(self,s0=0,indx=(None,None)):
xr, yr, zr, xl, yl, zl, r = self.MeridGridHalfs(*indx)
#rxyr = np.sqrt(xr**2. + yr**2)
#rxyl = -np.sqrt(xl**2. + yl**2)
Bxr, Bxl = self.MeridSlice("Bx",s0,indx=indx) #Unscaled
Byr, Byl = self.MeridSlice("By",s0,indx=indx) #Unscaled
Bzr, Bzl = self.MeridSlice("Bz",s0,indx=indx) #Unscaled
#cell centers to calculate Br
xr_c = 0.25*( xr[:-1,:-1]+xr[:-1,1:]+xr[1:,:-1]+xr[1:,1:] )
yr_c = 0.25*( yr[:-1,:-1]+yr[:-1,1:]+yr[1:,:-1]+yr[1:,1:] )
zr_c = 0.25*( zr[:-1,:-1]+zr[:-1,1:]+zr[1:,:-1]+zr[1:,1:] )
xl_c = 0.25*( xl[:-1,:-1]+xl[:-1,1:]+xl[1:,:-1]+xl[1:,1:] )
yl_c = 0.25*( yl[:-1,:-1]+yl[:-1,1:]+yl[1:,:-1]+yl[1:,1:] )
zl_c = 0.25*( zl[:-1,:-1]+zl[:-1,1:]+zl[1:,:-1]+zl[1:,1:] )
#calculating Br
Br_r = (Bxr*xr_c + Byr*yr_c + Bzr*zr_c)*r*self.bScl/self.R0/self.R0
Br_l = (Bxl*xl_c + Byl*yl_c + Bzl*zl_c)*r*self.bScl/self.R0/self.R0
return Br_r, Br_l
#Normalized Temp in Y=indx plane
def MerTemp(self,s0=0,indx=(None,None)):
xr, yr, zr, xl, yl, zl, r = self.MeridGridHalfs(*indx)
Pr, Pl = self.MeridSlice("P",s0,indx=indx) #Unscaled
Dr, Dl = self.MeridSlice("D",s0,indx=indx) #Unscaled
Tempr = Pr/Dr*self.TScl*r/self.R0
Templ = Pl/Dl*self.TScl*r/self.R0
return Tempr, Templ
#Not used for helio as of now
#Return data for meridional 2D field lines
#Need to use Cartesian grid
def bStream(self,s0=0,xyBds=[-35,25,-25,25],dx=0.05):
#Get field data
U = self.bScl*self.EggSlice("Bx",s0,doEq=False)
V = self.bScl*self.EggSlice("Bz",s0,doEq=False)
x1,y1,gu,gv,gM = self.doStream(U,V,xyBds,dx)
return x1,y1,gu,gv,gM
def vStream(self,s0=0,xyBds=[-35,25,-25,25],dx=0.05):
#Get field data
U = self.vScl*self.EggSlice("Vx",s0,doEq=True)
V = self.vScl*self.EggSlice("Vy",s0,doEq=True)
x1,y1,gu,gv,gM = self.doStream(U,V,xyBds,dx)
return x1,y1,gu,gv,gM
#Add time label, xy is position in axis (not data) coords
def AddTime(self,n,Ax,xy=[0.9,0.95],cLab=dLabC,fs=dLabFS,T0=0.0,doBox=True,BoxC=dBoxC):
ffam = "monospace"
HUGE = 1.0e+8
#Decide whether to do UT or elapsed
if (self.hasMJD):
minMJD = self.MJDs[n-self.s0]
else:
minMJD = -HUGE
if (self.hasMJD and minMJD>TINY):
from astropy.time import Time
dtObj = Time(self.MJDs[n-self.s0],format='mjd').datetime
tStr = " " + dtObj.strftime("%H:%M:%S") + "\n" + dtObj.strftime("%m/%d/%Y")
else:
#Get time in seconds
t = self.T[n-self.s0] - T0
Nm = int( (t-T0)/60.0 ) #Minutes, integer
Hr = Nm/60
Min = np.mod(Nm,60)
Sec = np.mod(int(t),60)
tStr = "Elapsed Time\n %02d:%02d:%02d"%(Hr,Min,Sec)
if (doBox):
Ax.text(xy[0],xy[1],tStr,color=cLab,fontsize=fs,transform=Ax.transAxes,family=ffam,bbox=dict(boxstyle="round",fc=dBoxC))
#def AddSW(self,n,Ax,xy=[0.725,0.025],cLab=dLabC,fs=dLabFS,T0=0.0,doBox=True,BoxC=dBoxC,doAll=True):
# import kaipy.kaiH5 as kh5
# #Start by getting SW data
# vIDs = ["D","P","Vx","Bx","By","Bz"]
# Nv = len(vIDs)
# qSW = np.zeros(Nv)
# if (self.isMPI):
# fSW = self.fdir + "/" + kh5.genName(self.ftag,self.Ri-1,0,0,self.Ri,self.Rj,self.Rk)
# else:
# fSW = self.fdir + "/" + self.ftag + ".h5"
#
# for i in range(Nv):
# Q = kh5.PullVar(fSW,vIDs[i],n)
# qSW[i] = Q[-1,0,0]
# D = qSW[0] ; P = qSW[1] ; Vx = qSW[2] ; Bx = qSW[3] ; By = qSW[4] ; Bz = qSW[5]
# SWStr = "Solar Wind\n"
# MagB = self.bScl*np.sqrt(Bx**2.0+By**2.0+Bz**2.0)
# #Clock = atan(by/bz), cone = acos(Bx/B)
# r2deg = 180.0/np.pi
# if (MagB>TINY):
# clk = r2deg*np.arctan2(By,Bz)
# cone = r2deg*np.arccos(self.bScl*Bx/MagB)
# else:
# clk = 0.0
# cone = 0.0
# if (clk<0):
# clk = clk+360.0
# Deg = r"$\degree$"
# SWStr = "Solar Wind\nIMF: %4.1f [nT], %5.1f"%(MagB,clk) + Deg# + ", %5.2f"%(cone) + Deg
# if (doAll):
# SWStr = SWStr + "\nDensity: %5.1f [#/cc] \nSpeed: %6.1f [km/s] "%(D,self.vScl*np.abs(Vx))
# if (doBox):
# Ax.text(xy[0],xy[1],SWStr,color=cLab,fontsize=fs,transform=Ax.transAxes,family=ffam,bbox=dict(boxstyle="round",fc=dBoxC))
# else:
# Ax.text(xy[0],xy[1],SWStr,color=cLab,fontsize=fs,transform=Ax.transAxes,family=ffam,bbox=dict(boxstyle="round",fc=dBoxC))
#def AddCPCP(self,n,Ax,xy=[0.9,0.95],cLab=dLabC,fs=dLabFS,doBox=True,BoxC=dBoxC):
# cpcp = self.GetCPCP(n)
# tStr = "CPCP (North/South)\n%6.2f / %6.2f [kV]"%(cpcp[0],cpcp[1])
# if (doBox):
# Ax.text(xy[0],xy[1],tStr,color=cLab,fontsize=fs,transform=Ax.transAxes,family=ffam,bbox=dict(boxstyle="round",fc=dBoxC))
# else:
# Ax.text(xy[0],xy[1],tStr,color=cLab,fontsize=fs,transform=Ax.transAxes,family=ffam)


@@ -3,7 +3,7 @@ import numpy as np
import h5py
import kaipy.kaiH5 as kh5
import xml.etree.ElementTree as et
import kaipy.kdefs as kdefs
#Add grid info to step
#Geom is topology subelement, iDims is grid size string
def AddGrid(fname,Geom,iDims,coordStrs):
@@ -59,7 +59,7 @@ def getRootVars(fname,gDims):
#Don't include stuff that starts with step or X,Y,Z
vID = str(k)
doV = True
if ("Step" in vID):
if ("Step" in vID or kdefs.grpTimeCache in vID ):
doV = False
if ((vID == "X") or (vID=="Y") or (vID=="Z")):
doV = False


@@ -53,6 +53,7 @@ Rsolar = 6.957e10 # [cm] Solar radius
kbltz = 1.38e-16 # [erg/K] Boltzmann constant
mp = 1.67e-24 # [g] Proton mass
Tsolar = 25.38 # [days] Sidereal solar rotation period
Tsolar_synodic = 27.28 # [days] Synodic solar rotation period
JD2MJD = 2400000.5 # Conversion from JD to MJD: MJD = JD - 2400000.5
Day2s = 86400. # [s] Conversion days => seconds
vc_cgs = 2.99792458e10 # [cm/s] speed of light
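# Consistency check for the new synodic value: viewed from the orbiting
# Earth, 1/T_syn = 1/T_sid - 1/T_year, so assuming a 365.25-day year:
#   1.0/(1.0/25.38 - 1.0/365.25) -> 27.28 [days], matching Tsolar_synodic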


@@ -23,7 +23,7 @@ def addTime(base, fmt="h", ut0Str=defaultUT):
scriptStr = r"""
timeVal = vtk.vtkStringArray()
timeVal.SetName('{:s}')
t = inputs[0].GetInformation().Get(vtk.vtkDataObject.DATA_TIME_STEP())
t = self.GetInput().GetInformation().Get(vtk.vtkDataObject.DATA_TIME_STEP())
{:s}


@@ -0,0 +1,169 @@
MLT Kp Intercept L log10(E) L^2 log10(E)^2 L^3 log10(E)^3 log10(E)*L log10(E)*L^2 log10(E)^2*L Adjusted R2
0 1 -9.717181e+00 7.204274e+00 -3.773921e+00 -1.496923e+00 -8.897443e-01 1.122206e-01 1.720684e-01 7.319817e-01 7.886410e-02 3.525678e-01 0.82448
0 2 -2.945411e+00 3.066352e+00 -3.141152e+00 -6.402161e-01 -1.039579e+00 5.404518e-02 1.168904e-01 9.266642e-01 5.994503e-02 3.966304e-01 0.93159
0 3 9.890714e-01 6.886358e-01 -1.466153e+00 -1.451928e-01 -4.153113e-01 2.069758e-02 1.924237e-01 9.012235e-01 5.583555e-02 3.945901e-01 0.95829
0 4 3.852039e+00 -1.176268e+00 -4.278696e-01 2.504448e-01 9.717711e-02 -5.547521e-03 2.740005e-01 9.408564e-01 5.187616e-02 4.049745e-01 0.96932
0 5 6.006599e+00 -2.177471e+00 1.988820e+00 4.797396e-01 1.590386e+00 -2.211532e-02 5.205646e-01 1.016683e+00 3.518574e-02 3.863031e-01 0.97468
0 6 9.362199e+00 -4.631816e+00 2.171715e+00 9.973425e-01 1.628007e+00 -5.584455e-02 5.182470e-01 9.777994e-01 3.918595e-02 3.857173e-01 0.97183
0 7 1.407725e+01 -7.921180e+00 2.665594e+00 1.685897e+00 1.719137e+00 -1.007732e-01 5.024012e-01 8.108719e-01 4.952734e-02 3.557380e-01 0.96121
1 1 -7.359401e+00 5.884180e+00 -2.919064e+00 -1.280964e+00 -7.194898e-01 1.004658e-01 1.848896e-01 4.476771e-01 9.795562e-02 3.246588e-01 0.8113
1 2 -9.211592e-01 1.879919e+00 -2.331452e+00 -4.555934e-01 -9.799721e-01 4.440510e-02 9.607246e-02 5.884254e-01 8.034954e-02 3.569996e-01 0.93061
1 3 2.214357e+00 -1.302451e-01 -1.253408e+00 -2.825422e-02 -6.760874e-01 1.478623e-02 1.114119e-01 6.211463e-01 6.977352e-02 3.569444e-01 0.95885
1 4 4.602497e+00 -1.757413e+00 -5.268525e-01 3.225730e-01 -3.095982e-01 -8.982241e-03 1.642781e-01 7.068561e-01 5.973179e-02 3.663537e-01 0.96771
1 5 5.647597e+00 -2.090594e+00 2.072220e+00 4.521858e-01 1.630797e+00 -2.093337e-02 5.073641e-01 9.731945e-01 2.705271e-02 3.561372e-01 0.9769
1 6 9.041642e+00 -4.539197e+00 2.387552e+00 9.639003e-01 1.697711e+00 -5.406964e-02 5.076273e-01 8.951338e-01 3.382134e-02 3.521105e-01 0.97444
1 7 1.358006e+01 -7.724426e+00 2.883817e+00 1.632235e+00 1.787948e+00 -9.760212e-02 4.934266e-01 7.232415e-01 4.596935e-02 3.247448e-01 0.96432
2 1 -3.874943e+00 4.102231e+00 -1.373509e+00 -1.041292e+00 -4.183570e-01 9.227025e-02 2.361563e-01 -1.138304e-01 1.485082e-01 3.037375e-01 0.7916
2 2 1.135135e+00 8.053878e-01 -1.219720e+00 -3.253353e-01 -8.572429e-01 4.055670e-02 9.456559e-02 1.233136e-01 1.146499e-01 3.188955e-01 0.92407
2 3 3.470548e+00 -8.068562e-01 -5.982210e-01 3.832536e-02 -7.467759e-01 1.380794e-02 7.074457e-02 2.502543e-01 9.399586e-02 3.198763e-01 0.95775
2 4 5.298927e+00 -2.116907e+00 -7.789224e-02 3.310106e-01 -4.724148e-01 -6.974028e-03 9.885310e-02 3.806822e-01 7.604378e-02 3.223885e-01 0.96672
2 5 5.390708e+00 -2.080829e+00 1.977300e+00 4.387184e-01 1.568525e+00 -2.012174e-02 4.844172e-01 9.273081e-01 2.346362e-02 3.360744e-01 0.97834
2 6 8.740577e+00 -4.493841e+00 2.354275e+00 9.429094e-01 1.665505e+00 -5.276517e-02 4.895662e-01 8.388821e-01 3.088775e-02 3.306408e-01 0.97598
2 7 1.312859e+01 -7.579869e+00 2.884980e+00 1.591603e+00 1.778583e+00 -9.498086e-02 4.806610e-01 6.638055e-01 4.400758e-02 3.043388e-01 0.96631
3 1 -3.007360e+00 3.878241e+00 -2.503594e-01 -1.104900e+00 -2.463186e-01 1.027349e-01 2.620712e-01 -5.812605e-01 1.875912e-01 2.802112e-01 0.76887
3 2 1.428129e+00 7.946179e-01 -4.736181e-01 -4.029787e-01 -8.095079e-01 5.002179e-02 9.131136e-02 -2.468938e-01 1.429859e-01 2.920961e-01 0.91249
3 3 3.492771e+00 -7.037845e-01 -6.385505e-02 -4.812099e-02 -7.847110e-01 2.270458e-02 4.455866e-02 -6.340309e-02 1.147853e-01 2.888142e-01 0.95362
3 4 5.076508e+00 -1.881944e+00 3.548632e-01 2.252584e-01 -5.466503e-01 2.435504e-03 5.898316e-02 1.014577e-01 9.081530e-02 2.853255e-01 0.96397
3 5 5.003611e+00 -1.994916e+00 1.763108e+00 4.165897e-01 1.480360e+00 -1.889926e-02 4.619276e-01 9.120572e-01 1.959584e-02 3.219342e-01 0.97861
3 6 8.287509e+00 -4.367077e+00 2.157762e+00 9.136333e-01 1.595472e+00 -5.121285e-02 4.693425e-01 8.258174e-01 2.614249e-02 3.146205e-01 0.97598
3 7 1.256192e+01 -7.383831e+00 2.689740e+00 1.549352e+00 1.722078e+00 -9.259352e-02 4.643921e-01 6.559621e-01 3.934115e-02 2.896038e-01 0.96627
4 1 1.050305e+00 7.848777e-01 9.479916e-01 -3.083059e-01 1.306091e+00 3.153739e-02 4.688779e-01 -2.334505e-01 6.162030e-02 7.291783e-02 0.68054
4 2 4.040210e+00 -1.134349e+00 -1.503822e-01 7.706201e-02 -9.964354e-02 6.942414e-03 1.651045e-01 -2.702495e-02 5.874122e-02 1.475941e-01 0.89639
4 3 4.404270e+00 -1.469072e+00 1.979190e-03 1.664247e-01 -4.765612e-01 -5.731605e-05 3.152533e-02 7.061707e-02 5.191952e-02 1.669078e-01 0.95584
4 4 4.323673e+00 -1.548876e+00 2.847488e-01 2.024510e-01 -4.088867e-01 -3.419682e-03 -1.370144e-03 2.202261e-01 3.701387e-02 1.739477e-01 0.96832
4 5 2.515772e+00 -4.929388e-01 1.519877e+00 1.045296e-01 1.563366e+00 1.304891e-03 4.611017e-01 9.923663e-01 8.081022e-03 3.041743e-01 0.97983
4 6 3.614171e+00 -1.434655e+00 2.191388e+00 3.093934e-01 2.156221e+00 -1.268924e-02 5.007234e-01 1.031511e+00 -8.471922e-03 2.473691e-01 0.97928
4 7 1.181002e+00 8.305165e-02 4.211134e+00 5.334156e-04 3.472403e+00 6.208793e-03 4.135005e-01 8.435557e-01 -4.340292e-02 -2.193345e-02 0.90882
5 1 -3.635509e-01 1.537124e+00 6.839448e-01 -4.374485e-01 1.477097e+00 3.619611e-02 5.420787e-01 -3.200998e-02 4.117443e-02 1.085585e-01 0.66573
5 2 2.730877e+00 -4.037838e-01 -3.262462e-01 -5.815076e-02 8.927091e-02 1.267726e-02 2.225624e-01 1.105053e-01 3.799409e-02 1.519576e-01 0.85975
5 3 3.130800e+00 -7.408152e-01 -8.019317e-03 2.688265e-02 -2.800136e-01 6.361500e-03 6.581481e-02 1.283701e-01 3.327723e-02 1.422884e-01 0.94739
5 4 2.955802e+00 -7.650633e-01 2.303200e-01 5.182572e-02 -2.653012e-01 3.848474e-03 1.075704e-02 2.684518e-01 1.739558e-02 1.381535e-01 0.96545
5 5 2.124358e+00 -4.028353e-01 1.101238e+00 9.212299e-02 1.452351e+00 2.396013e-03 4.613711e-01 1.037621e+00 1.078170e-02 3.217369e-01 0.97517
5 6 5.591571e+00 -2.882277e+00 1.412788e+00 6.001848e-01 1.516588e+00 -3.005855e-02 4.286400e-01 9.915549e-01 1.723768e-02 3.235609e-01 0.95989
5 7 4.501496e-01 3.031204e-01 3.365214e+00 -1.985564e-02 3.409760e+00 6.876650e-03 4.318068e-01 1.082152e+00 -5.730041e-02 1.509701e-03 0.90629
6 1 -1.082603e+00 1.875584e+00 9.310370e-01 -4.950986e-01 1.787906e+00 3.858447e-02 6.521055e-01 1.961963e-02 4.202852e-02 1.523830e-01 0.71376
6 2 2.226915e+00 -1.704689e-01 -3.963971e-01 -1.029844e-01 3.216038e-01 1.479182e-02 3.368555e-01 1.955210e-01 3.696059e-02 1.988433e-01 0.8417
6 3 2.709992e+00 -5.353846e-01 -5.663882e-02 -1.585543e-02 -1.096716e-01 8.494261e-03 1.598446e-01 1.609246e-01 3.461982e-02 1.766625e-01 0.9334
6 4 2.586263e+00 -5.847657e-01 1.943038e-01 1.230286e-02 -1.479929e-01 5.925418e-03 8.955435e-02 2.580448e-01 2.255908e-02 1.664718e-01 0.95531
6 5 1.504292e+00 -1.921066e-01 4.736238e-01 4.748769e-02 1.344912e+00 6.977116e-03 5.404359e-01 1.107407e+00 2.810924e-02 4.008609e-01 0.96374
6 6 4.466400e+00 -2.382881e+00 6.159318e-01 5.076358e-01 1.471883e+00 -2.276222e-02 5.221047e-01 1.166062e+00 2.602738e-02 4.047381e-01 0.94311
6 7 2.351494e-01 1.666321e-01 2.214849e+00 2.901656e-02 3.314877e+00 4.447027e-03 5.253594e-01 1.391421e+00 -5.814033e-02 9.645109e-02 0.91061
7 1 -1.609647e+00 2.138282e+00 9.978059e-01 -5.454796e-01 2.010335e+00 4.129492e-02 7.025682e-01 7.441586e-02 3.843694e-02 1.605779e-01 0.74681
7 2 1.205435e+00 4.161637e-01 -6.240078e-01 -2.239074e-01 5.056336e-01 2.247393e-02 4.051097e-01 2.968045e-01 3.199971e-02 2.171849e-01 0.82391
7 3 1.735397e+00 2.397207e-02 -2.982400e-01 -1.309153e-01 3.509832e-02 1.574091e-02 2.127733e-01 2.436606e-01 2.895244e-02 1.849069e-01 0.91497
7 4 1.747765e+00 -1.078381e-01 -6.498805e-02 -8.557804e-02 -6.364141e-02 1.200978e-02 1.234992e-01 3.194689e-01 1.761723e-02 1.681927e-01 0.94378
7 5 5.800442e-01 2.740269e-01 -1.060418e-01 -3.751396e-02 1.318171e+00 1.332550e-02 5.758035e-01 1.244108e+00 3.008228e-02 4.363843e-01 0.95472
7 6 3.240336e+00 -1.725721e+00 5.217625e-02 3.879902e-01 1.535061e+00 -1.431980e-02 5.734181e-01 1.338752e+00 2.441768e-02 4.362915e-01 0.9364
7 7 -3.419454e-01 3.364636e-01 1.162533e+00 2.115812e-02 3.261958e+00 5.064395e-03 5.677576e-01 1.717444e+00 -7.004800e-02 1.465979e-01 0.91279
8 1 -1.308247e+00 1.888381e+00 9.812949e-01 -4.930137e-01 2.128228e+00 3.763054e-02 7.285720e-01 1.276625e-01 3.701688e-02 1.744106e-01 0.78102
8 2 6.496435e-01 7.090310e-01 -9.210358e-01 -2.847685e-01 6.275309e-01 2.650405e-02 4.616605e-01 4.027105e-01 2.954339e-02 2.436590e-01 0.81642
8 3 1.224925e+00 2.956671e-01 -5.856631e-01 -1.880907e-01 1.435859e-01 1.951742e-02 2.621966e-01 3.290408e-01 2.645165e-02 2.028016e-01 0.89064
8 4 1.362127e+00 9.646588e-02 -2.953666e-01 -1.299670e-01 2.750683e-02 1.496309e-02 1.621236e-01 3.674189e-01 1.694558e-02 1.768604e-01 0.92377
8 5 4.360637e-02 5.273507e-01 -5.878643e-01 -8.277919e-02 1.336013e+00 1.756848e-02 6.253131e-01 1.358198e+00 3.779334e-02 4.803578e-01 0.94084
8 6 2.251631e+00 -1.218175e+00 -5.475816e-01 3.014285e-01 1.634329e+00 -7.652272e-03 6.444620e-01 1.540731e+00 2.635283e-02 4.850513e-01 0.92387
8 7 -6.076540e-01 3.305252e-01 1.344023e-01 4.882027e-02 3.284135e+00 3.630749e-03 6.318609e-01 2.061262e+00 -8.015682e-02 2.045887e-01 0.91829
9 1 -5.066641e-01 1.329285e+00 9.461658e-01 -3.763959e-01 2.240101e+00 2.978094e-02 7.368882e-01 1.972342e-01 3.197202e-02 1.768036e-01 0.81157
9 2 9.160371e-02 1.003259e+00 -1.122999e+00 -3.433058e-01 8.014770e-01 3.033769e-02 5.132520e-01 4.999701e-01 2.610510e-02 2.586349e-01 0.82344
9 3 6.441270e-01 6.074673e-01 -8.280261e-01 -2.502328e-01 3.001515e-01 2.354826e-02 3.102515e-01 4.242577e-01 2.217524e-02 2.137150e-01 0.86982
9 4 9.621762e-01 3.067436e-01 -4.664940e-01 -1.727513e-01 1.712550e-01 1.775645e-02 2.018135e-01 4.221124e-01 1.508525e-02 1.801912e-01 0.90089
9 5 -5.807546e-01 8.358583e-01 -9.227595e-01 -1.324629e-01 1.486863e+00 2.185117e-02 6.931474e-01 1.487158e+00 4.400470e-02 5.199000e-01 0.92804
9 6 9.998263e-01 -5.206567e-01 -9.205089e-01 1.792725e-01 1.868123e+00 1.036909e-03 7.270077e-01 1.728786e+00 2.663989e-02 5.212735e-01 0.91573
9 7 -1.395993e+00 6.936340e-01 -5.686723e-01 6.332513e-04 3.424236e+00 7.041325e-03 7.011817e-01 2.342476e+00 -8.797079e-02 2.473096e-01 0.92407
10 1 6.079346e-01 5.764501e-01 8.115558e-01 -2.184695e-01 2.348472e+00 1.915197e-02 7.311287e-01 3.106179e-01 2.082615e-02 1.686596e-01 0.838
10 2 -7.268649e-01 1.466423e+00 -1.092733e+00 -4.325192e-01 1.038170e+00 3.593924e-02 5.489411e-01 5.537068e-01 2.192478e-02 2.517090e-01 0.83772
10 3 2.556828e-01 7.956948e-01 -1.003445e+00 -2.837742e-01 5.126386e-01 2.553391e-02 3.518927e-01 5.288852e-01 1.452997e-02 2.116595e-01 0.86064
10 4 7.065774e-01 4.202723e-01 -6.093885e-01 -1.918654e-01 3.621433e-01 1.879915e-02 2.357079e-01 5.007935e-01 8.970089e-03 1.736803e-01 0.88263
10 5 -9.592237e-01 1.000408e+00 -1.082641e+00 -1.462954e-01 1.769092e+00 2.324966e-02 7.676456e-01 1.630683e+00 4.522151e-02 5.448054e-01 0.92157
10 6 -5.821246e-02 8.253724e-02 -1.046605e+00 8.037739e-02 2.225443e+00 7.622167e-03 8.093918e-01 1.903019e+00 2.274182e-02 5.378206e-01 0.91615
10 7 -2.338184e+00 1.223269e+00 -7.855223e-01 -8.592414e-02 3.651338e+00 1.259107e-02 7.501100e-01 2.502162e+00 -9.368997e-02 2.579936e-01 0.93182
11 1 1.584285e+00 -9.233146e-02 4.913702e-01 -7.566464e-02 2.481888e+00 9.490070e-03 7.329837e-01 4.912006e-01 3.914822e-03 1.591060e-01 0.85812
11 2 -1.233141e+00 1.724787e+00 -9.581509e-01 -4.738511e-01 1.320496e+00 3.796105e-02 5.644950e-01 6.102217e-01 1.219256e-02 2.222982e-01 0.85947
11 3 1.827068e-01 7.765828e-01 -1.183786e+00 -2.699423e-01 7.669048e-01 2.408920e-02 3.812356e-01 6.690887e-01 2.258100e-04 1.942093e-01 0.86762
11 4 6.871972e-01 3.742267e-01 -7.970033e-01 -1.730136e-01 5.907723e-01 1.701475e-02 2.586978e-01 6.313598e-01 -4.919297e-03 1.544219e-01 0.87667
11 5 -9.381048e-01 9.140218e-01 -1.186769e+00 -1.013428e-01 2.127841e+00 1.981853e-02 8.292878e-01 1.814266e+00 3.555110e-02 5.461664e-01 0.92408
11 6 -4.972398e-01 3.141483e-01 -1.051545e+00 5.992790e-02 2.625577e+00 8.281900e-03 8.692018e-01 2.075338e+00 1.109089e-02 5.285473e-01 0.92303
11 7 -2.912724e+00 1.566532e+00 -7.009429e-01 -1.358704e-01 3.905999e+00 1.504579e-02 7.656498e-01 2.588276e+00 -1.026651e-01 2.357367e-01 0.94093
12 1 3.795952e+00 -1.609479e+00 -3.957772e-01 2.542400e-01 2.626393e+00 -1.324950e-02 7.607905e-01 8.914563e-01 -3.213962e-02 1.607167e-01 0.81496
12 2 9.414399e-01 7.276113e-03 -2.638211e+00 -4.937976e-02 2.464290e+00 5.299087e-03 5.154030e-01 1.694110e+00 -1.217717e-01 1.060381e-02 0.92512
12 3 -1.826336e+00 2.001220e+00 -1.062499e+00 -5.044658e-01 1.421460e+00 3.846524e-02 3.475318e-01 8.563474e-01 -3.515948e-02 7.200227e-02 0.93612
12 4 1.091590e-01 6.744938e-01 -1.006791e+00 -2.180554e-01 1.035742e+00 1.896149e-02 2.513562e-01 8.626383e-01 -3.680987e-02 8.137101e-02 0.90779
12 5 -2.650530e-01 3.934711e-01 -1.363908e+00 4.468812e-02 2.491388e+00 8.265016e-03 8.734229e-01 2.064115e+00 1.198487e-02 5.295803e-01 0.92943
12 6 1.403355e+00 -9.923685e-01 -1.340616e+00 3.576970e-01 2.578394e+00 -1.248317e-02 9.303741e-01 2.201778e+00 1.047900e-02 5.851626e-01 0.89932
12 7 2.509748e+00 -1.973148e+00 -1.179463e+00 5.883023e-01 2.987827e+00 -2.935602e-02 1.016046e+00 2.393637e+00 -1.646417e-02 5.747707e-01 0.92602
13 1 4.746942e+00 -2.558156e+00 -1.186727e+00 4.880320e-01 2.900345e+00 -2.996777e-02 6.265849e-01 1.273604e+00 -7.816415e-02 5.587602e-02 0.88353
13 2 4.831451e+00 -2.725641e+00 -2.085771e+00 5.398800e-01 2.502417e+00 -3.465279e-02 3.921621e-01 1.614453e+00 -1.319834e-01 -6.435277e-02 0.95219
13 3 3.992977e+00 -2.157449e+00 -1.657250e+00 4.149461e-01 1.869796e+00 -2.561423e-02 2.871417e-01 1.343384e+00 -1.034915e-01 -3.756972e-02 0.95133
13 4 2.400486e+00 -1.090502e+00 -1.266064e+00 1.786835e-01 1.361613e+00 -8.450192e-03 2.460354e-01 1.080190e+00 -6.583955e-02 2.563368e-02 0.92219
13 5 -9.144487e-01 8.800053e-01 -1.485896e+00 -2.665061e-02 2.909180e+00 1.033890e-02 9.077201e-01 2.325317e+00 -2.244458e-02 4.854397e-01 0.94403
13 6 -3.069663e+00 2.301743e+00 -2.144761e+00 -3.388892e-01 2.369229e+00 3.262324e-02 9.135671e-01 2.383983e+00 -4.136037e-03 5.984553e-01 0.9073
13 7 -1.111552e+01 7.990864e+00 -3.070002e+00 -1.549400e+00 1.867375e+00 1.118725e-01 9.435024e-01 2.560170e+00 -1.446240e-02 6.814598e-01 0.88757
14 1 1.242540e+00 -2.369845e-01 -1.419802e+00 8.260015e-03 2.972485e+00 1.156809e-03 5.212952e-01 1.408200e+00 -1.014738e-01 -2.296493e-02 0.89436
14 2 1.368811e+00 -4.145601e-01 -2.280856e+00 5.740560e-02 2.450007e+00 -2.985853e-03 3.109832e-01 1.682819e+00 -1.424004e-01 -1.039245e-01 0.96163
14 3 1.228625e+00 -3.074402e-01 -1.908024e+00 3.084710e-02 1.861609e+00 -7.324499e-04 2.322341e-01 1.454456e+00 -1.170960e-01 -6.583994e-02 0.95624
14 4 9.318505e-01 -8.583122e-02 -1.478347e+00 -2.767564e-02 1.499320e+00 4.344851e-03 2.301372e-01 1.230955e+00 -8.575766e-02 -3.931169e-03 0.94057
14 5 1.432796e+00 -7.039165e-01 -1.539426e+00 3.447957e-01 3.325479e+00 -1.743970e-02 9.270213e-01 2.570657e+00 -6.012586e-02 4.282312e-01 0.94925
14 6 8.747952e+00 -5.660286e+00 -1.996150e+00 1.383810e+00 2.770045e+00 -8.538469e-02 9.037252e-01 2.494688e+00 -4.085627e-02 5.001229e-01 0.86508
14 7 2.967365e+00 -1.483125e+00 -2.468505e+00 4.985097e-01 2.507725e+00 -2.839179e-02 9.571531e-01 2.559916e+00 -4.860509e-02 5.434484e-01 0.85267
15 1 1.994942e+00 -6.555317e-01 -9.786397e-01 9.222377e-02 2.990598e+00 -4.051119e-03 5.103009e-01 1.267481e+00 -9.162911e-02 -3.369420e-02 0.89008
15 2 1.963584e+00 -7.859760e-01 -2.306874e+00 1.416005e-01 2.554944e+00 -8.779408e-03 3.022099e-01 1.737735e+00 -1.513754e-01 -1.268836e-01 0.9563
15 3 1.458040e+00 -4.180364e-01 -1.778768e+00 5.630375e-02 1.907537e+00 -2.330213e-03 2.224492e-01 1.426373e+00 -1.165456e-01 -7.909000e-02 0.95286
15 4 2.745900e+00 -1.327290e+00 -1.972995e+00 2.563148e-01 1.682334e+00 -1.611474e-02 2.078639e-01 1.520069e+00 -1.218943e-01 -4.948951e-02 0.93794
15 5 1.826867e+00 -9.040947e-01 -1.520564e+00 4.006556e-01 3.544015e+00 -2.263840e-02 9.401753e-01 2.685085e+00 -7.891802e-02 4.005182e-01 0.94983
15 6 9.225378e+00 -5.938465e+00 -2.074143e+00 1.459699e+00 3.004200e+00 -9.190359e-02 9.271197e-01 2.649451e+00 -6.093322e-02 4.793848e-01 0.86549
15 7 3.091311e+00 -1.498489e+00 -2.306911e+00 5.168763e-01 2.754097e+00 -3.127343e-02 9.674050e-01 2.639684e+00 -6.627824e-02 5.084716e-01 0.85807
16 1 3.455794e+00 -1.486545e+00 -4.241646e-01 2.553252e-01 2.897685e+00 -1.393926e-02 4.953749e-01 1.043180e+00 -7.239104e-02 -3.229877e-02 0.88572
16 2 2.605198e+00 -1.151861e+00 -2.244961e+00 2.195486e-01 2.583749e+00 -1.359540e-02 2.940331e-01 1.720754e+00 -1.512312e-01 -1.393042e-01 0.94717
16 3 1.819715e+00 -5.552250e-01 -1.492429e+00 7.627047e-02 1.815396e+00 -2.575816e-03 2.228693e-01 1.269107e+00 -9.778961e-02 -6.373164e-02 0.94071
16 4 1.261123e+00 -1.728350e-01 -1.180981e+00 -1.378674e-02 1.472255e+00 4.495729e-03 2.288470e-01 1.093459e+00 -6.965027e-02 1.820913e-03 0.92354
16 5 1.662075e+00 -5.909674e-01 -1.032808e+00 3.036510e-01 3.420686e+00 -1.432329e-02 9.422668e-01 2.419683e+00 -5.111377e-02 4.185730e-01 0.93755
16 6 9.062144e+00 -5.641461e+00 -1.647969e+00 1.368319e+00 2.893126e+00 -8.392754e-02 9.367054e-01 2.409603e+00 -3.379991e-02 5.021036e-01 0.83891
16 7 2.972117e+00 -1.238725e+00 -1.911060e+00 4.349748e-01 2.623632e+00 -2.402190e-02 9.727758e-01 2.407711e+00 -3.981335e-02 5.321525e-01 0.8466
17 1 6.065370e+00 -3.036624e+00 -1.204084e-02 5.641127e-01 2.756971e+00 -3.286474e-02 4.653054e-01 8.509755e-01 -5.683971e-02 -3.862627e-02 0.87655
17 2 3.923084e+00 -1.907470e+00 -2.128049e+00 3.679667e-01 2.450187e+00 -2.189545e-02 2.936385e-01 1.601658e+00 -1.351094e-01 -1.236702e-01 0.92472
17 3 1.621678e+00 -2.015437e-01 -6.733390e-01 -3.357035e-02 1.440304e+00 8.215388e-03 2.342461e-01 7.653045e-01 -3.439944e-02 5.107264e-03 0.91385
17 4 1.111911e+00 1.243552e-01 -5.019693e-01 -1.052095e-01 1.134048e+00 1.355940e-02 2.308674e-01 6.698652e-01 -1.594878e-02 5.972407e-02 0.88394
17 5 1.171040e+00 2.179109e-02 -3.418401e-01 1.133921e-01 2.971660e+00 2.502135e-03 9.291163e-01 1.896026e+00 9.636920e-03 4.748118e-01 0.90471
17 6 8.870233e+00 -5.244293e+00 -1.112737e+00 1.223429e+00 2.451975e+00 -6.975656e-02 9.381103e-01 1.935719e+00 2.655594e-02 5.699057e-01 0.78699
17 7 3.603072e+00 -1.434442e+00 -1.722606e+00 4.197760e-01 2.101419e+00 -1.864621e-02 9.686834e-01 2.030916e+00 1.464855e-02 6.122669e-01 0.83498
18 1 1.082397e+01 -5.971812e+00 -4.770886e-01 1.146059e+00 2.577005e+00 -6.751868e-02 4.057286e-01 8.848197e-01 -6.116616e-02 -7.655496e-02 0.83153
18 2 6.135939e+00 -3.064357e+00 -1.681550e+00 5.476164e-01 1.634596e+00 -2.726348e-02 2.771841e-01 1.006313e+00 -5.079006e-02 -1.432417e-02 0.83838
18 3 1.724760e+00 -8.414072e-03 -3.017019e-01 -1.301847e-01 7.400475e-01 2.104870e-02 2.109209e-01 2.444144e-01 4.343080e-02 1.012454e-01 0.85289
18 4 2.442864e+00 -5.483793e-01 -5.906341e-01 -6.476063e-03 5.333321e-01 1.231103e-02 2.145601e-01 3.817825e-01 3.833219e-02 1.472210e-01 0.83846
18 5 1.344505e+00 9.699000e-02 -6.337698e-01 5.032382e-02 2.277124e+00 1.129115e-02 9.031503e-01 1.626972e+00 5.876497e-02 5.640278e-01 0.85781
18 6 9.111154e+00 -5.207996e+00 -1.319588e+00 1.167587e+00 1.705285e+00 -6.148092e-02 8.926992e-01 1.622487e+00 7.799110e-02 6.537055e-01 0.76933
18 7 3.367097e+00 -1.055645e+00 -1.444817e+00 2.936776e-01 1.358158e+00 -6.841480e-03 8.764732e-01 1.570444e+00 6.559029e-02 6.528755e-01 0.85086
19 1 1.045173e+01 -4.941345e+00 2.088544e+00 7.367673e-01 1.576796e+00 -2.060349e-02 3.211369e-01 -6.732945e-01 1.313307e-01 3.047173e-02 0.62546
19 2 3.703805e+00 -5.375542e-01 8.605968e-01 -2.386394e-01 2.000571e-01 5.240058e-02 2.518747e-01 -8.312531e-01 2.203020e-01 2.530480e-01 0.76811
19 3 3.272628e+00 -4.558256e-01 -2.863843e-01 -2.107743e-01 -2.212628e-01 4.758636e-02 2.495467e-01 -3.954864e-01 1.979005e-01 3.445951e-01 0.83723
19 4 4.151799e+00 -1.163107e+00 -7.815812e-01 -3.602281e-02 -3.369410e-01 3.421682e-02 2.939125e-01 -1.301308e-01 1.840421e-01 4.095479e-01 0.84295
19 5 2.430916e+00 -5.853717e-01 -1.651001e+00 1.615683e-01 1.360993e+00 7.354574e-03 8.027559e-01 1.550617e+00 8.437972e-02 6.310538e-01 0.8633
19 6 9.589982e+00 -5.442344e+00 -1.902050e+00 1.178300e+00 7.118181e-01 -5.898198e-02 7.425192e-01 1.372019e+00 1.104244e-01 6.923491e-01 0.81967
19 7 3.876753e+00 -1.278652e+00 -1.571642e+00 3.014910e-01 3.879600e-01 -5.220906e-03 6.825008e-01 1.184419e+00 9.635829e-02 6.463116e-01 0.89373
20 1 8.818423e+00 -3.885616e+00 1.977768e+00 5.171642e-01 3.360926e-01 -6.969479e-05 3.518455e-01 -1.178708e+00 2.501245e-01 2.894164e-01 0.40912
20 2 4.558966e+00 -1.402672e+00 -9.888905e-01 3.233388e-02 -4.743974e-01 3.207056e-02 1.957514e-01 -3.101404e-01 2.026337e-01 3.518889e-01 0.78776
20 3 5.278691e+00 -1.976263e+00 -2.136653e+00 1.696426e-01 -8.774690e-01 2.264980e-02 2.230293e-01 1.211223e-01 1.908473e-01 4.719043e-01 0.87646
20 4 7.510426e+00 -3.536111e+00 -2.728126e+00 5.091674e-01 -9.617025e-01 -2.305694e-05 2.979294e-01 4.199695e-01 1.829638e-01 5.617737e-01 0.8876
20 5 4.348244e+00 -1.800377e+00 -2.704540e+00 3.893456e-01 3.900327e-01 -2.537683e-03 6.766795e-01 1.545482e+00 1.164300e-01 7.199897e-01 0.91058
20 6 1.124045e+01 -6.484624e+00 -2.881991e+00 1.373009e+00 -2.577640e-01 -6.712893e-02 6.072741e-01 1.352019e+00 1.407729e-01 7.735385e-01 0.8799
20 7 4.717043e+00 -1.737761e+00 -1.978561e+00 3.722787e-01 -4.753988e-01 -6.761770e-03 5.151032e-01 1.002508e+00 1.231074e-01 6.697817e-01 0.92448
21 1 4.640154e+00 -2.058466e+00 -2.997508e+00 3.229845e-01 -3.267520e-01 7.138257e-03 5.035649e-01 4.081604e-01 1.794630e-01 5.391301e-01 0.67759
21 2 2.121799e+00 -2.683458e-01 -3.333377e+00 -5.632149e-02 -8.802042e-01 2.931274e-02 1.548270e-01 6.114660e-01 1.282823e-01 4.094238e-01 0.86667
21 3 6.281104e+00 -3.060090e+00 -3.968978e+00 5.414357e-01 -1.146769e+00 -1.021923e-02 1.901006e-01 9.614629e-01 1.213459e-01 5.239261e-01 0.92484
21 4 8.247573e+00 -4.368089e+00 -3.711893e+00 8.203851e-01 -1.261375e+00 -2.889684e-02 2.156578e-01 9.815061e-01 1.295587e-01 5.859233e-01 0.94245
21 5 6.556615e+00 -3.025109e+00 -2.490098e+00 6.514720e-01 -2.943759e-02 -2.238848e-02 5.300817e-01 1.512397e+00 1.036178e-01 6.936618e-01 0.94549
21 6 6.664013e+00 -3.151148e+00 -2.756030e+00 6.730895e-01 -5.035543e-01 -2.392248e-02 5.015257e-01 1.426664e+00 1.177137e-01 7.459815e-01 0.93986
21 7 2.741656e+00 -2.924058e-01 -2.782497e+00 4.167455e-02 -1.626785e+00 1.572766e-02 2.898142e-01 9.446596e-01 1.393182e-01 7.278788e-01 0.93601
22 1 -4.590363e+00 3.849984e+00 -3.307862e+00 -8.018508e-01 -5.969935e-01 6.933581e-02 2.171315e-01 6.565482e-01 9.535360e-02 3.544051e-01 0.77153
22 2 -2.609181e+00 2.507249e+00 -5.141995e+00 -5.146901e-01 -1.571547e+00 5.116362e-02 1.732032e-01 1.201708e+00 8.957430e-02 5.433813e-01 0.9206
22 3 1.080075e+00 3.236284e-01 -3.718565e+00 -5.764888e-02 -1.178401e+00 1.959957e-02 1.928291e-01 1.188282e+00 8.092215e-02 5.341144e-01 0.95301
22 4 4.479736e+00 -1.791747e+00 -2.497527e+00 3.837625e-01 -7.002120e-01 -9.574992e-03 2.565598e-01 1.158360e+00 8.196728e-02 5.367337e-01 0.96423
22 5 7.115847e+00 -2.980620e+00 8.689305e-02 6.478047e-01 7.610591e-01 -2.795117e-02 4.940494e-01 1.193656e+00 7.286485e-02 5.257920e-01 0.96623
22 6 1.034411e+01 -5.385961e+00 1.065337e-01 1.161385e+00 7.716018e-01 -6.171584e-02 4.895997e-01 1.208054e+00 7.274434e-02 5.283104e-01 0.96174
22 7 1.527996e+01 -8.823876e+00 5.233917e-01 1.885894e+00 9.122497e-01 -1.095329e-01 4.840952e-01 1.107045e+00 7.496147e-02 4.965770e-01 0.94807
23 1 -9.118623e+00 6.724055e+00 -3.965887e+00 -1.375352e+00 -8.335226e-01 1.046972e-01 2.009562e-01 8.231081e-01 8.124472e-02 3.770911e-01 0.80809
23 2 -3.740883e+00 3.467194e+00 -3.997971e+00 -7.001114e-01 -1.237937e+00 5.863242e-02 1.334926e-01 1.117128e+00 6.114471e-02 4.509570e-01 0.93032
23 3 5.376906e-01 9.692099e-01 -2.061007e+00 -1.823208e-01 -5.029704e-01 2.387347e-02 2.247389e-01 1.071661e+00 5.850010e-02 4.461086e-01 0.95698
23 4 3.849287e+00 -1.117338e+00 -7.892561e-01 2.559385e-01 8.074780e-02 -5.009557e-03 3.151658e-01 1.066045e+00 5.871764e-02 4.531396e-01 0.96842
23 5 6.541330e+00 -2.454719e+00 1.481125e+00 5.404290e-01 1.340032e+00 -2.428085e-02 5.163963e-01 1.067777e+00 5.079943e-02 4.374805e-01 0.9716
23 6 9.778494e+00 -4.861683e+00 1.537440e+00 1.053167e+00 1.355536e+00 -5.796366e-02 5.118758e-01 1.069339e+00 5.137129e-02 4.387966e-01 0.96811
23 7 1.467645e+01 -8.275620e+00 1.985977e+00 1.769856e+00 1.458738e+00 -1.049781e-01 4.988052e-01 9.306013e-01 5.826336e-02 4.082278e-01 0.95666

View File

@@ -1,110 +1,173 @@
import numpy as np
import h5py as h5
from scipy.interpolate import RectBivariateSpline
from kaipy.rcm.wmutils.wmData import wmParams
#
def genWM(params, useWM=True):
def genWM(params):
import os
fInChorus = 'DWang_chorus_lifetime.h5'
fInChorus = 'chorus_polynomial.txt'
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
fInChorus = os.path.join(__location__,fInChorus)
fInTDS = 'tauTDS.txt'
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
fInTDS = os.path.join(__location__,fInTDS)
print("Reading %s and %s"%(fInChorus,fInTDS))
if useWM:
return readWM(params,fInChorus,fInTDS)
else:
return toyWM(params)
print("Reading %s"%fInChorus)
return genChorus(params,fInChorus)
# Add wpi-induced electron lifetime model to input file and create an output file
# Writes arrays to file in rcmconfig.h5 format
def genh5(fIn, fOut, inputParams, useWM=True):
def genh5(fIn, fOut, inputParams):
if fIn != fOut:
oH5 = h5.File(fOut, 'w')
iH5 = h5.File(fIn,'r')
iH5 = h5.File(fIn, 'r')
for Q in iH5.keys():
sQ = str(Q)
oH5.create_dataset(sQ, data=iH5[sQ])
oH5.attrs.update(iH5.attrs)
else:
oH5 = h5.File(fOut, 'r+')
if not ('Tau1i' in oH5.keys()):
kpi, mlti, li, eki, tau1i, tau2i, ekTDSi, tauTDSi = genWM(inputParams, useWM = useWM)
if not ('Taui' in oH5.keys()):
kpi, mlti, li, eki, taui = genWM(inputParams)
attrs = inputParams.getAttrs()
oH5.create_dataset('Kpi', data=kpi)
oH5.create_dataset('MLTi', data=mlti)
oH5.create_dataset('Li', data=li)
oH5.create_dataset('Eki', data=eki)
oH5.create_dataset('Tau1i', data=tau1i)
oH5.create_dataset('Tau2i', data=tau2i)
oH5.create_dataset('EkTDSi', data=ekTDSi)
oH5.create_dataset('TauTDSi', data=tauTDSi)
oH5.create_dataset('Taui', data=taui)
for key in attrs.keys():
oH5.attrs[key] = attrs[key]
oH5.close()
def readWM(params,fInChorus,fInTDS):
    # Add electron lifetime for the chorus wave loss
    f5 = h5.File(fInChorus, 'r')
    kpi = f5['Kp_1D'][:][0]
    mlti = np.append(f5['MLT_1D'][:][0], 24.)
    li = f5['L_1D'][:][0]
    eki = 10.**(f5['E_1D'][:][0]) # in MeV
    print('shape of eki:', eki.shape)
    tau1i = (10.**(f5['Tau1_4D'][:]))*24.*3600. # in seconds
    tau2i = (10.**(f5['Tau2_4D'][:]))*24.*3600.
    nk,nm,nl,ne = tau1i.shape
    # Expand MLT from 0:23 to 0:24 by wrapping the first MLT slice
    tau1ai = np.array([np.append(tau1i[0,:,:,:],np.array([tau1i[0,0,:,:]]),0)])
    tau2ai = np.array([np.append(tau2i[0,:,:,:],np.array([tau2i[0,0,:,:]]),0)])
    for i in range(1,7):
        tau1ai = np.append(tau1ai,np.array([np.append(tau1i[i,:,:,:],np.array([tau1i[i,0,:,:]]),0)]),0)
        tau2ai = np.append(tau2ai,np.array([np.append(tau2i[i,:,:,:],np.array([tau2i[i,0,:,:]]),0)]),0)
    tau1ai = tau1ai.T
    tau2ai = tau2ai.T
    f5.close()
    # Add electron lifetime for the Time Domain Structure (TDS) loss
    tdmArrays = np.loadtxt(fInTDS)
    ekTDSi = tdmArrays[:,0].T/1.e6 # in MeV
    print('shape of ekTDSi:', ekTDSi.shape)
    tauTDSi = tdmArrays[:,2].T*24.*3600. # in seconds; electron lifetime against TDS with Ew = 4 mV/m
    print('tauTDSi[0]:', tauTDSi[0])
    return kpi,mlti,li,eki,tau1ai,tau2ai,ekTDSi,tauTDSi
# Read the parameters of the polynomial fit (Wang et al., 2023)
def readPoly(fIn):
    table = []
    with open(fIn, 'r') as file:
        # Skip the header row
        next(file)
        for line in file:
            row = line.strip().split('\t')[2:-1] # discard the first two columns and the last
            row = [float(x) for x in row] # convert the strings to float
            rowLen = len(row)
            table.append(np.array(row))
    return (rowLen, np.array(table))
#Chorus polynomial fit for the electron lifetime
def ChorusPoly(Li,Eki,polyArray):
    # 3rd-order polynomial fit coefficients for the electron lifetime caused by
    # interaction with chorus waves (DOI to be provided).
    # Dedong Wang et al., in preparation
    # For each Kp (0,1,2,...,maxKp) and each MLT (0,1,2,...,23), tau has a
    # polynomial fit in Ek and L.
lenKp,lenMLT,lenParam = polyArray.shape
#Extend polyArray
polyArrayX = polyArray[:,:,:,np.newaxis,np.newaxis]
#Extend Li and Ki
lenL = len(Li)
lenEki = len(Eki)
Lx = np.tile(Li, (lenEki, 1)).T
Lx = Lx[np.newaxis,np.newaxis,:,:]
Ex = np.tile(Eki, (lenL, 1))
Ex = Ex[np.newaxis,np.newaxis,:,:]
tau = np.ones((lenKp,lenMLT,lenL,lenEki))
    c0 = polyArrayX[:,:,0,:,:] # intercept
    c1 = polyArrayX[:,:,1,:,:] # L
    c2 = polyArrayX[:,:,2,:,:] # log10(E)
    c3 = polyArrayX[:,:,3,:,:] # L^2
    c4 = polyArrayX[:,:,4,:,:] # log10(E)^2
    c5 = polyArrayX[:,:,5,:,:] # L^3
    c6 = polyArrayX[:,:,6,:,:] # log10(E)^3
    c7 = polyArrayX[:,:,7,:,:] # log10(E)*L
    c8 = polyArrayX[:,:,8,:,:] # log10(E)*L^2
    c9 = polyArrayX[:,:,9,:,:] # log10(E)^2*L
tau = c0*tau+\
c1*Lx+c2*Ex+\
c3*np.power(Lx,2)+\
c4*np.power(Ex,2)+\
c5*np.power(Lx,3)+\
c6*np.power(Ex,3)+\
c7*Lx*Ex+\
c8*np.power(Lx,2)*Ex+\
c9*Lx*np.power(Ex,2) #in log10(days)
tau = 10.0**tau*(60.*60.*24.) #in seconds
return tau
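# For reference, the fit evaluated above in compact form, writing x = L and
# y = log10(E) (names local to this comment):
#   log10(tau_days) = c0 + c1*x + c2*y + c3*x^2 + c4*y^2 + c5*x^3 + c6*y^3
#                     + c7*x*y + c8*x^2*y + c9*x*y^2
# i.e. all ten monomials of total degree <= 3 in (L, log10(E)); the result is
# then converted from log10(days) to seconds.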
def ReSample(L,MLT,Qp,xMLT):
Nr,Np = Qp.shape
#Add ghosts in MLT to handle periodic boundary
Ng = 2
Npg = Np+Ng*2
gMLT = np.arange(0-Ng,24+Ng+1)
Qpg = np.zeros((Nr,Npg))
#Set center and then left/right strips
Qpg[:,2:-2] = Qp
Qpg[:,1] = Qp[:,-1]
Qpg[:,0] = Qp[:,-2]
Qpg[:,-1] = Qp[:,0]
Qpg[:,-2] = Qp[:,1]
Q = np.log10(Qpg)
upQ = RectBivariateSpline(L,gMLT,Q,s=10)
Qu = upQ(L,xMLT)
xQp = 10.0**(Qu)
#Enforce equality at overlap point
tauP = 0.5*(xQp[:,0]+xQp[:,-1])
xQp[:, 0] = tauP
xQp[:,-1] = tauP
return xQp
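# A minimal usage sketch of ReSample (hypothetical placeholder values; the
# shapes are chosen to satisfy the ghost logic above, which expects 25 MLT
# columns covering 0..24):
#   L    = np.linspace(3.0, 7.0, 41)
#   MLT  = np.linspace(0.0, 24.0, 25)
#   Qp   = np.full((41, 25), 1.0e5)     # placeholder lifetime table, seconds
#   xMLT = np.linspace(0.0, 24.0, 97)
#   Qx   = ReSample(L, MLT, Qp, xMLT)   # -> shape (41, 97), periodic in MLT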
def genChorus(params,fInChorus):
print("Dimension of parameters in Chorus wave model, Kp:",params.nKp,"MLT:",params.nMLT,"L:",params.nL,"Ek:",params.nEk)
dimKp = params.nKp #maximum Kp allowed
rowLen,paramArray = readPoly(fInChorus)
polyArray = paramArray.reshape(24,7,rowLen) #dim MLT: 24, Dim Kp: 7
polyArray = polyArray[:,:dimKp,:] #use Kp = 1,2,...,maxKp
polyArray = polyArray.transpose(1, 0, 2) #shape (,24,rowLen)
lenMLT = 24
#Kpi
    startValue = 1.0 #Kp index as a real number
endValue = float(dimKp)
lenKp = dimKp
Kpi = np.linspace(startValue, endValue, num=lenKp)
#Eki
startValue = 1.0e-3 #in MeV
endValue = 2.0
lenEk = 155
Eki = np.linspace(np.log10(startValue), np.log10(endValue), lenEk) #in log10(MeV)
#Li
startValue = 3.0 #in Re
endValue = 7.0
lenL = 41
Li = np.linspace(startValue, endValue, num=lenL)
#Tau from polynomial fit
tauP = ChorusPoly(Li,Eki,polyArray)
#expand MLT from 0-23 to 0-24
extraMLT0 = tauP[:, 0, :, :][:,np.newaxis,:,:]
tauE = np.concatenate((tauP, extraMLT0), axis=1)
tauE = tauE.T
    #Interpolation in the MLT dimension
xFac = 4
lenMLTx = lenMLT*xFac+1 # 97
MLTi = np.linspace(0,24,lenMLT+1)
xMLTi = np.linspace(0,24,lenMLTx)
tauX = np.zeros((lenEk,lenL,lenMLTx,lenKp))
# Smoothing in MLT
for i, j in np.ndindex(tauX.shape[0], tauX.shape[3]):
Q = tauE[i, :, :, j]
tauX[i, :, :, j] = ReSample(Li, MLTi, Q, xMLTi)
Eki = 10.0**Eki #in MeV
return Kpi,xMLTi,Li,Eki,tauX
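# Note on the returned shapes (with the defaults assumed elsewhere in this
# module, nKp = 6): Kpi (6,), xMLTi (97,), Li (41,), Eki (155,), and
# tauX (155, 41, 97, 6), ordered (Ek, L, MLT, Kp).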
def toyWM(params):
nKpi = params.nKp
nMLTi = params.nMLT
nLi = params.nL
nEki = params.nEk
kpi = np.linspace(1,7,nKpi)
mlti = np.linspace(0,24,nMLTi) #Note the dimension of MLT is 25
li = np.linspace(3.,7.,nLi)
eki = np.exp(np.linspace(-3,0.1,nEki)) #in MeV
#print ("kpi",kpi,"mlti",mlti,"li",li,"eki",eki)
tau1i = np.zeros((nKpi,nMLTi,nLi,nEki))
tau2i = np.zeros((nKpi,nMLTi,nLi,nEki)).T
tau1i = kpi[:,None,None,None]*mlti[None,:,None,None]*li[None,None,:,None]*eki[None,None,None,:]
tau1i = tau1i.T
return kpi,mlti,li,eki,tau1i,tau2i
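# A minimal driver sketch for genh5 (hypothetical file names; wmParams comes
# from the import at the top of this file):
#   params = wmParams()                               # nKp=6, nMLT=97, nL=41, nEk=155
#   genh5('rcmconfig.h5', 'rcmconfig_wm.h5', params)  # writes Kpi/MLTi/Li/Eki/Taui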

View File

@@ -4,14 +4,12 @@ import numpy as np
class wmParams:
#All energies in eV
def __init__(self, dim = 4, nKp = 7, nMLT = 25, nL = 41, nEk = 155, dimTDS = 1, nEkTDS = 109):
def __init__(self, dim = 4, nKp = 6, nMLT = 97, nL = 41, nEk = 155):
self.dim = dim
self.nKp = nKp
self.nMLT = nMLT
self.nL = nL
self.nEk = nEk
self.dimTDS = dimTDS
self.nEkTDS = nEkTDS
def getAttrs(self):
return {
'tauDim': self.dim,
@@ -19,7 +17,5 @@ class wmParams:
'nMLT': self.nMLT,
'nL': self.nL,
'nEk': self.nEk,
'tauTDSDim': self.dimTDS,
'nEkTDS': self.nEkTDS
}
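# Hedged example: with the new defaults, wmParams().getAttrs() should yield
#   {'tauDim': 4, 'nKp': 6, 'nMLT': 97, 'nL': 41, 'nEk': 155}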

View File

@@ -40,6 +40,10 @@ class remix:
'max':10},
'jhall' : {'min':-2,
'max':2},
'gtype' : {'min':0,
'max':1},
'npsp' : {'min':0,
'max':1.e3},
'Menergy' : {'min':0,
'max':20},
'Mflux' : {'min':0,
@@ -85,6 +89,10 @@ class remix:
self.variables['sigmah']['data'] = self.ion['Hall conductance '+h]
self.variables['energy']['data'] = self.ion['Average energy '+h]
self.variables['flux']['data'] = self.ion['Number flux '+h]
if 'RCM grid type '+h in self.ion.keys():
self.variables['gtype']['data'] = self.ion['RCM grid type '+h]
if 'RCM plasmasphere density '+h in self.ion.keys():
self.variables['npsp']['data'] = self.ion['RCM plasmasphere density '+h]*1.0e-6 # /m^3 -> /cc.
# variables['efield']['data'] = efield_n*1.e6
# variables['joule']['data'] = sigmap_n*efield_n**2*1.e-3
if 'Zhang average energy '+h in self.ion.keys():
@@ -112,6 +120,10 @@ class remix:
self.variables['sigmah']['data'] = self.ion['Hall conductance '+h][:,::-1]
self.variables['energy']['data'] = self.ion['Average energy '+h][:,::-1]
self.variables['flux']['data'] = self.ion['Number flux '+h][:,::-1]
if 'RCM grid type '+h in self.ion.keys():
self.variables['gtype']['data'] = self.ion['RCM grid type '+h][:,::-1]
if 'RCM plasmasphere density '+h in self.ion.keys():
self.variables['npsp']['data'] = self.ion['RCM plasmasphere density '+h][:,::-1]*1.0e-6 # /m^3 -> /cc.
if 'Zhang average energy '+h in self.ion.keys():
self.variables['Menergy']['data'] = self.ion['Zhang average energy '+h][:,::-1]
self.variables['Mflux']['data'] = self.ion['Zhang number flux '+h][:,::-1]
@@ -132,7 +144,9 @@ class remix:
self.variables['Pflux']['data'][self.variables['Penergy']['data']==1.e-20] = 0.
# convert energy flux to erg/cm2/s to conform to Newell++, doi:10.1029/2009JA014326, 2009
self.variables['eflux']['data'] = self.variables['energy']['data']*self.variables['flux']['data']*1.6e-9
# Mask out Eavg where EnFlux<0.1
# self.variables['energy']['data'][self.variables['sigmap']['data']<=2.5]=0.0
self.Initialized=True
@@ -182,7 +196,7 @@ class remix:
def plot(self,varname,
ncontours=16, # default number of potential contours
addlabels={},
gs=None,doInset=False,doCB=True,doCBVert=True):
gs=None,doInset=False,doCB=True,doCBVert=True,doGTYPE=False,doPP=False):
# define function for potential contour overplotting
# to keep code below clean and compact
@@ -223,6 +237,36 @@ class remix:
ax.text(73.*np.pi/180.,1.03*r.max(),('min: '+format_str+'\nmax: ' +format_str) %
(tmp.min() ,tmp.max()))
# define function for grid type contour overplotting
# to keep code below clean and compact
def boundary_overplot(con_name,con_level,con_color,doInset=False):
tc = 0.25*(theta[:-1,:-1]+theta[1:,:-1]+theta[:-1,1:]+theta[1:,1:])
rc = 0.25*(r[:-1,:-1]+r[1:,:-1]+r[:-1,1:]+r[1:,1:])
        # Trick to plot contours smoothly across the periodic boundary:
        # wrap around in MLT. Note: add 2*pi to the wrapped theta column to
        # keep it ascending; otherwise the contours get mangled.
tc = np.hstack([tc,2.*np.pi+tc[:,[0]]])
rc = np.hstack([rc,rc[:,[0]]])
tmp=self.variables[con_name]['data']
tmp = np.hstack([tmp,tmp[:,[0]]])
# similar trick to make contours go through the pole
# add pole
tc = np.vstack([tc[[0],:],tc])
rc = np.vstack([0.*rc[[0],:],rc])
tmp = np.vstack([tmp[0,:].mean()*np.ones_like(tmp[[0],:]),tmp])
# finally, plot
if (doInset):
LW = 0.5
alpha = 1
tOff = 0.0
else:
LW = 0.75
alpha = 1
tOff = np.pi/2.
ax.contour(tc+tOff,rc,tmp,levels=con_level,colors=con_color,linewidths=LW,alpha=alpha)
if not self.Initialized:
sys.exit("Variables should be initialized for the specific hemisphere (call init_var) prior to plotting.")
@@ -370,6 +414,10 @@ class remix:
if varname=='current':
potential_overplot(doInset)
if doGTYPE and varname=='eflux':
boundary_overplot('gtype',[0.01,0.99],'green',doInset)
if doPP and varname=='eflux':
boundary_overplot('npsp',[10],'cyan',doInset)
return ax
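# Hedged usage sketch of the new overlays (hypothetical file name and step):
#   ion = remix('msphere.mix.h5', nStep)
#   ion.init_vars('NORTH')
#   ion.plot('eflux', doGTYPE=True, doPP=True)  # RCM boundary + plasmapause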
# mpl.rcParams['contour.negative_linestyle'] = 'solid'

View File

@@ -735,13 +735,15 @@ class DSCOVRNC(OMNI):
pop = []
f1m = []
m1m = []
fmt = '%Y%m%d'
fmt1 = '%Y%m%d%H%M%S'
fmt2 = '%Y%m%d%H%M%S'
jud0 = datetime.datetime(1970,1,1,0,0,0,0)
for f in filelist:
if f[0:2] == 'oe':
ctime = datetime.datetime.strptime(f[15:23],fmt)
if ctime >= t0 and ctime <=t1:
ctime = datetime.datetime.strptime(f[15:29],fmt1)
etime = datetime.datetime.strptime(f[31:45],fmt2)
                if (t0 <= ctime <= t1) or (t0 <= etime <= t1):
if 'pop' in f:
pop.append(f)
if 'f1m' in f:

View File

@@ -11,6 +11,7 @@ import warnings
import math
import datetime
import json
from multiprocessing import Pool
#### NEED TO POINT TO SUPERMAG API SCRIPT
#### /glade/p/hao/msphere/gamshare/supermag/supermag_api.py
@@ -94,7 +95,7 @@ def interp_grid(values, tri, uv, d=2):
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def FetchSMData(user, start, numofdays, savefolder, badfrac=0.1, nanflags=True, doDB=True):
def FetchSMData(user, start, numofdays, savefolder, badfrac=0.1, nanflags=True, doDB=True, ncpus=1):
"""Retrieve all available SuperMagnet data for a specified period
If data has not already been downloaded, fetches data from Supermag
@@ -147,33 +148,47 @@ def FetchSMData(user, start, numofdays, savefolder, badfrac=0.1, nanflags=True,
#ZZZ
status, stations = smapi.SuperMAGGetInventory(user, startstr, extent = 86400*numofdays)
    # Old (serial-only) fetch loop:
    for iii in stations:
        print("Fetching: ", iii)
        #ZZZ
        status, A = smapi.SuperMAGGetData(user, startstr, extent=86400*numofdays,
                                          flagstring=smFlags, station = iii, FORMAT = 'list')
        if status:
            quickvals = np.array([x['N']['nez'] for x in A])
            # get rid of data if too many bad values
            if np.sum(quickvals>999990.0) >= badfrac*len(quickvals):
                badindex.append(False)
                print(iii, "BAD")
            else:
                badindex.append(True)
            STATUS.append(status)
            master.append(A)
        else:
            STATUS.append(status)
            badindex.append(False)
            master.append(['BAD'])
    # New version: keep the serial loop for ncpus == 1, fetch stations in
    # parallel with multiprocessing otherwise.
    if (ncpus == 1):
        for iii in stations:
            print("Fetching: ", iii)
            #ZZZ
            status, A = smapi.SuperMAGGetData(user, startstr, extent=86400*numofdays,
                                              flagstring=smFlags, station = iii, FORMAT = 'list')
            if status:
                quickvals = np.array([x['N']['nez'] for x in A])
                # get rid of data if too many bad values
                if np.sum(quickvals>999990.0) >= badfrac*len(quickvals):
                    badindex.append(False)
                    print(iii, "BAD")
                else:
                    badindex.append(True)
                STATUS.append(status)
                master.append(A)
            else:
                STATUS.append(status)
                badindex.append(False)
                master.append(['BAD'])
    elif (ncpus > 1):
        fetch_args = [(user,startstr,numofdays,smFlags,badfrac,station) for station in stations]
        with Pool(processes=ncpus) as pool:
            results = pool.starmap(doFetch, fetch_args)
        STATUS, badindex, master = zip(*results)
    else:
        print("Error invalid ncpu count: ", ncpus)
badindex = np.array(badindex)
master, stations = np.array(master)[badindex], np.array(stations)[badindex]
print("Done Fetching")
# Make the Supermag data a dict for saving later
output = {}
for i in master:
@@ -237,6 +252,33 @@ def FetchSMData(user, start, numofdays, savefolder, badfrac=0.1, nanflags=True,
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def doFetch(user, startstr, numofdays, smFlags, badfrac, iii):
print("Fetching: ", iii)
#ZZZ
status, A = smapi.SuperMAGGetData(user, startstr, extent=86400*numofdays,
flagstring=smFlags, station = iii, FORMAT = 'list')
if status:
quickvals = np.array([x['N']['nez'] for x in A])
# get rid of data if too many bad values
if np.sum(quickvals>999990.0) >= badfrac*len(quickvals):
badindex = False
print(iii, "BAD")
else:
badindex = True
master = A
else:
badindex = False
master = ['BAD']
return status,badindex,master
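# Hedged sketch of how doFetch pairs with Pool.starmap in FetchSMData above
# (hypothetical values):
#   fetch_args = [(user, startstr, 3, smFlags, 0.1, st) for st in stations]
#   with Pool(processes=4) as pool:
#       STATUS, badindex, master = zip(*pool.starmap(doFetch, fetch_args))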
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def MJD2Str(m0):
"""Returns timestrings and datetime objects for simulation float"""
dtObj = Time(m0,format='mjd').datetime

356
kaipy/testing/common.py Normal file
View File

@@ -0,0 +1,356 @@
#!/usr/bin/env python
"""Common code for MAGE regression tests
This module provides common code used by scripts in the MAGE
regression test suite.
Authors
-------
Jeff Garretson
Eric Winter
"""
# Import standard modules.
import argparse
import os
import subprocess
import sys
# Import 3rd-party modules.
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
# Import project modules.
# Module constants
# Name of Slack channels to use as message target
SLACK_CHANNEL_NAME = '#kaijudev'
SLACK_TEST_CHANNEL_NAME = '#kaijudev-testing'
def create_command_line_parser(description):
"""Create the command-line argument parser.
Create the parser for command-line arguments.
Parameters
----------
description : str
Description of script
Returns
-------
parser : argparse.ArgumentParser
Command-line argument parser for this script.
Raises
------
None
"""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        '--account', default=os.environ['DERECHO_TESTING_ACCOUNT'],
        help='PBS account to use for testing (default: %(default)s)'
    )
    parser.add_argument(
        '--all', '-a', action='store_true',
        help='Run all tests (default: %(default)s).'
    )  # Referenced as args.all by run_mage_test_script() below.
parser.add_argument(
'--debug', '-d', action='store_true',
help='Print debugging output (default: %(default)s).'
)
parser.add_argument(
'--force', '-f', action='store_true',
help='Force all tests to run (default: %(default)s).'
)
parser.add_argument(
'--loud', '-l', action='store_true',
help='Enable loud mode (post results to Slack) (default: %(default)s).'
)
parser.add_argument(
'--test', '-t', action='store_true',
help='Enable testing mode (default: %(default)s).'
)
parser.add_argument(
'--verbose', '-v', action='store_true',
help='Print verbose output (default: %(default)s).'
)
return parser
def run_mage_test_script(test_script, args):
"""Run a single MAGE test script.
Run a single MAGE test script.
Parameters
----------
test_script : str
Path to test script to run
    args : argparse.Namespace
        Options provided on command line
Returns
-------
cproc : subprocess.CompletedProcess
Object containing results of running the command
Raises
------
None
"""
# Assemble command-line flags to pass to the individual test
# scripts.
test_script_options = ''
test_script_options += f" --account {args.account}"
if args.all:
test_script_options += ' -a'
if args.debug:
test_script_options += ' -d'
if args.force:
test_script_options += ' -f'
if args.loud:
test_script_options += ' -l'
if args.test:
test_script_options += ' -t'
if args.verbose:
test_script_options += ' -v'
# Run the test script.
cmd = f"python {test_script} {test_script_options}"
cproc = subprocess.run(cmd, shell=True, check=True)
return cproc
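# Hedged usage sketch (hypothetical script path):
#   parser = create_command_line_parser('Run the MAGE regression suite.')
#   args = parser.parse_args()
#   cproc = run_mage_test_script('scripts/test_quickstart.py', args)
#   print(cproc.returncode)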
def read_build_module_list_file(list_file):
"""Read a MAGE build module list file
Read a MAGE build module list file.
Parameters
----------
list_file : str
Path to MAGE build module list/file list file to read
Returns
-------
module_or_file_names : list of str
List of modules, or list of module list files
cmake_environment : str
Environment variables and values to set when invoking cmake with this
module set
cmake_options : str
Command-line options for cmake when building MAGE with this module set
Raises
------
None
"""
with open(list_file, encoding='utf-8') as f:
lines = f.readlines()
lines = [s.rstrip() for s in lines]
    # Extract the optional cmake environment variable settings.
cmake_environment = ''
label = 'CMAKE_ENV='
if lines[0].startswith(label):
cmake_environment = lines[0][len(label):].rstrip()
lines.pop(0) # Remove cmake environment line.
    # Extract the optional cmake command-line options.
cmake_options = ''
label = 'CMAKE_OPTIONS='
if lines[0].startswith(label):
cmake_options = lines[0][len(label):].rstrip()
lines.pop(0) # Remove cmake options line.
# Save the remaining lines as a module or file list.
module_or_file_names = lines
# Return the file contents.
return module_or_file_names, cmake_environment, cmake_options
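# Hedged example of the list-file layout this parser expects (hypothetical
# entries; the optional CMAKE_* header lines must appear in this order):
#   CMAKE_ENV=FC=ifort
#   CMAKE_OPTIONS=-DENABLE_MPI=ON
#   ncarenv/23.06
#   cmake/3.26.3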
# -----------------------------------------------------------------------------
# Git utilities
def git_get_branch_name():
"""Fetch the name of the current git branch,
Fetch the name of the current git branch.
Parameters
----------
None
Returns
-------
git_branch_name : str
Name of the current git branch
Raises
------
None
"""
cmd = 'git symbolic-ref --short HEAD'
cproc = subprocess.run(cmd, shell=True, check=True, text=True,
capture_output=True)
git_branch_name = cproc.stdout.rstrip()
    # Return the git branch name.
return git_branch_name
def git_pull():
"""Pull the current branch from the git repository.
Pull the current branch from the git repository.
Parameters
----------
None
Returns
-------
git_pull_output : str
Output from git pull command
Raises
------
None
"""
cmd = 'git pull'
cproc = subprocess.run(cmd, shell=True, check=True, text=True,
capture_output=True)
git_pull_output = cproc.stdout.rstrip()
# Return the git pull output.
return git_pull_output
# -----------------------------------------------------------------------------
# Slack utilities
def slack_create_client():
"""Create a client for Slack communication.
Create a client for Slack communication.
Parameters
----------
None
Returns
-------
slack_client : slack_sdk.WebClient
Client for Slack communication
Raises
------
None
"""
# Get the Slack API token
slack_token = os.environ['SLACK_BOT_TOKEN']
# Create the Slack client.
slack_client = WebClient(token=slack_token)
# Return the Slack client.
return slack_client
def slack_send_message(slack_client, message, thread_ts=None, is_test=False):
"""Send a message to Slack.
Send a message to Slack. Errors during message sending are not considered
to be fatal. Errors are caught, an error message is printed, and the
program continues normally.
Parameters
----------
slack_client : slack_sdk.WebClient
Client for Slack communication
message : str
Message to send to Slack
thread_ts : XXX, default None
Set to desired Slack thread identifier (timestamp), if any
is_test : bool
If True, use the testing channel as the message target.
Returns
-------
response : XXX
Response from Slack API when posting the message.
Raises
------
None
"""
if is_test:
channel = SLACK_TEST_CHANNEL_NAME
else:
channel = SLACK_CHANNEL_NAME
try:
response = slack_client.chat_postMessage(
channel=channel,
thread_ts=thread_ts,
text=message,
)
except SlackApiError as e:
print('Sending message to Slack failed.', file=sys.stderr)
response = e.response
print(f"response = {response}", file=sys.stderr)
return response
def slack_send_image(slack_client, image_file_path, initial_comment='',
thread_ts=None, is_test=False):
"""Send an image file to Slack.
Send an image file to Slack.
Parameters
----------
slack_client : slack_sdk.WebClient
Client for Slack communication
image_file_path : str
Path to image file to send to Slack
initial_comment : str
Comment to include with image, default ''
thread_ts : XXX, default None
Set to desired Slack thread identifier (timestamp), if any
is_test : bool
If True, use the testing channel as the message target.
Returns
-------
response : XXX
Response from Slack API when posting the image.
Raises
------
None
"""
if is_test:
channel = SLACK_TEST_CHANNEL_NAME
else:
channel = SLACK_CHANNEL_NAME
try:
response = slack_client.files_upload(
channels=channel,
thread_ts=thread_ts,
file=image_file_path,
initial_comment=initial_comment,
)
except SlackApiError as e:
print('Sending image to Slack failed.', file=sys.stderr)
response = e.response
print(f"response = {response}", file=sys.stderr)
return response
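# Hedged usage sketch (requires SLACK_BOT_TOKEN in the environment; 'ts' in
# the message response threads the image under that message):
#   client = slack_create_client()
#   resp = slack_send_message(client, 'Regression tests finished.', is_test=True)
#   slack_send_image(client, 'summary.png', thread_ts=resp['ts'], is_test=True)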

View File

@@ -4,10 +4,9 @@
# version of the kaiju software.
# The lines starting with "#PBS" are directives for the PBS job control system,
# used on pleiades, cheyenne, and derecho.
# used on pleiades and derecho.
# On pleiades, this job should run in roughly 9 minutes of wall-clock time.
# On cheyenne, this job should run in roughly 5 minutes of wall-clock time.
# On derecho, this job should run in roughly 5 minutes of wall-clock time.
# Example usage
@@ -16,7 +15,7 @@
# IMPORTANT: You *must* do the following in the code below:
# 1. Uncomment the #PBS lines for your system, comment out the #PBS lines
# specific to other systems. Keep the common #PBS lines.
# 2. (cheyenne and derecho only) Set the #PBS -A directive to your own account.
# 2. (derecho only) Set the #PBS -A directive to your own account.
# 3. Uncomment the modules lines for your system, comment out the others.
# 4. Set kaiju_install_dir to your local kaiju installation.
# 5. Set PATH to include your bin subdirectory of the build subdirectory of
@@ -91,14 +90,6 @@
#-----------------------------------------------------------------------------
# For the cheyenne system at UCAR:
##PBS -A UJHB0015
##PBS -q regular
##PBS -l select=4:ncpus=36:mpiprocs=2:ompthreads=18
#-----------------------------------------------------------------------------
# For the derecho system at UCAR:
##PBS -A UJHB0019
@@ -137,16 +128,6 @@ module load comp-intel/2020.4.304 # Latest version
module load mpi-hpe/mpt.2.23
module load hdf5/1.8.18_mpt
# For cheyenne:
# module load cmake/3.22.0
# module load git/2.33.1 # Needed for git-lfs
# module load ncarenv/1.3
# module load intel/2022.1
# module load geos/3.10.1 # Must come after intel/2022.1
# module load ncarcompilers/0.5.0 # Must come after intel/2022.1
# module load mpt/2.25
# module load hdf5-mpi/1.12.2
# For derecho:
# module load ncarenv/23.06
# module load cmake/3.26.3
@@ -207,7 +188,6 @@ export KMP_STACKSIZE=128M
# correctOMPenvironment.sh. Uncomment the line for your system, comment out the
# rest.
export OMP_NUM_THREADS=28 # pleiades
# export OMP_NUM_THREADS=36 # cheyenne
# export OMP_NUM_THREADS=128 # derecho
echo "The active environment variables are:"
@@ -234,8 +214,7 @@ echo "Running $exe on model $runid."
# On derecho, the MPT implementation of MPI is not used, and so the omplace
# tool is not available. The pinCpuCores.sh script performs the same function
# on derecho that correctOMPenvironment.sh and omplace perform on pleiades and
# cheyenne.
# on derecho that correctOMPenvironment.sh and omplace perform on pleiades.
# Uncomment the lines below appropriate to the system you are running on, and
# comment out the others.
@@ -244,10 +223,6 @@ echo "Running $exe on model $runid."
placer_cmd="$KAIJUHOME/scripts/preproc/correctOMPenvironment.sh $nodefile omplace"
mpiexec $placer_cmd $exe $runid.xml >& ${exe}.${runid}.out
# cheyenne
# placer_cmd="$KAIJUHOME/scripts/preproc/correctOMPenvironment.sh $nodefile omplace"
# mpiexec $placer_cmd $exe $runid.xml >& ${exe}.${runid}.out
# derecho
# placer_cmd="$KAIJUHOME/scripts/preproc/pinCpuCores.sh"
# mpiexec $placer_cmd $exe $runid.xml >& ${exe}.${runid}.out

View File

@@ -103,7 +103,7 @@ doStarlight = T
doRamp = F
[precipitation]
aurora_model_type = RCMONO
aurora_model_type = LINMRG
alpha = 0.2
beta = 0.4

View File

@@ -4,19 +4,18 @@
# MPI version of the kaiju software.
# The lines starting with "#PBS" are directives for the PBS job control system,
# used on pleiades, cheyenne, and derecho.
# used on pleiades and derecho.
# On pleiades, this job should run in roughly 75 minutes of wall-clock time.
# On cheyenne, this job should run in roughly 42 minutes of wall-clock time.
# On derecho, this job should run in roughly 20 minutes of wall-clock time.
# Example usage
# qsub geo_mpi.pbs
# IMPORTANT: You *must* do the following in the code below:
# 1. Uncomment the #PBS lines for your system, comment out the #PBS lines
# specific to other systems. Keep the common #PBS lines.
# 2. (cheyenne and derecho only) Set the #PBS -A directive to your own account.
# 1. Uncomment the #PBS lines for your system and model resolution, comment out
# the #PBS lines specific to other cases. Keep the common #PBS lines.
# 2. (derecho only) Set the #PBS -A directive to your own account.
# 3. Uncomment the modules lines for your system, comment out the others.
# 4. Set kaiju_install_dir to your local kaiju installation.
# 5. Set PATH to include your bin subdirectory of the build subdirectory of
@@ -74,16 +73,20 @@
#PBS -q normal
# This is the line where you request specific resources from the PBS system.
# system.
# system. Uncomment the line for your model resolution.
# select=2 -> Request 2 compute nodes.
# ncpus=28 -> Each compute node must have at least 28 cores. This requirement
# implies the use of the 2-socket, 14 cores/socket Broadwell nodes.
# mpiprocs=2 -> Each node will run 2 MPI ranks of the kaiju code. NOTE: This
# value should always be 2 for the kaiju software.
# mpiprocs=2 -> Each node will run 2 MPI ranks of the kaiju code.
# ompthreads=14 -> Each MPI rank will run 14 OMP threads.
# model=bro -> Each compute node must contain Broadwell chips. Specifying
# "model" is a HECC-specific PBS requirement.
# For a "D"-resolution model run:
#PBS -l select=2:ncpus=28:mpiprocs=2:ompthreads=14:model=bro+1:ncpus=28:mpiprocs=1:ompthreads=28:model=bro
# For a "Q"-resolution model run:
##PBS -l select=8:ncpus=28:mpiprocs=2:ompthreads=14:model=bro+9:ncpus=28:mpiprocs=1:ompthreads=28:model=bro
# For a "O"-resolution model run:
##PBS -l select=48:ncpus=28:mpiprocs=2:ompthreads=14:model=bro+9:ncpus=28:mpiprocs=1:ompthreads=28:model=bro
# NOTE: Everything after the "+" is an additional request for resources for
# "helper" applications in the MPI kaiju code.
@@ -91,17 +94,9 @@
# IMPORTANT NOTE: In the XML file for your run, the "decomposition" of the
# problem into MPI ranks is specified by the XML elements <iPdir>, <jPdir>, and
# <kPdir>. This breakdown is implemented on the resource request line by using
# select=N/2, where N=iPdir*jPdir*kPdir. This tells PBS to give you N/2 nodes,
# each of which will run 2 MPI ranks. More simply:
# iPdir*jPdir*kPdir = select*mpiprocs
#-----------------------------------------------------------------------------
# For the cheyenne system at UCAR:
##PBS -A UJHB0015
##PBS -q regular
##PBS -l select=2:ncpus=36:mpiprocs=2:ompthreads=18+1:ncpus=36:mpiprocs=1:ompthreads=36
# select=N/mpiprocs, where N=iPdir*jPdir*kPdir. This tells PBS to give you
# N/mpiprocs nodes, each of which will run mpiprocs MPI ranks. More simply:
# iPdir*jPdir*kPdir = select*mpiprocs.
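# Worked example: the "D" select line above uses select=2 with mpiprocs=2,
# i.e. iPdir*jPdir*kPdir = 2*2 = 4 gamera ranks (the "+1..." chunk adds the
# helper node on top of that).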
#-----------------------------------------------------------------------------
@@ -110,6 +105,10 @@
##PBS -A UJHB0019
##PBS -q main
##PBS -l select=2:ncpus=128:mpiprocs=2:ompthreads=64+1:ncpus=128:mpiprocs=1:ompthreads=128
# On derecho: use 8 ranks/node for gamera, 2 ranks for the helpers, and one
# separate node for voltron (the additional PBS chunk).
#------------------------------------------------------------------------------
# END OF PBS DIRECTIVES
@@ -143,16 +142,6 @@ module load comp-intel/2020.4.304 # Latest version
module load mpi-hpe/mpt.2.23
module load hdf5/1.8.18_mpt
# For cheyenne:
# module load cmake/3.22.0
# module load git/2.33.1 # Needed for git-lfs
# module load ncarenv/1.3
# module load intel/2022.1
# module load geos/3.10.1 # Must come after intel/2022.1
# module load ncarcompilers/0.5.0 # Must come after intel/2022.1
# module load mpt/2.25
# module load hdf5-mpi/1.12.2
# For derecho:
# module load ncarenv/23.06
# module load cmake/3.26.3
@@ -213,7 +202,6 @@ export KMP_STACKSIZE=128M
# correctOMPenvironment.sh. Uncomment the line for your system, comment out the
# rest.
export OMP_NUM_THREADS=28 # pleiades
# export OMP_NUM_THREADS=36 # cheyenne
# export OMP_NUM_THREADS=128 # derecho
echo "The active environment variables are:"
@@ -240,8 +228,7 @@ echo "Running $exe on model $runid."
# On derecho, the MPT implementation of MPI is not used, and so the omplace
# tool is not available. The pinCpuCores.sh script performs the same function
# on derecho that correctOMPenvironment.sh and omplace perform on pleiades and
# cheyenne.
# on derecho that correctOMPenvironment.sh and omplace perform on pleiades.
# Uncomment the lines below appropriate to the system you are running on, and
# comment out the others.
@@ -250,10 +237,6 @@ echo "Running $exe on model $runid."
placer_cmd="$KAIJUHOME/scripts/preproc/correctOMPenvironment.sh $nodefile omplace"
mpiexec $placer_cmd $exe $runid.xml >& ${exe}.${runid}.out
# cheyenne
# placer_cmd="$KAIJUHOME/scripts/preproc/correctOMPenvironment.sh $nodefile omplace"
# mpiexec $placer_cmd $exe $runid.xml >& ${exe}.${runid}.out
# derecho
# placer_cmd="$KAIJUHOME/scripts/preproc/pinCpuCores.sh"
# mpiexec $placer_cmd $exe $runid.xml >& ${exe}.${runid}.out

View File

@@ -41,7 +41,7 @@ pbs_template = os.path.join(
# Start and stop date of data to fetch from CDAWeb.
start_date = "2016-08-09T09:00:00"
stop_date = "2016-08-09T10:00:00"
stop_date = "2016-08-09T11:00:00"
def create_command_line_parser():

View File

@@ -91,7 +91,7 @@ doStarlight = T
doRamp = F
[precipitation]
aurora_model_type = RCMONO
aurora_model_type = LINMRG
alpha = 0.2
beta = 0.4

View File

@@ -4,10 +4,9 @@
# the serial version of the kaiju software.
# The lines starting with "#PBS" are directives for the PBS job control
# system, used on pleiades, cheyenne, and derecho.
# system, used on pleiades and derecho.
# On pleiades, this job should run in roughly 82 minutes of wall-clock time.
# On cheyenne, this job should run in roughly 67 minutes of wall-clock time.
# On derecho, this job should run in roughly 37 minutes of wall-clock time.
# Example usage
@@ -16,7 +15,7 @@
# IMPORTANT: You *must* do the following in the code below:
# 1. Uncomment the #PBS lines for your system, comment out the #PBS lines
# specific to other systems. Keep the common #PBS lines.
# 2. (cheyenne and derecho only) Set the #PBS -A directive to your own account.
# 2. (derecho only) Set the #PBS -A directive to your own account.
# 3. Uncomment the modules lines for your system, comment out the others.
# 4. Set kaiju_install_dir to your local kaiju installation.
# 5. Set PATH to include your bin subdirectory of the build subdirectory of
@@ -77,15 +76,6 @@
#-----------------------------------------------------------------------------
# For the cheyenne system at UCAR:
# You *must* set the -A directive to your account number.
##PBS -A UJHB0015
##PBS -q regular
##PBS -l select=1:ncpus=36:ompthreads=36
#-----------------------------------------------------------------------------
# For the derecho system at UCAR:
# You *must* set the -A directive to your account number.
@@ -124,15 +114,6 @@ module load pkgsrc/2022Q1-rome # For git-lfs and cmake
module load comp-intel/2020.4.304 # Latest version
module load hdf5/1.8.18_serial
# For cheyenne:
# module load cmake/3.22.0
# module load git/2.33.1 # Needed for git-lfs
# module load ncarenv/1.3
# module load intel/2022.1
# module load geos/3.10.1 # Must come after intel/2022.1
# module load ncarcompilers/0.5.0 # Must come after intel/2022.1
# module load hdf5/1.12.2
# For derecho:
# module load ncarenv/23.06
# module load craype/2.7.20

View File

@@ -11,7 +11,7 @@ tFin = 200.0
[output]
dtOut = 10
tsOut = 50.0
tier = F
timer = F
[physics]
doMHD = T

View File

@@ -4,19 +4,18 @@
# the MPI version of the kaiju software.
# The lines starting with "#PBS" are directives for the PBS job control system,
# used on pleiades, cheyenne, and derecho.
# used on pleiades and derecho.
# On pleiades, this job should run in roughly 75 minutes of wall-clock time.
# On cheyenne, this job should run in roughly 10 minutes of wall-clock time.
# On derecho, this job should run in roughly 7 minutes of wall-clock time.
# Example usage
# qsub helio_mpi.pbs
# IMPORTANT: You *must* do the following in the code below:
# 1. Uncomment the #PBS lines for your system, comment out the #PBS lines
# specific to other systems. Keep the common #PBS lines.
# 2. (cheyenne and derecho only) Set the #PBS -A directive to your own account.
# 1. Uncomment the #PBS lines for your system and model resolution, comment out
# the #PBS lines specific to other cases. Keep the common #PBS lines.
# 2. (derecho only) Set the #PBS -A directive to your own account.
# 3. Uncomment the modules lines for your system, comment out the others.
# 4. Set kaiju_install_dir to your local kaiju installation.
# 5. Set PATH to include your bin subdirectory of the build subdirectory of
@@ -71,31 +70,28 @@
#PBS -q normal
# This is the line where you request specific resources from the PBS
# system.
# system. Uncomment the line for your model resolution.
# select=4 -> Request 4 compute nodes.
# ncpus=28 -> Each compute node must have at least 28 cores. This requirement
# implies the use of the 2-socket, 14 cores/socket Broadwell nodes.
# mpiprocs=2 -> Each node will run 2 MPI ranks of the kaiju code. NOTE: This
# value should always be 2 for the kaiju software.
# mpiprocs=2 -> Each node will run 2 MPI ranks of the kaiju code.
# ompthreads=14 -> Each MPI rank will run 14 OMP threads.
# model=bro -> Each compute node must contain Broadwell chips. Specifying
# "model" is a HECC-specific PBS requirement.
# For a 128x64x128-resolution model run:
#PBS -l select=4:ncpus=28:mpiprocs=2:ompthreads=14:model=bro
# For a "256x128x256"-resolution model run:
##PBS -l select=16:ncpus=28:mpiprocs=2:ompthreads=14:model=bro
# NOTE: These select lines contain no "+" chunk; this run requests no
# additional resources for "helper" applications.
# IMPORTANT NOTE: In the XML file for your run, the "decomposition" of the
# problem into MPI ranks is specified by the XML elements <iPdir>, <jPdir>, and
# <kPdir>. This breakdown is implemented on the resource request line by using
# select=N/2, where N=iPdir*jPdir*kPdir. This tells PBS to give you N/2 nodes,
# each of which will run 2 MPI ranks. More simply:
# iPdir*jPdir*kPdir = select*mpiprocs
#-----------------------------------------------------------------------------
# For the cheyenne system at UCAR:
##PBS -A UJHB0015
##PBS -q regular
##PBS -l select=4:ncpus=36:mpiprocs=2:ompthreads=18
# select=N/mpiprocs, where N=iPdir*jPdir*kPdir. This tells PBS to give you
# N/mpiprocs nodes, each of which will run mpiprocs MPI ranks. More simply:
# iPdir*jPdir*kPdir = select*mpiprocs.
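# Worked example: the 128x64x128 select line above uses select=4 with
# mpiprocs=2, i.e. iPdir*jPdir*kPdir = 4*2 = 8 MPI ranks.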
#-----------------------------------------------------------------------------
@@ -137,16 +133,6 @@ module load comp-intel/2020.4.304 # Latest version
module load mpi-hpe/mpt.2.23
module load hdf5/1.8.18_mpt
# For cheyenne:
# module load cmake/3.22.0
# module load git/2.33.1 # Needed for git-lfs
# module load ncarenv/1.3
# module load intel/2022.1
# module load geos/3.10.1 # Must come after intel/2022.1
# module load ncarcompilers/0.5.0 # Must come after intel/2022.1
# module load mpt/2.25
# module load hdf5-mpi/1.12.2
# For derecho:
# module load ncarenv/23.06
# module load cmake/3.26.3
@@ -207,7 +193,6 @@ export KMP_STACKSIZE=128M
# correctOMPenvironment.sh. Uncomment the line for your system, comment out the
# rest.
export OMP_NUM_THREADS=28 # pleiades
# export OMP_NUM_THREADS=36 # cheyenne
# export OMP_NUM_THREADS=128 # derecho
echo "The active environment variables are:"
@@ -234,8 +219,7 @@ echo "Running $exe on model $runid."
# On derecho, the MPT implementation of MPI is not used, and so the omplace
# tool is not available. The pinCpuCores.sh script performs the same function
# on derecho that correctOMPenvironment.sh and omplace perform on pleiades and
# cheyenne.
# on derecho that correctOMPenvironment.sh and omplace perform on pleiades.
# Uncomment the lines below appropriate to the system you are running on, and
# comment out the others.
@@ -244,10 +228,6 @@ echo "Running $exe on model $runid."
placer_cmd="$KAIJUHOME/scripts/preproc/correctOMPenvironment.sh $nodefile omplace"
mpiexec $placer_cmd $exe $runid.xml >& ${exe}.${runid}.out
# cheyenne
# placer_cmd="$KAIJUHOME/scripts/preproc/correctOMPenvironment.sh $nodefile omplace"
# mpiexec $placer_cmd $exe $runid.xml >& ${exe}.${runid}.out
# derecho
# placer_cmd="$KAIJUHOME/scripts/preproc/pinCpuCores.sh"
# mpiexec $placer_cmd $exe $runid.xml >& ${exe}.${runid}.out

View File

@@ -4,10 +4,9 @@
# using the serial version of the kaiju software.
# The lines starting with "#PBS" are directives for the PBS job control
# system, used on pleiades, cheyenne, and derecho.
# system, used on pleiades and derecho.
# On pleiades, this job should run in roughly 110 minutes of wall-clock time.
# On cheyenne, this job should run in roughly 101 minutes of wall-clock time.
# On derecho, this job should run in roughly 137 minutes of wall-clock time.
# Example usage
@@ -16,7 +15,7 @@
# IMPORTANT: You *must* do the following in the code below:
# 1. Uncomment the #PBS lines for your system, comment out the #PBS lines
# specific to other systems. Keep the common #PBS lines.
# 2. (cheyenne and derecho only) Set the #PBS -A directive to your own account.
# 2. (derecho only) Set the #PBS -A directive to your own account.
# 3. Uncomment the modules lines for your system, comment out the others.
# 4. Set kaiju_install_dir to your local kaiju installation.
# 5. Set PATH to include your bin subdirectory of the build subdirectory of
@@ -77,15 +76,6 @@
#-----------------------------------------------------------------------------
# For the cheyenne system at UCAR:
# You *must* set the -A directive to your account number.
##PBS -A UJHB0015
##PBS -q regular
##PBS -l select=1:ncpus=36:ompthreads=36
#-----------------------------------------------------------------------------
# For the derecho system at UCAR:
# You *must* set the -A directive to your account number.
@@ -124,15 +114,6 @@ module load pkgsrc/2022Q1-rome # For git-lfs and cmake
module load comp-intel/2020.4.304 # Latest version
module load hdf5/1.8.18_serial
# For cheyenne:
# module load cmake/3.22.0
# module load git/2.33.1 # Needed for git-lfs
# module load ncarenv/1.3
# module load intel/2022.1
# module load geos/3.10.1 # Must come after intel/2022.1
# module load ncarcompilers/0.5.0 # Must come after intel/2022.1
# module load hdf5/1.12.2
# For derecho:
# module load ncarenv/23.06
# module load craype/2.7.20

View File

@@ -4,10 +4,9 @@
# serial version of the kaiju software.
# The lines starting with "#PBS" are directives for the PBS job control
# system, used on pleiades, cheyenne, and derecho.
# system, used on pleiades and derecho.
# On pleiades, this job should run in roughly 1 minute of wall-clock time.
# On cheyenne, this job should run in roughly 1 minute of wall-clock time.
# On derecho, this job should run in roughly 1 minute of wall-clock time.
# Example usage
@@ -16,7 +15,7 @@
# IMPORTANT: You *must* do the following:
# 1. Uncomment the #PBS lines for your system, comment out the #PBS lines
# specific to other systems. Keep the common #PBS lines.
# 2. (cheyenne and derecho only) Set the #PBS -A directive to your own account.
# 2. (derecho only) Set the #PBS -A directive to your own account.
# 3. Uncomment the modules lines for your system, comment out the others.
# 4. Set kaiju_install_dir to your local kaiju installation.
# 5. Set PATH to include your bin subdirectory of the build subdirectory of
@@ -77,15 +76,6 @@
#-----------------------------------------------------------------------------
# For the cheyenne system at UCAR:
# You *must* set the -A directive to your account number.
##PBS -A UJHB0015
##PBS -q regular
##PBS -l select=1:ncpus=36:ompthreads=36
#-----------------------------------------------------------------------------
# For the derecho system at UCAR:
# You *must* set the -A directive to your account number.
@@ -124,15 +114,6 @@ module load pkgsrc/2022Q1-rome # For git-lfs and cmake
module load comp-intel/2020.4.304 # Latest version
module load hdf5/1.8.18_serial
# For cheyenne:
# module load cmake/3.22.0
# module load git/2.33.1 # Needed for git-lfs
# module load ncarenv/1.3
# module load intel/2022.1
# module load geos/3.10.1 # Must come after intel/2022.1
# module load ncarcompilers/0.5.0 # Must come after intel/2022.1
# module load hdf5/1.12.2
# For derecho:
# module load ncarenv/23.06
# module load craype/2.7.20

View File

@@ -1,144 +0,0 @@
#!/usr/bin/env python
################ first figure out the time ################
import sys
import argparse
from argparse import RawTextHelpFormatter
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from astropy.time import Time
import kaipy.kaiH5 as kaiH5
import kaipy.remix.remix as remix
from multiprocessing import Pool
from psutil import cpu_count
# Defaults
nStp = -1
ftag = "msphere"
doNflux = False
printAll = False
ncpus = 1
coord = 'SM'
MainS = """Creates simple multi-panel REMIX figure for a GAMERA magnetosphere run.
Top Row - FAC (with potential contours overplotted), Pedersen and Hall Conductances
Bottom Row - Joule heating rate, particle energy and energy flux
"""
parser = argparse.ArgumentParser(description=MainS, formatter_class=RawTextHelpFormatter)
parser.add_argument('-id',type=str,metavar="runid",default=ftag,help="RunID of data (default: %(default)s)")
parser.add_argument('-n' ,type=int,metavar="step" ,default=nStp,help="Time slice to plot, similar to msphpic.py (default: %(default)s)")
parser.add_argument('-print', action='store_true', default=printAll,help="Print list of all steps and time labels (default: %(default)s)")
parser.add_argument('-nflux', action='store_true', default=doNflux,help="Show number flux instead of energy flux (default: %(default)s)")
parser.add_argument('-ncpus', type=int,metavar="ncpus", default=ncpus,help="Number of threads to use (default: %(default)s)")
parser.add_argument('-coord', type=str,metavar="coord", default=coord,help="Coordinate system to use (default: %(default)s)")
# also, optional min/max values for plotting
# it's ugly to specify them in the command line
# but I don't know of a better way to expose them to the user
# config file wouldn't work because we'd need to provide a sample somewhere
# on second thought, this is too ugly -- don't want to do it. Leaving here just in case.
# parser.add_argument('-curMin' ,type=float,metavar="current minimum",help="FAC minimum for plotting. If not specified, defaults in the remix class are used.")
# parser.add_argument('-curMax' ,type=float,metavar="current maximum",help="FAC maximum for plotting. If not specified, defaults in the remix class are used.")
# parser.add_argument('-sigpMin' ,type=float,metavar="sigmaP minimum",help="Pedersen conductance minimum for plotting. If not specified, defaults in the remix class are used.")
# parser.add_argument('-sigpMax' ,type=float,metavar="sigmaP maximum",help="Pedersen conductance maximum for plotting. If not specified, defaults in the remix class are used.")
# parser.add_argument('-sighMin' ,type=float,metavar="sigmaH minimum",help="Hall conductance minimum for plotting. If not specified, defaults in the remix class are used.")
# parser.add_argument('-sighMax' ,type=float,metavar="sigmaH maximum",help="Hall conductance maximum for plotting. If not specified, defaults in the remix class are used.")
# parser.add_argument('-jouleMin' ,type=float,metavar="joule minimum",help="Joule heating minimum for plotting. If not specified, defaults in the remix class are used.")
# parser.add_argument('-jouleMax' ,type=float,metavar="joule maximum",help="Joule heating maximum for plotting. If not specified, defaults in the remix class are used.")
# parser.add_argument('-energyMin' ,type=float,metavar="energy minimum",help="Particle energy minimum for plotting. If not specified, defaults in the remix class are used.")
# parser.add_argument('-energyMax' ,type=float,metavar="energy maximum",help="Particle energy maximum for plotting. If not specified, defaults in the remix class are used.")
# parser.add_argument('-efluxMin' ,type=float,metavar="eflux minimum",help="Particle energy flux minimum for plotting. If not specified, defaults in the remix class are used.")
# parser.add_argument('-efluxMax' ,type=float,metavar="eflux maximum",help="Particle energy flux maximum for plotting. If not specified, defaults in the remix class are used.")
#Finalize parsing
args = parser.parse_args()
remixFile = args.id+'.mix.h5'
coord = args.coord
nsteps,sIds=kaiH5.cntSteps(remixFile)
T=kaiH5.getTs(remixFile,sIds,aID='MJD')
if args.print: # print the list of steps and exit
for i,tt in enumerate(T):
print('Step#%06d: '%sorted(sIds)[i],Time(tt,format='mjd').iso)
sys.exit(0)
else:
if args.n == -1: args.n = sorted(sIds)[-1] # take last step by default
if args.n not in sIds:
sys.exit("Time step not in the h5 file.")
foundT = T[sorted(sIds).index(args.n)]
print('Found time:',Time(foundT,format='mjd').iso)
################################################################
def PlotStuff(i,remixFile,T,sIds,foundT,Time,args):
# Now plotting
mpl.use('Agg')
mpl.rc('mathtext',fontset='stixsans',default='regular')
mpl.rc('font',size=10)
#print(np.size(sorted(sIds)))
# Initialize the remix class
try:
ion = remix.remix(remixFile,i,coord)
except:
ion = remix.remix(remixFile,i)
foundT = T[sorted(sIds).index(i)]
print('Found time:',Time(foundT,format='mjd').iso)
# if only plotting one variable, could just do this:
# ion.init_vars(h)
# ion.plot('potential')
for h in ['NORTH','SOUTH']:
fig = plt.figure(figsize=(12,7.5))
plt.figtext(0.5,0.94,'MIX ('+h+')\n'+Time(foundT,format='mjd').iso,
fontsize=12,multialignment='center',horizontalalignment='center')
ion.init_vars(h)
gs = gridspec.GridSpec(2,3,figure=fig,left=0.03,right=0.97, top=0.9,bottom=0.03)
ion.plot('current' ,gs=gs[0,0])
ion.plot('sigmap' ,gs=gs[0,1])
ion.plot('sigmah' ,gs=gs[0,2])
ion.plot('joule' ,gs=gs[1,0])
ion.plot('energy' ,gs=gs[1,1])
if args.nflux:
ion.plot('flux' ,gs=gs[1,2])
else:
ion.plot('eflux',gs=gs[1,2])
if coord == 'SM':
if (h.lower()=='north'):
plt.savefig('remix_n'+'%04d'%i+'.png',dpi=300)
else:
plt.savefig('remix_s'+'%04d'%i+'.png',dpi=300)
else:
if (h.lower()=='north'):
plt.savefig('remix_'+coord+'_n'+'%04d'%i+'.png',dpi=300)
else:
plt.savefig('remix_'+coord+'_s'+'%04d'%i+'.png',dpi=300)
plt.close(fig)
#ncpus = 30
ncpus = args.ncpus
ag = ((i,remixFile,T,sIds,foundT,Time,args) for i in range(1,np.size(sorted(sIds))) )
print('This system has ',cpu_count(logical=False),' cpus.')
ncpus = min(int(ncpus),cpu_count(logical=False))
print('We will use ',ncpus,' cpus for parallelization')
with Pool(processes=ncpus) as pl:
pl.starmap(PlotStuff,ag)

View File

@@ -1,42 +0,0 @@
## Gamera ##
[sim]
runid = {{ sim_runid }}
doH5g = T
H5Grid = heliogrid.h5
icType = user
pdmb = 1.0
rmeth = 7UP
[time]
tFin = {{ time_tFin }}
[spinup]
doSpin = T
tSpin = {{ spinup_tSpin }}
tIO = {{ spinup_tIO }}
[output]
dtOut = {{ output_dtOut }}
tsOut = {{ output_tsOut }}
timer = F
[physics]
doMHD = T
gamma = 1.5
[prob]
Tsolar = 25.38
[iPdir]
N = {{ iPdir_N }}
bcPeriodic = F
[jPdir]
N = {{ jPdir_N }}
bcPeriodic = F
[kPdir]
N = {{ kPdir_N }}
bcPeriodic = T
# THERE MUST BE NO TRAILING BLANK LINES AT THE END OF THIS FILE!

View File

@@ -1,75 +0,0 @@
#!/bin/bash
#PBS -N {{ sim_runid }}
#PBS -A {{ pbs_account }}
#PBS -q {{ pbs_queue }}
#PBS -l walltime={{ pbs_walltime }}
#PBS -l select={{ pbs_select }}:ncpus={{ pbs_ncpus }}:mpiprocs={{ pbs_mpiprocs }}:ompthreads={{ pbs_ompthreads }}
#PBS -m abe
#PBS -j oe
echo "Job $PBS_JOBID started at `date` on `hostname`."
# Specify the ID string for the run. This can be set to any desired string.
# PBS_JOBNAME is used here as an example, as it is set by the #PBS -N
# directive near the top of this file.
export RUNID=$PBS_JOBNAME
# Load the required modules for MPI kaiju.
module purge
module load ncarenv/1.3
module load intel/19.1.1
module load impi/2019.7.217
module load ncarcompilers/0.5.0 # Must be after intel
module load hdf5-mpi/1.10.8
module load cmake/3.18.2
echo "The following modules are loaded:"
module list
# Define the kaiju installation location.
# NOTE: You MUST set this variable to the path to your kaiju directory, which
# is the top-level directory created when you cloned the kaiju repository.
export KAIJU_INSTALL_DIR={{ kaiju_home }}
# Set kaiju-related environment variables.
# This script sets KAIJUHOME and other environment variables.
source $KAIJU_INSTALL_DIR/scripts/setupEnvironment.sh
# Add the kaiju binary directory to the command path.
# NOTE: You should set this variable to the path to the bin subdirectory of
# your kaiju build directory. The setting below assumes that the MPI version
# of kaiju was built in the build_mpi subdirectory of the kaiju home directory
# (which is typically the same as KAIJU_INSTALL_DIR).
export PATH={{ kaiju_build_bin }}:$PATH
# Set the MPI_TYPE_DEPTH to 32.
# If this is not done, gamera_mpi.x will crash with a stack traceback that
# includes an error message like this:
# ...
# MPT ERROR: The program attempted to construct a derived datatype with
# depth 15, but the maximum allowed depth is 14. You can increase...
# ...
# If you see error messages like this at run time, try increasing the value
# assigned to MPI_TYPE_DEPTH in the line below.
export MPI_TYPE_DEPTH=32
# Set the OMP stack size to prevent a crash.
# If this setting is ignored, the model may cause the MPI kaiju code to crash
# with a segmentation fault and core dump. The value of "100M" was chosen
# ~arbitrarily; experimentation may allow a smaller value to be used.
export OMP_STACKSIZE=100M
echo "The active environment variables are:"
printenv
# Run the model. Direct output from the program is saved in a text file.
EXE=gamhelio_mpi.x
echo "Running $EXE on model $RUNID."
# The omplace tool is used to ensure efficient pinning of MPI ranks and OMP
# threads to appropriate sockets and cores. If you omit omplace, your job
# will still run, but it will probably be an order of magnitude slower than it
# would be when using omplace.
mpiexec $EXE $RUNID.xml >& ${EXE}.${RUNID}.out
echo "Job $PBS_JOBID ended at `date` on `hostname`."

View File

@@ -1,30 +0,0 @@
## Gamera ##
[sim]
runid = {{ sim_runid }}
doH5g = T
H5Grid = heliogrid.h5
icType = user
pdmb = 1.0
rmeth = 7UP
[time]
tFin = {{ time_tFin }}
[spinup]
doSpin = T
tSpin = {{ spinup_tSpin }}
tIO = {{ spinup_tIO }}
[output]
dtOut = {{ output_dtOut }}
tsOut = {{ output_tsOut }}
timer = F
[physics]
doMHD = T
gamma = 1.5
[prob]
Tsolar = 25.38
# THERE MUST BE NO TRAILING BLANK LINES AT THE END OF THIS FILE!

View File

@@ -1,57 +0,0 @@
#!/bin/bash
#PBS -N {{ sim_runid }}
#PBS -A {{ pbs_account }}
#PBS -q {{ pbs_queue }}
#PBS -l walltime={{ pbs_walltime }}
#PBS -l select={{ pbs_select }}:ncpus={{ pbs_ncpus }}:ompthreads={{ pbs_ompthreads }}
#PBS -m abe
#PBS -j oe
echo "Job $PBS_JOBID started at `date` on `hostname`."
# Specify the ID string for the run. This can be set to any desired string.
# PBS_JOBNAME is used here as an example, as it is set by the #PBS -N
# directive near the top of this file.
export RUNID=$PBS_JOBNAME
# Load the required modules for serial kaiju.
module purge
module load ncarenv/1.3
module load intel/19.1.1
module load ncarcompilers/0.5.0 # Must be after intel
module load hdf5/1.10.8
module load cmake/3.18.2
echo "The following modules are loaded:"
module list
# Define the kaiju installation location.
# NOTE: You MUST set this variable to the path to your kaiju directory, which
# is the top-level directory created when you cloned the kaiju repository.
export KAIJU_INSTALL_DIR={{ kaiju_home }}
# Set kaiju-related environment variables.
# This script sets KAIJUHOME and other environment variables.
source $KAIJU_INSTALL_DIR/scripts/setupEnvironment.sh
# Add the kaiju binary directory to the command path.
# NOTE: You should set this variable to the path to the bin subdirectory of
# your kaiju serial build directory.
export PATH={{ kaiju_build_bin }}:$PATH
# Set the OMP stack size to prevent a crash.
# If this setting is ignored, the model may cause the kaiju code to crash
# with a segmentation fault and core dump. The value of "100M" was chosen
# ~arbitrarily; experimentation may allow a smaller value to be used.
export OMP_STACKSIZE=100M
echo "The active environment variables are:"
printenv
# Run the model. Direct output from the program is saved in a text file.
EXE=gamhelio.x
echo "Running $EXE on model $RUNID."
$EXE $RUNID.xml >& ${EXE}.${RUNID}.out
echo "Job $PBS_JOBID ended at `date` on `hostname`."

File diff suppressed because it is too large

View File

@@ -0,0 +1,483 @@
{
"version": "0.0.0",
"simulation": {
"job_name": {
"LEVEL": "BASIC",
"prompt": "Name to use for PBS job(s)",
"default": "helio"
},
"wsa_file": {
"LEVEL": "BASIC",
"prompt": "Path to WSA boundary condition file to use",
"default": "wsa.fits"
},
"start_date": {
"LEVEL": "BASIC",
"prompt": "Start date for simulation (yyyy-mm-ddThh:mm:ss)",
"default": "2017-08-02T19:44:23"
},
"stop_date": {
"LEVEL": "BASIC",
"prompt": "Stop date for simulation (yyyy-mm-ddThh:mm:ss)",
"default": "2017-08-02T21:44:23"
},
"use_segments": {
"LEVEL": "BASIC",
"prompt": "Do you want to split your job into multiple segments?",
"default": "N",
"valids": ["Y", "y", "N", "n"]
},
"segment_duration": {
"LEVEL": "BASIC",
"prompt": "Segment length in simulated hours"
},
"hpc_system": {
"LEVEL": "BASIC",
"prompt": "Name of HPC system",
"default": "pleiades",
"valids": ["derecho", "pleiades"]
}
},
"pbs": {
"_common": {
"account_name": {
"LEVEL": "BASIC",
"prompt": "PBS account name"
},
"run_directory": {
"LEVEL": "BASIC",
"prompt": "Run directory",
"default": "."
},
"kaiju_install_directory": {
"LEVEL": "BASIC",
"prompt": "Path to kaiju installation"
},
"kaiju_build_directory": {
"LEVEL": "BASIC",
"prompt": "Path to kaiju build directory"
},
"num_segments": {
"LEVEL": "COMPUTED",
"NOTE": "Number of sequential PBS jobs to submit for the run",
"default": "1"
}
},
"derecho": {
"queue": {
"LEVEL": "BASIC",
"prompt": "PBS queue name",
"default": "main",
"valids": ["develop", "main"]
},
"walltime": {
"LEVEL": "BASIC",
"prompt": "WARNING: You are responsible for ensuring that the wall time is sufficient to run a segment of your simulation!\nRequested wall time for each PBS job segment (HH:MM:SS)",
"default": "01:00:00"
},
"select": {
"LEVEL": "EXPERT",
"prompt": "Number of nodes to request",
"default": "1"
},
"ncpus": {
"LEVEL": "EXPERT",
"prompt": "Number of cores per node",
"default": "128"
},
"mpiprocs": {
"LEVEL": "EXPERT",
"prompt": "Number of MPI ranks per node",
"default": "2"
},
"ompthreads": {
"LEVEL": "EXPERT",
"prompt": "Number of OMP threads per MPI rank",
"default": "64"
},
"other": {
"LEVEL": "EXPERT",
"prompt": "Additional options for PBS -l",
"default": ""
},
"modules": {
"LEVEL": "EXPERT",
"prompt": "Modules to load",
"default": [
"ncarenv/23.06",
"cmake/3.26.3",
"craype/2.7.20",
"intel/2023.0.0",
"geos/3.9.1",
"ncarcompilers/1.0.0",
"cray-mpich/8.1.25",
"hdf5/1.12.2",
"mkl/2023.0.0"
]
},
"environment_variables": {
"LEVEL": "EXPERT",
"prompt": "Additional environment variable settings",
"default": null
},
"mpiexec_command": {
"LEVEL": "EXPERT",
"prompt": "MPI command to launch run",
"default": "mpiexec pinCpuCores.sh"
}
},
"pleiades": {
"queue": {
"LEVEL": "BASIC",
"prompt": "PBS queue name",
"default": "normal",
"valids": ["low", "normal", "long", "debug", "devel"]
},
"walltime": {
"LEVEL": "BASIC",
"prompt": "WARNING: You are responsible for ensuring that the wall time is sufficient to run a segment of your simulation!\nRequested wall time for each PBS job segment (HH:MM:SS)",
"default": "01:00:00"
},
"select": {
"LEVEL": "EXPERT",
"prompt": "Number of nodes to request",
"default": "1"
},
"ncpus": {
"LEVEL": "EXPERT",
"prompt": "Number of cores per node",
"default": "28"
},
"mpiprocs": {
"LEVEL": "EXPERT",
"prompt": "Number of MPI ranks per node",
"default": "2"
},
"ompthreads": {
"LEVEL": "EXPERT",
"prompt": "Number of OMP threads per MPI rank",
"default": "14"
},
"other": {
"LEVEL": "EXPERT",
"prompt": "Additional options for PBS -l",
"default": ":model=bro"
},
"modules": {
"LEVEL": "EXPERT",
"prompt": "Modules to load",
"default": [
"nas",
"pkgsrc/2022Q1-rome",
"comp-intel/2020.4.304",
"mpi-hpe/mpt.2.23",
"hdf5/1.8.18_mpt"
]
},
"environment_variables": {
"LEVEL": "EXPERT",
"prompt": "Additional environment variable settings",
"default": null
},
"mpiexec_command": {
"LEVEL": "EXPERT",
"prompt": "MPI command to launch run",
"default": "mpiexec correctOMPenvironment.sh $nodefile omplace"
}
}
},
"wsa2gamera": {
"Gamera": {
"gameraGridFile": {
"LEVEL": "EXPERT",
"prompt": "GAMERA grid file",
"default": "heliogrid.h5"
},
"GridDir": {
"LEVEL": "EXPERT",
"prompt": "Directory for GAMERA grid file"
},
"gameraIbcFile": {
"LEVEL": "EXPERT",
"prompt": "GAMERA inner boundary conditions file",
"default": "innerbc.h5"
},
"IbcDir": {
"LEVEL": "EXPERT",
"prompt": "Directory for inner boundary conditions file"
}
},
"Grid": {
"tMin": {
"LEVEL": "EXPERT",
"prompt": "Minimum solar polar angle fraction ([0, 1]->[0, 2*pi])",
"default": "0.1"
},
"tMax": {
"LEVEL": "EXPERT",
"prompt": "Maximum solar polar angle fraction ([0, 1]->[0, 2*pi])",
"default": "0.9"
},
"Rin": {
"LEVEL": "EXPERT",
"prompt": "Radius of inner grid edge (Rsun)",
"default": "21.5"
},
"Rout": {
"LEVEL": "EXPERT",
"prompt": "Radius of outer grid edge (Rsun)",
"default": "220.0"
},
"Ni": {
"LEVEL": "BASIC",
"prompt": "Number of radial grid cells",
"default": "128"
},
"Nj": {
"LEVEL": "BASIC",
"prompt": "Number of polar angle grid cells",
"default": "64"
},
"Nk": {
"LEVEL": "BASIC",
"prompt": "Number of azimuthal angle grid cells",
"default": "128"
}
},
"WSA": {
"wsafile": {
"LEVEL": "EXPERT",
"prompt": "Path to WSA FITS file",
"default": "wsa.fits"
},
"density_temperature_infile": {
"LEVEL": "EXPERT",
"prompt": "Density and pressure in the file",
"default": "no",
"valids": ["yes", "no"]
},
"gauss_smooth_width": {
"LEVEL": "EXPERT",
"prompt": "Gaussian smoothing width (Rsun)",
"default": "0"
},
"normalized": {
"LEVEL": "EXPERT",
"prompt": "Normalize values from WSA FITS file",
"default": "no",
"valids": ["yes", "no"]
}
},
"Constants": {
"gamma": {
"LEVEL": "EXPERT",
"prompt": "Solar wind adiabatic index (not used?)",
"default": "1.5"
},
"Nghost": {
"LEVEL": "EXPERT",
"prompt": "Number of ghost cells for each dimension",
"default": "4"
},
"Tsolar": {
"LEVEL": "EXPERT",
"prompt": "Solar rotation period (Earth days)",
"default": "25.38"
},
"nCS": {
"LEVEL": "EXPERT",
"prompt": "Current sheet number density (cm^-3)",
"default": "1100.0"
},
"TCS": {
"LEVEL": "EXPERT",
"prompt": "Current sheet temperature (K)",
"default": "1.0e6"
}
},
"Normalization": {
"B0": {
"LEVEL": "EXPERT",
"prompt": "Solar wind magnetic field (Gauss, not used?)",
"default": "1.0e-3"
},
"n0": {
"LEVEL": "EXPERT",
"prompt": "Solar wind number density (cm^-3, not used?)",
"default": "200.0"
}
}
},
"gamera": {
"sim": {
"runid": {
"LEVEL": "EXPERT",
"prompt": "Run ID",
"default": "helio"
},
"doH5g": {
"LEVEL": "EXPERT",
"prompt": "Use an existing HDF5 LFM grid file",
"default": "T",
"valids": ["T", "F"]
},
"H5Grid": {
"LEVEL": "EXPERT",
"prompt": "H5Grid",
"default": "heliogrid.h5"
},
"icType": {
"LEVEL": "EXPERT",
"prompt": "icType",
"default": "user"
},
"pdmb": {
"LEVEL": "EXPERT",
"prompt": "pdmb",
"default": "1.0"
},
"rmeth": {
"LEVEL": "EXPERT",
"prompt": "rmeth",
"default": "7UP"
}
},
"time": {
"tFin": {
"LEVEL": "EXPERT",
"prompt": "Time duration to simulate (hours)",
"default": "200.0"
}
},
"spinup": {
"doSpin": {
"LEVEL": "EXPERT",
"prompt": "Use a spinup period",
"default": "T",
"valids": ["T", "t", "F", "f"]
},
"tSpin": {
"LEVEL": "EXPERT",
"prompt": "Spinup time duration (simulated hours)",
"default": "200.0"
},
"tIO": {
"LEVEL": "EXPERT",
"prompt": "Simulated time (hours) to start screen output (negtive for output during spinup)",
"default": "0.0"
}
},
"output": {
"dtOut": {
"LEVEL": "EXPERT",
"prompt": "Timestep slice output interval (simulated hours)",
"default": "10.0"
},
"tsOut": {
"LEVEL": "EXPERT",
"prompt": "Screen output interval (timesteps)",
"default": "50.0"
},
"doTimer": {
"LEVEL": "EXPERT",
"prompt": "Code timing output enabled",
"default": "T",
"valids": ["T", "t", "F", "f"]
}
},
"physics": {
"doMHD": {
"LEVEL": "EXPERT",
"prompt": "doMHD",
"default": "T",
"valids": ["T", "t", "F", "f"]
},
"gamma": {
"LEVEL": "EXPERT",
"prompt": "gamma",
"default": "1.5"
}
},
"prob": {
"Tsolar": {
"LEVEL": "EXPERT",
"prompt": "T_solar",
"default": "25.38"
}
},
"iPdir": {
"N": {
"LEVEL": "EXPERT",
"prompt": "Number of MPI chunks in r-dimension",
"default": "2"
},
"bcPeriodic": {
"LEVEL": "EXPERT",
"prompt": "bcPeriodic",
"default": "F",
"valids": ["T", "t", "F", "f"]
}
},
"jPdir": {
"N": {
"LEVEL": "EXPERT",
"prompt": "Number of MPI chunks in latitude dimension",
"default": "2"
},
"bcPeriodic": {
"LEVEL": "EXPERT",
"prompt": "bcPeriodic",
"default": "F",
"valids": ["T", "t", "F", "f"]
}
},
"kPdir": {
"N": {
"LEVEL": "EXPERT",
"prompt": "Number of MPI chunks in longitude dimension",
"default": "2"
},
"bcPeriodic": {
"LEVEL": "EXPERT",
"prompt": "bcPeriodic",
"default": "T",
"valids": ["T", "t", "F", "f"]
}
},
"coupling": {
"blockHalo": {
"LEVEL": "EXPERT",
"prompt": "(GAMERA) Block halo MPI",
"default": {
"derecho": "T",
"pleiades": "F"
},
"valids": ["T", "F"]
}
},
"restart": {
"doRes": {
"LEVEL": "INTERMEDIATE",
"prompt": "Restart from a saved state",
"default": "F",
"valids": ["T", "F"]
},
"nRes": {
"LEVEL": "INTERMEDIATE",
"prompt": "Index of restart file to use (-1 for latest (XXXXX-symlink) restart file)",
"default": "-1"
},
"resID": {
"LEVEL": "INTERMEDIATE",
"prompt": "Run ID for restart step",
"default": "helio"
}
}
}
}
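Each option in this file is a small record of LEVEL, prompt, default, and optional valids. A minimal sketch of loading and walking one branch of the structure (the file path is an assumption; the actual consumer is the makeitso script shown further down):

import json

with open("option_descriptions.json", "r", encoding="utf-8") as f:  # hypothetical path
    od = json.load(f)

# Walk one platform's PBS options; every leaf carries at least LEVEL and default.
for name, desc in od["pbs"]["pleiades"].items():
    print(name, desc["LEVEL"], repr(desc.get("default")))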

View File

@@ -1,42 +0,0 @@
## Gamera ##
[sim]
runid = {{ sim_runid }}
doH5g = T
H5Grid = heliogrid.h5
icType = user
pdmb = 1.0
rmeth = 7UP
[time]
tFin = {{ time_tFin }}
[spinup]
doSpin = T
tSpin = {{ spinup_tSpin }}
tIO = {{ spinup_tIO }}
[output]
dtOut = {{ output_dtOut }}
tsOut = {{ output_tsOut }}
timer = F
[physics]
doMHD = T
gamma = 1.5
[prob]
Tsolar = 25.38
[iPdir]
N = {{ iPdir_N }}
bcPeriodic = F
[jPdir]
N = {{ jPdir_N }}
bcPeriodic = F
[kPdir]
N = {{ kPdir_N }}
bcPeriodic = T
# THERE MUST BE NO TRAILING BLANK LINES AT THE END OF THIS FILE!

View File

@@ -1,72 +0,0 @@
#!/bin/bash
#PBS -N {{ sim_runid }}
#PBS -q {{ pbs_queue }}
#PBS -l walltime={{ pbs_walltime }}
#PBS -l select={{ pbs_select }}:ncpus={{ pbs_ncpus }}:mpiprocs={{ pbs_mpiprocs }}:ompthreads={{ pbs_ompthreads }}:model=bro
#PBS -m abe
#PBS -j oe
echo "Job $PBS_JOBID started at `date` on `hostname`."
# Specify the ID string for the run. This can be set to any desired string.
# PBS_JOBNAME is used here as an example, as it is set by the #PBS -N
# directive near the top of this file.
export RUNID=$PBS_JOBNAME
# Load the required modules for MPI kaiju.
module purge
module load pkgsrc/2021Q2
module load comp-intel/2020.4.304
module load mpi-hpe/mpt.2.23
module load hdf5/1.8.18_mpt
echo "The following modules are loaded:"
module list
# Define the kaiju installation location.
# NOTE: You MUST set this variable to the path to your kaiju directory, which
# is the top-level directory created when you cloned the kaiju repository.
export KAIJU_INSTALL_DIR={{ kaiju_home }}
# Set kaiju-related environment variables.
# This script sets KAIJUHOME and other environment variables.
source $KAIJU_INSTALL_DIR/scripts/setupEnvironment.sh
# Add the kaiju binary directory to the command path.
# NOTE: You should set this variable to the path to the bin subdirectory of
# your kaiju build directory. The setting below assumes that the MPI version
# of kaiju was built in the build_mpi subdirectory of the kaiju home directory
# (which is typically the same as KAIJU_INSTALL_DIR).
export PATH={{ kaiju_build_bin }}:$PATH
# Set the MPI_TYPE_DEPTH to 32.
# If this is not done, gamera_mpi.x will crash with a stack traceback that
# includes an error message like this:
# ...
# MPT ERROR: The program attempted to construct a derived datatype with
# depth 15, but the maximum allowed depth is 14. You can increase...
# ...
# If you see error messages like this at run time, try increasing the value
# assigned to MPI_TYPE_DEPTH in the line below.
export MPI_TYPE_DEPTH=32
# Set the OMP stack size to prevent a crash.
# If this setting is ignored, the model may cause the MPI kaiju code to crash
# with a segmentation fault and core dump. The value of "100M" was chosen
# ~arbitrarily; experimentation may allow a smaller value to be used.
export OMP_STACKSIZE=100M
echo "The active environment variables are:"
printenv
# Run the model. Direct output from the program is saved in a text file.
EXE=gamhelio_mpi.x
echo "Running $EXE on model $RUNID."
# The omplace tool is used to ensure efficient pinning of MPI ranks and OMP
# threads to appropriate sockets and cores. If you omit omplace, your job
# will still run, but it will probably be an order of magnitude slower than it
# would be when using omplace.
mpiexec omplace $EXE $RUNID.xml >& ${EXE}.${RUNID}.out
echo "Job $PBS_JOBID ended at `date` on `hostname`."

View File

@@ -1,30 +0,0 @@
## Gamera ##
[sim]
runid = {{ sim_runid }}
doH5g = T
H5Grid = heliogrid.h5
icType = user
pdmb = 1.0
rmeth = 7UP
[time]
tFin = {{ time_tFin }}
[spinup]
doSpin = T
tSpin = {{ spinup_tSpin }}
tIO = {{ spinup_tIO }}
[output]
dtOut = {{ output_dtOut }}
tsOut = {{ output_tsOut }}
timer = F
[physics]
doMHD = T
gamma = 1.5
[prob]
Tsolar = 25.38
# THERE MUST BE NO TRAILING BLANK LINES AT THE END OF THIS FILE!

View File

@@ -1,73 +0,0 @@
#!/bin/bash
#PBS -N {{ sim_runid }}
#PBS -q {{ pbs_queue }}
#PBS -l walltime={{ pbs_walltime }}
#PBS -l select={{ pbs_select }}:ncpus={{ pbs_ncpus }}:ompthreads={{ pbs_ompthreads }}:model=bro
#PBS -m abe
#PBS -j oe
echo "Job $PBS_JOBID started at `date` on `hostname`."
# Specify the ID string for the run. This can be set to any desired string.
# PBS_JOBNAME is used here as an example, as it is set by the #PBS -N
# directive near the top of this file.
export RUNID=$PBS_JOBNAME
# Load the required modules for serial kaiju.
# NOTE: pleiades and electra do not have the "module restore" facility found
# on cheyenne. The modules must be explicitly loaded each time they are
# needed.
# NOTE: This set of modules assumes your kaiju installation was built using
# this same list of modules. If you used different modules at build time (for
# example, if you used a GNU compiler), update this list to use the modules
# from your build-time environment.
# Comment out the module lines for all systems except the one you are using.
# The default system is pleiades, using the Intel compiler.
module purge
# For pleiades/electra:
module load pkgsrc/2021Q2
module load comp-intel/2020.4.304
module load hdf5/1.8.18_serial
echo "The following modules are loaded:"
module list
echo "The following modules are loaded:"
module list
# Define the kaiju installation location.
# NOTE: You MUST set this variable to the path to your kaiju directory, which
# is the top-level directory created when you cloned the kaiju repository.
export KAIJU_INSTALL_DIR={{ kaiju_home }}
# Set kaiju-related environment variables.
# This script sets KAIJUHOME and other environment variables.
source $KAIJU_INSTALL_DIR/scripts/setupEnvironment.sh
# Add the kaiju binary directory to the command path.
# NOTE: You should set this variable to the path to the bin subdirectory of
# your kaiju serial build directory.
export PATH={{ kaiju_build_bin }}:$PATH
# Set the OMP stack size to prevent a crash.
# If this setting is ignored, the model may cause the kaiju code to crash
# with a segmentation fault and core dump. The value of "100M" was chosen
# ~arbitrarily; experimentation may allow a smaller value to be used.
export OMP_STACKSIZE=100M
echo "The active environment variables are:"
printenv
# Run the model. Direct output from the program is saved in a text file.
EXE=gamhelio.x
echo "Running $EXE on model $RUNID."
$EXE $RUNID.xml >& ${EXE}.${RUNID}.out
echo "Job $PBS_JOBID ended at `date` on `hostname`."

View File

@@ -0,0 +1,50 @@
## Gamera ##
[sim]
runid = {{ gamera.sim.runid }}
doH5g = {{ gamera.sim.doH5g }}
H5Grid = {{ gamera.sim.H5Grid }}
icType = {{ gamera.sim.icType }}
pdmb = {{ gamera.sim.pdmb }}
rmeth = {{ gamera.sim.rmeth }}
[coupling]
blockHalo = {{ gamera.coupling.blockHalo }}
[time]
tFin = {{ gamera.time.tFin }}
[spinup]
doSpin = {{ gamera.spinup.doSpin }}
tSpin = {{ gamera.spinup.tSpin }}
tIO = {{ gamera.spinup.tIO }}
[output]
dtOut = {{ gamera.output.dtOut }}
tsOut = {{ gamera.output.tsOut }}
doTimer = {{ gamera.output.doTimer }}
[physics]
doMHD = {{ gamera.physics.doMHD }}
gamma = {{ gamera.physics.gamma }}
[prob]
Tsolar = {{ gamera.prob.Tsolar }}
[iPdir]
N = {{ gamera.iPdir.N }}
bcPeriodic = {{ gamera.iPdir.bcPeriodic }}
[jPdir]
N = {{ gamera.jPdir.N }}
bcPeriodic = {{ gamera.jPdir.bcPeriodic }}
[kPdir]
N = {{ gamera.kPdir.N }}
bcPeriodic = {{ gamera.kPdir.bcPeriodic }}
[restart]
doRes = {{ gamera.restart.doRes }}
resID = {{ gamera.restart.resID }}
nRes = {{ gamera.restart.nRes }}
# THERE MUST BE NO TRAILING BLANK LINES AT THE END OF THIS FILE!

View File

@@ -0,0 +1,64 @@
#!/bin/bash
# This script was generated to run on {{ simulation.hpc_system }}.
#PBS -N {{ simulation.segment_id }}
#PBS -A {{ pbs.account_name }}
#PBS -q {{ pbs.queue }}
#PBS -l walltime={{ pbs.walltime }}
#PBS -l select={{ pbs.select }}:ncpus={{ pbs.ncpus }}:mpiprocs={{ pbs.mpiprocs }}:ompthreads={{ pbs.ompthreads }}{{ pbs.other }}
#PBS -m abe
#PBS -j oe
echo "Job $PBS_JOBID started at `date` on `hostname`."
# Specify the ID string for the run.
export RUNID=$PBS_JOBNAME
# Some HPC systems do not start the job in the correct directory.
# Move to the intended directory. This will move the job to the correct
# directory on problem systems, such as pleiades (for some users), and have no
# effect on other systems.
cd $PBS_O_WORKDIR
# Load the required modules for MPI kaiju.
module purge
{%- for module in pbs.modules %}
module load {{ module }}
{%- endfor %}
echo "The loaded modules are:"
module list
# Define the kaiju installation location.
export KAIJU_INSTALL_DIR={{ pbs.kaiju_install_directory }}
# Set kaiju-related environment variables.
# This script sets KAIJUHOME and other environment variables.
source $KAIJU_INSTALL_DIR/scripts/setupEnvironment.sh
# Add the kaiju build bin directory to PATH.
export PATH={{ pbs.kaiju_build_directory }}/bin:$PATH
# Set other environment variables.
{{ pbs.additional_environment_variables -}}
export MPI_TYPE_DEPTH=32
export OMP_STACKSIZE=100M
export OMP_NUM_THREADS={{ pbs.ompthreads }}
echo "The active environment variables are:"
printenv
# Copy the node file for use by the placer command.
nodefile=nodefile.$PBS_JOBID
cp $PBS_NODEFILE $nodefile
# Copy the main executable.
cp {{ pbs.kaiju_build_directory }}/bin/gamhelio_mpi.x ./gamhelio_mpi.x
# Run the model. Direct output from the program is saved in a text file.
EXE=./gamhelio_mpi.x
OUTPUT_FILE="$EXE-{{ simulation.segment_id }}.out"
echo "Running $EXE on model $RUNID."
{{ pbs.mpiexec_command }} $EXE $RUNID.xml &>> $OUTPUT_FILE
echo "Job $PBS_JOBID ended at `date` on `hostname`."

View File

@@ -1,42 +1,31 @@
;Comments and definitions:
;If needed, modify the paths to the grid file, output innerbc file and WSA fits file
;tMin and tMax set the range for theta [tMin, tMax]*pi
;Rin and Rout are inner and outer boundaries in the radial direction
;Ni, Nj, Nk set the number of cells in r, theta, phi directions
;Nghost is the number of ghost cells
;nCS is the number density in the current sheet for pressure balance calculation
;TCS is the temperature in the current sheet for pressure balance calculation
[Gamera]
gameraGridFile = heliogrid.h5
GridDir = {{ run_directory }}
gameraIbcFile = innerbc.h5
IbcDir = {{ run_directory }}
gameraGridFile = {{ wsa2gamera.Gamera.gameraGridFile }}
GridDir = {{ wsa2gamera.Gamera.GridDir }}
gameraIbcFile = {{ wsa2gamera.Gamera.gameraIbcFile }}
IbcDir = {{ wsa2gamera.Gamera.IbcDir }}
[Grid]
tMin = 0.1
tMax = 0.9
Rin = 21.5
Rout = 220.
Ni = {{ wsa2gamera_Grid_Ni }}
Nj = {{ wsa2gamera_Grid_Nj }}
Nk = {{ wsa2gamera_Grid_Nk }}
tMin = {{ wsa2gamera.Grid.tMin }}
tMax = {{ wsa2gamera.Grid.tMax }}
Rin = {{ wsa2gamera.Grid.Rin }}
Rout = {{ wsa2gamera.Grid.Rout }}
Ni = {{ wsa2gamera.Grid.Ni }}
Nj = {{ wsa2gamera.Grid.Nj }}
Nk = {{ wsa2gamera.Grid.Nk }}
[WSA]
;wsafile is the path to the WSA fits file relative to $KAIJUHOME
;Helio test uses WSA file for Carrington Rotation 2193, by default
wsafile = {{ wsafile }}
density_temperature_infile = no
gauss_smooth_width = 0 ; 8
normalized = no
wsafile = {{ wsa2gamera.WSA.wsafile }}
density_temperature_infile = {{ wsa2gamera.WSA.density_temperature_infile }}
gauss_smooth_width = {{ wsa2gamera.WSA.gauss_smooth_width }}
normalized = {{ wsa2gamera.WSA.normalized }}
[Constants]
gamma = 1.5
Nghost = 4
Tsolar = 25.38
nCS = 1100. ; in [cm-3]
TCS = 1.e6 ; in [K]
gamma = {{ wsa2gamera.Constants.gamma }}
Nghost = {{ wsa2gamera.Constants.Nghost }}
Tsolar = {{ wsa2gamera.Constants.Tsolar }}
nCS = {{ wsa2gamera.Constants.nCS }}
TCS = {{ wsa2gamera.Constants.TCS }}
[Normalization]
B0 = 1.e-3 ; in [Gs] equals to 100 [nT]
n0 = 200. ; in [cm-3]
B0 = {{ wsa2gamera.Normalization.B0 }}
n0 = {{ wsa2gamera.Normalization.n0 }}

View File

@@ -3,28 +3,34 @@
"""makeitso for the MAGE magnetosphere software.
This master script is used to perform all of the steps needed to prepare
for the run of a MAGE magnetosphere model. This script is interactive - the
user is prompted for each decision that must be made to prepare for the run.
This script was designed for use on HPC systems using an MPI build of the
kaiju code.
This script performs all of the steps needed to prepare a MAGE
magnetosphere simulation run. By default, the script is interactive - the user
is prompted for each decision that must be made to prepare for the run, based
on the current "--mode" setting.
When complete, the options specified for the current run are saved to the
JSON file "options.json" in the current directory.
The modes are:
NOTE: This script assumes the KAIJUHOME environment variable has been set
correctly. This is typically done by sourcing the setup script:
$KAIJUHOME/scripts/setupEnvironment.[c]sh
"BASIC" (the default) - the user is prompted to set only a small subset of MAGE
parameters. All "INTERMEDIATE"- and "EXPERT"-mode parameters are automatically
set to default values.
"INTERMEDIATE" - The user is prompted for "BASIC" and "INTERMEDIATE"
parameters, with "EXPERT" parameters set to defaults.
"EXPERT" - The user is prompted for *all* adjustable parameters.
"""
# Import standard modules.
import argparse
import copy
import datetime
import json
import os
import subprocess
# Import 3rd-party modules.
import h5py
from jinja2 import Template
# Import project modules.
@@ -38,44 +44,11 @@ DESCRIPTION = "Interactive script to prepare a MAGE magnetosphere model run."
# Indent level for JSON output.
JSON_INDENT = 4
# Path to current kaiju installation
KAIJUHOME = os.environ["KAIJUHOME"]
# Program defaults
# Module sets by platform and run type.
modules = {
"cheyenne": [
"cmake/3.22.0",
"git/2.33.1",
"ncarenv/1.3",
"intel/2022.1",
"geos/3.10.1",
"ncarcompilers/0.5.0",
"mpt/2.25",
"hdf5-mpi/1.12.2",
],
"derecho": [
"ncarenv/23.06",
"cmake/3.26.3",
"craype/2.7.20",
"intel/2023.0.0",
"geos/3.9.1",
"ncarcompilers/1.0.0",
"cray-mpich/8.1.25",
"hdf5-mpi/1.12.2",
],
"pleiades": [
"nas"
"pkgsrc/2022Q1-rome",
"comp-intel/2020.4.304",
"mpi-hpe/mpt.2.23",
"hdf5/1.8.18_mpt",
],
}
# Path to directory containing support files.
SUPPORT_FILES_DIRECTORY = os.path.join(
os.environ["KAIJUHOME"], "scripts", "makeitso"
)
# Path to directory containing support files for makeitso.
SUPPORT_FILES_DIRECTORY = os.path.join(KAIJUHOME, "scripts", "makeitso")
# Path to option descriptions file.
OPTION_DESCRIPTIONS_FILE = os.path.join(
@@ -109,21 +82,29 @@ def create_command_line_parser():
"""
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument(
"--debug", "-d", action="store_true", default=False,
"--clobber", action="store_true",
help="Overwrite existing options file (default: %(default)s)."
)
parser.add_argument(
"--debug", "-d", action="store_true",
help="Print debugging output (default: %(default)s)."
)
parser.add_argument(
"--options", "-o", default=None,
"--mode", default="BASIC",
help="User mode (BASIC|INTERMEDIATE|EXPERT) (default: %(default)s)."
)
parser.add_argument(
"--options_path", "-o", default=None,
help="Path to JSON file of options (default: %(default)s)"
)
parser.add_argument(
"--verbose", "-v", action="store_true", default=False,
"--verbose", "-v", action="store_true",
help="Print verbose output (default: %(default)s)."
)
return parser
def get_run_option(name, description):
def get_run_option(name, description, mode="BASIC"):
"""Prompt the user for a single run option.
Prompt the user for a single run option. If no user input is provided,
@@ -137,6 +118,8 @@ def get_run_option(name, description):
Name of option
description : dict, default None
Dictionary of metadata for the option.
mode : str
User experience mode: "BASIC", "INTERMEDIATE", or "EXPERT".
Returns
-------
@@ -148,10 +131,18 @@ def get_run_option(name, description):
None
"""
# Extract prompt, default, and valids.
level = description["LEVEL"]
prompt = description.get("prompt", "")
default = description.get("default", None)
valids = description.get("valids", None)
# Compare the current mode to the parameter level setting. If the variable
# level is higher than the user mode, just use the default.
if mode == "BASIC" and level in ["INTERMEDIATE", "EXPERT"]:
return default
if mode == "INTERMEDIATE" and level in ["EXPERT"]:
return default
# If provided, add the valid values in val1|val2 format to the prompt.
if valids is not None:
vs = "|".join(valids)
@@ -184,7 +175,38 @@ def get_run_option(name, description):
return str(option_value)
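As a usage sketch of the mode gating just added (illustrative values, not code from this commit): in BASIC mode, an EXPERT-level option resolves to its default without prompting.

desc = {"LEVEL": "EXPERT", "prompt": "Number of nodes to request", "default": "1"}
value = get_run_option("select", desc, mode="BASIC")
assert value == "1"  # EXPERT-level option silently takes its default in BASIC mode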
def get_run_options():
def fetch_bcwind_time_range(bcwind_path):
"""Fetch the start and stop times for a bcwind file.
Fetch the start and stop times for a bcwind file.
Parameters
----------
bcwind_path : str
Path to bcwind file
Returns
-------
start_date, stop_date : str
First and last entries in UT group, as strings, in
'YYYY-MM-DDTHH:MM:SS' format.
Raises
------
None
"""
with h5py.File(bcwind_path, "r") as f:
start_date = f["UT"][0].decode("utf-8")
stop_date = f["UT"][-1].decode("utf-8")
# <HACK> Convert from "YYYY-MM-DD HH:MM:SS" format to
# "YYYY-MM-DDTHH:MM:SS" format.
start_date = start_date.replace(" ", "T")
stop_date = stop_date.replace(" ", "T")
# </HACK>
return start_date, stop_date
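The <HACK> above only swaps the date separator; for example:

raw = "2017-08-02 19:44:23"      # format stored in the bcwind UT group
iso = raw.replace(" ", "T")      # ISO-like format used by the rest of the script
assert iso == "2017-08-02T19:44:23"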
def prompt_user_for_run_options(args):
"""Prompt the user for run options.
Prompt the user for run options.
@@ -196,7 +218,8 @@ def get_run_options():
Parameters
----------
None
args : dict
Dictionary of command-line options
Returns
-------
@@ -207,145 +230,354 @@ def get_run_options():
------
None
"""
# Save the user mode.
mode = args.mode
# Read the dictionary of option descriptions.
with open(OPTION_DESCRIPTIONS_FILE, "r", encoding="utf-8") as f:
options_yaml = json.load(f)
option_descriptions = options_yaml["option_descriptions"]
option_descriptions = json.load(f)
# Initialize the dictionary of program options.
options = {}
# Fetch the HPC system and run type.
for name in ["hpc_system"]:
options[name] = get_run_option(name, option_descriptions[name])
#-------------------------------------------------------------------------
# Specify the location of the kaiju installation to use.
# The installation is assumed to be the installation which contains
# this running script. Note that this code requires that the KAIJUHOME
# environment variable is set. This environment variable is set
# automatically when the user runs the setupEnvironment.[c]sh script
# or its equivalent.
options["kaijuhome"] = os.environ["KAIJUHOME"]
# General options for the simulation
o = options["simulation"] = {}
od = option_descriptions["simulation"]
# Compute the default build directory based on KAIJUHOME and the run
# type. The build directory is the directory where cmake and make were
# run during the build process. Typically, this is done in the kaiju
# subdirectory "build_mpi" (for MPI builds).
option_descriptions["kaiju_build_directory"]["default"] = (
f"{options['kaijuhome']}/build_mpi"
)
# Prompt for the name of the job.
for on in ["job_name"]:
o[on] = get_run_option(on, od[on], mode)
# Fetch options about the kaiju software to use.
option_names = [
"kaiju_build_directory"
]
for name in option_names:
options[name] = get_run_option(name, option_descriptions[name])
# Ask the user if a boundary condition file is available. If not, offer to
# generate one from the start and end date.
for on in ["bcwind_available"]:
o[on] = get_run_option(on, od[on], mode)
if o["bcwind_available"] == "Y":
for on in ["bcwind_file"]:
o[on] = get_run_option(on, od[on], mode)
# Fetch the start and stop date from the bcwind file.
start_date, stop_date = fetch_bcwind_time_range(o[on])
o["start_date"] = start_date
o["stop_date"] = stop_date
else:
# Prompt for the start and stop date of the run. This will also be
# used as the start and stop date of the data in the boundary condition
# file, which will be created using CDAWeb data.
for on in ["start_date", "stop_date"]:
o[on] = get_run_option(on, od[on], mode)
# Fetch high-level options about the run.
# NOTE: All files must be in the run directory.
option_names = [
"run_directory", "runid", "LFM_grid_type", "solar_wind_file"
]
for name in option_names:
options[name] = get_run_option(name, option_descriptions[name])
# Compute the total simulation time in seconds and use it as the default
# segment duration.
date_format = '%Y-%m-%dT%H:%M:%S'
start_date = o["start_date"]
stop_date = o["stop_date"]
t1 = datetime.datetime.strptime(start_date, date_format)
t2 = datetime.datetime.strptime(stop_date, date_format)
simulation_duration = (t2 - t1).total_seconds()
od["segment_duration"]["default"] = str(simulation_duration)
# PBS job options
# Assumes all nodes have same architecture.
hpc_system = options["hpc_system"]
option_names = [
"pbs_account_name", "pbs_queue", "pbs_walltime",
"pbs_select", "pbs_ncpus",
"pbs_mpiprocs", "pbs_ompthreads",
"pbs_num_helpers", "pbs_numjobs",
]
for name in option_names:
options[name] = get_run_option(name, option_descriptions[name])
# Ask if the user wants to split the run into multiple segments.
# If so, prompt for the segment duration. If not, use the default
# for the segment duration (which is the simulation duration).
for on in ["use_segments"]:
o[on] = get_run_option(on, od[on], mode)
if o["use_segments"] == "Y":
for on in ["segment_duration"]:
o[on] = get_run_option(on, od[on], mode)
else:
o["segment_duration"] = od["segment_duration"]["default"]
# The helper nodes use one OMP thread per core.
options["pbs_helper_ompthreads"] = options["pbs_ncpus"]
# Compute the number of segments based on the simulation duration and
# segment duration, with 1 additional segment just for spinup. Add 1 if
# there is a remainder.
if o["use_segments"] == "Y":
num_segments = simulation_duration/float(o["segment_duration"])
if num_segments > int(num_segments):
num_segments += 1
num_segments = int(num_segments) + 1
else:
num_segments = 1
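# Worked example (illustrative numbers): simulation_duration = 36000 s and
# segment_duration = 14400 s give 36000/14400 = 2.5; the remainder bump makes
# it 3.5, and int(3.5) + 1 = 4 segments total (three run segments plus the
# dedicated spinup segment).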
# Specify the command to launch an MPI program.
options["pbs_mpiexec_command"] = "mpiexec omplace"
# Prompt for the remaining parameters.
for on in ["gamera_grid_type", "gamera_grid_inner_radius", "hpc_system"]:
o[on] = get_run_option(on, od[on], mode)
# Assemble the module load statements into a single string.
hpc_system = options["hpc_system"]
module_names = modules[hpc_system]
pbs_module_load = ""
for module_name in module_names:
pbs_module_load += f"module load {module_name}\n"
pbs_module_load = pbs_module_load.rstrip()
options["pbs_module_load"] = pbs_module_load
#-------------------------------------------------------------------------
# Specify other environment variables to set.
options["pbs_additional_environment_variables"] = ""
# PBS options
options["pbs"] = {}
o = options["pbs"]
# GAMERA section
option_names = [
"gamera_sim_doH5g", "gamera_sim_H5Grid", "gamera_sim_icType",
"gamera_sim_pdmb", "gamera_sim_rmeth",
"gamera_floors_dFloor", "gamera_floors_pFloor",
"gamera_timestep_doCPR", "gamera_timestep_limCPR",
"gamera_restart_doRes", "gamera_restart_resID", "gamera_restart_nRes",
"gamera_physics_doMHD", "gamera_physics_doBoris", "gamera_physics_Ca",
"gamera_ring_gid", "gamera_ring_doRing",
"gamera_ringknobs_doVClean",
"gamera_wind_tsfile",
"gamera_source_doSource", "gamera_source_doWolfLim",
"gamera_source_doBounceDT", "gamera_source_nBounce",
"gamera_iPdir_N", "gamera_iPdir_bcPeriodic",
"gamera_jPdir_N", "gamera_jPdir_bcPeriodic",
"gamera_kPdir_N", "gamera_kPdir_bcPeriodic",
]
for name in option_names:
options[name] = get_run_option(name, option_descriptions[name])
# Common (HPC platform-independent) options
od = option_descriptions["pbs"]["_common"]
od["account_name"]["default"] = os.getlogin()
od["kaiju_install_directory"]["default"] = KAIJUHOME
od["kaiju_build_directory"]["default"] = os.path.join(KAIJUHOME, "build_mpi")
od["num_segments"]["default"] = str(num_segments)
for on in od:
o[on] = get_run_option(on, od[on], mode)
# VOLTRON section
option_names = [
"voltron_time_tFin",
"voltron_spinup_doSpin", "voltron_spinup_tSpin",
"voltron_output_dtOut", "voltron_output_tsOut",
"voltron_coupling_dtCouple", "voltron_coupling_rTrc",
"voltron_coupling_imType", "voltron_coupling_doQkSquish",
"voltron_coupling_qkSquishStride",
"voltron_restart_dtRes",
"voltron_imag_doInit",
"voltron_ebsquish_epsSquish",
]
for name in option_names:
options[name] = get_run_option(name, option_descriptions[name])
# HPC platform-specific options
hpc_platform = options["simulation"]["hpc_system"]
gamera_grid_type = options["simulation"]["gamera_grid_type"]
od = option_descriptions["pbs"][hpc_platform]
od["select"]["default"] = od["select"]["default"][gamera_grid_type]
od["num_helpers"]["default"] = od["num_helpers"]["default"][gamera_grid_type]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# CHIMP section
option_names = [
"chimp_units_uid",
"chimp_fields_grType",
"chimp_domain_dtype",
"chimp_tracer_epsds",
]
for name in option_names:
options[name] = get_run_option(name, option_descriptions[name])
# Compute the number of nodes in the second chunk.
# Should be 1 (for voltron) + num_helpers.
num_helpers = int(o["num_helpers"])
select2 = 1 + num_helpers
o["select2"] = str(select2)
# REMIX section
option_names = [
"remix_conductance_doStarlight", "remix_conductance_doRamp",
"remix_precipitation_aurora_model_type", "remix_precipitation_alpha",
"remix_precipitation_beta",
]
for name in option_names:
options[name] = get_run_option(name, option_descriptions[name])
#-------------------------------------------------------------------------
# RCM section
option_names = [
"rcm_rcmdomain_domType",
"rcm_ellipse_xSun", "rcm_ellipse_yDD", "rcm_ellipse_xTail",
"rcm_ellipse_isDynamic", "rcm_grid_LowLat", "rcm_grid_HiLat",
"rcm_grid_doLatStretch",
"rcm_plasmasphere_isDynamic", "rcm_plasmasphere_initKp",
"rcm_plasmasphere_doRefill", "rcm_plasmasphere_DenPP0",
"rcm_plasmasphere_tAvg",
]
for name in option_names:
options[name] = get_run_option(name, option_descriptions[name])
# GAMERA options
options["gamera"] = {}
# <sim> options
options["gamera"]["sim"] = {}
o = options["gamera"]["sim"]
od = option_descriptions["gamera"]["sim"]
od["H5Grid"]["default"] = f"lfm{options['simulation']['gamera_grid_type']}.h5"
od["runid"]["default"] = options["simulation"]["job_name"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <floors> options
options["gamera"]["floors"] = {}
o = options["gamera"]["floors"]
od = option_descriptions["gamera"]["floors"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <timestep> options
options["gamera"]["timestep"] = {}
o = options["gamera"]["timestep"]
od = option_descriptions["gamera"]["timestep"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <restart> options
# NOTE: Update this later so that restart parameters are only
# prompted for when doRes is "T".
options["gamera"]["restart"] = {}
o = options["gamera"]["restart"]
od = option_descriptions["gamera"]["restart"]
od["resID"]["default"] = options["simulation"]["job_name"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <physics> options
options["gamera"]["physics"] = {}
o = options["gamera"]["physics"]
od = option_descriptions["gamera"]["physics"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <ring> options
options["gamera"]["ring"] = {}
o = options["gamera"]["ring"]
od = option_descriptions["gamera"]["ring"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <wind> options
options["gamera"]["wind"] = {}
o = options["gamera"]["wind"]
od = option_descriptions["gamera"]["wind"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <source> options
options["gamera"]["source"] = {}
o = options["gamera"]["source"]
od = option_descriptions["gamera"]["source"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <iPdir> options
options["gamera"]["iPdir"] = {}
o = options["gamera"]["iPdir"]
od = option_descriptions["gamera"]["iPdir"]
od["N"]["default"] = od["N"]["default"][gamera_grid_type]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <jPdir> options
options["gamera"]["jPdir"] = {}
o = options["gamera"]["jPdir"]
od = option_descriptions["gamera"]["jPdir"]
od["N"]["default"] = od["N"]["default"][gamera_grid_type]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <kPdir> options
options["gamera"]["kPdir"] = {}
o = options["gamera"]["kPdir"]
od = option_descriptions["gamera"]["kPdir"]
od["N"]["default"] = od["N"]["default"][gamera_grid_type]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <coupling> options
options["gamera"]["coupling"] = {}
o = options["gamera"]["coupling"]
od = option_descriptions["gamera"]["coupling"]
od["blockHalo"]["default"] = od["blockHalo"]["default"][hpc_platform]
for on in od:
o[on] = get_run_option(on, od[on], mode)
#-------------------------------------------------------------------------
# VOLTRON options
options["voltron"] = {}
# <time> options
options["voltron"]["time"] = {}
o = options["voltron"]["time"]
od = option_descriptions["voltron"]["time"]
od["tFin"]["default"] = simulation_duration
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <spinup> options
options["voltron"]["spinup"] = {}
o = options["voltron"]["spinup"]
od = option_descriptions["voltron"]["spinup"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <output> options
options["voltron"]["output"] = {}
o = options["voltron"]["output"]
od = option_descriptions["voltron"]["output"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <coupling> options
options["voltron"]["coupling"] = {}
o = options["voltron"]["coupling"]
od = option_descriptions["voltron"]["coupling"]
od["doAsyncCoupling"]["default"] = od["doAsyncCoupling"]["default"][hpc_platform]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <restart> options
options["voltron"]["restart"] = {}
o = options["voltron"]["restart"]
od = option_descriptions["voltron"]["restart"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <imag> options
options["voltron"]["imag"] = {}
o = options["voltron"]["imag"]
od = option_descriptions["voltron"]["imag"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <helpers> options
options["voltron"]["helpers"] = {}
o = options["voltron"]["helpers"]
od = option_descriptions["voltron"]["helpers"]
od["numHelpers"]["default"] = num_helpers
if num_helpers == 0:
od["useHelpers"]["default"] = "F"
for on in od:
o[on] = get_run_option(on, od[on], mode)
#-------------------------------------------------------------------------
# CHIMP options
options["chimp"] = {}
# <units> options
options["chimp"]["units"] = {}
o = options["chimp"]["units"]
od = option_descriptions["chimp"]["units"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <fields> options
options["chimp"]["fields"] = {}
o = options["chimp"]["fields"]
od = option_descriptions["chimp"]["fields"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <domain> options
options["chimp"]["domain"] = {}
o = options["chimp"]["domain"]
od = option_descriptions["chimp"]["domain"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <tracer> options
options["chimp"]["tracer"] = {}
o = options["chimp"]["tracer"]
od = option_descriptions["chimp"]["tracer"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
#-------------------------------------------------------------------------
# REMIX options
options["remix"] = {}
# <conductance> options
options["remix"]["conductance"] = {}
o = options["remix"]["conductance"]
od = option_descriptions["remix"]["conductance"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <precipitation> options
options["remix"]["precipitation"] = {}
o = options["remix"]["precipitation"]
od = option_descriptions["remix"]["precipitation"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
#-------------------------------------------------------------------------
# RCM options
options["rcm"] = {}
# <conductance> options
options["rcm"]["rcmdomain"] = {}
o = options["rcm"]["rcmdomain"]
od = option_descriptions["rcm"]["rcmdomain"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <ellipse> options
# Only use if domType == "ELLIPSE"?
options["rcm"]["ellipse"] = {}
o = options["rcm"]["ellipse"]
od = option_descriptions["rcm"]["ellipse"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <grid> options
options["rcm"]["grid"] = {}
o = options["rcm"]["grid"]
od = option_descriptions["rcm"]["grid"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
# <plasmasphere> options
options["rcm"]["plasmasphere"] = {}
o = options["rcm"]["plasmasphere"]
od = option_descriptions["rcm"]["plasmasphere"]
for on in od:
o[on] = get_run_option(on, od[on], mode)
#-------------------------------------------------------------------------
# Return the options dictionary.
return options
@@ -372,9 +604,18 @@ def run_preprocessing_steps(options):
# Create the LFM grid file.
# NOTE: Assumes genLFM.py is in PATH.
cmd = "genLFM.py"
args = [cmd, "-gid", options["LFM_grid_type"]]
args = [cmd, "-gid", options["simulation"]["gamera_grid_type"],
'-Rin', options["simulation"]["gamera_grid_inner_radius"]]
subprocess.run(args, check=True)
# If needed, create the solar wind file by fetching data from CDAWeb.
# NOTE: Assumes cda2wind.py is in PATH.
if options["simulation"]["bcwind_available"] == "N":
cmd = "cda2wind.py"
args = [cmd, "-t0", options["simulation"]["start_date"], "-t1",
options["simulation"]["stop_date"], "-interp", "-bx"]
subprocess.run(args, check=True)
# Create the RCM configuration file.
# NOTE: Assumes genRCM.py is in PATH.
cmd = "genRCM.py"
@@ -410,31 +651,63 @@ def create_ini_files(options):
# Initialize the list of file paths.
ini_files = []
# If a single segment was requested, create a single file.
# If multiple segments were requested, create an .ini file for each
# segment.
if int(options["pbs_numjobs"]) == 1:
ini_content = template.render(options)
ini_file = os.path.join(
# Create the job scripts.
if int(options["pbs"]["num_segments"]) > 1:
options["run_directory"], f"{options['runid']}.ini"
# Create an .ini file for the spinup segment.
opt = copy.deepcopy(options) # Need a copy of options
runid = opt["simulation"]["job_name"]
job = 0
segment_id = f"{runid}-{job:02d}"
opt["simulation"]["segment_id"] = segment_id
tFin = float(opt["voltron"]["time"]["tFin"])
dT = float(options["simulation"]["segment_duration"])
tFin_segment = 1.0 # Just perform spinup in first segment
opt["voltron"]["time"]["tFin"] = str(tFin_segment)
ini_content = template.render(opt)
ini_file = os.path.join(
opt["pbs"]["run_directory"], f"{opt['simulation']['segment_id']}.ini"
)
ini_files.append(ini_file)
with open(ini_file, "w", encoding="utf-8") as f:
f.write(ini_content)
ini_files.append(ini_file)
else:
for job in range(int(options["pbs_numjobs"])):
opt = options # Need a copy of options
if job > 0:
opt["gamera_restart_doRes"] = "T"
ini_content = template.render(options)
# Create an .ini file for each simulation segment.
for job in range(1, int(options["pbs"]["num_segments"])):
opt = copy.deepcopy(options) # Need a copy of options
runid = opt["simulation"]["job_name"]
segment_id = f"{runid}-{job:02d}"
opt["simulation"]["segment_id"] = segment_id
opt["gamera"]["restart"]["doRes"] = "T"
tFin = float(opt["voltron"]["time"]["tFin"])
dT = float(options["simulation"]["segment_duration"])
tFin_segment = job*dT + 1 # Add 1 to ensure last restart file is created
if tFin_segment > tFin: # Last segment may be shorter than the others.
tFin_segment = tFin + 1
opt["voltron"]["time"]["tFin"] = str(tFin_segment)
ini_content = template.render(opt)
ini_file = os.path.join(
options["run_directory"], f"{options['runid']}-{job:02d}.ini"
opt["pbs"]["run_directory"], f"{opt['simulation']['segment_id']}.ini"
)
ini_files.append(ini_file)
with open(ini_file, "w", encoding="utf-8") as f:
f.write(ini_content)
else:
# Use a single job segment.
job = 0
opt = copy.deepcopy(options) # Need a copy of options
runid = opt["simulation"]["job_name"]
segment_id = f"{runid}-{job:02d}"
opt["simulation"]["segment_id"] = segment_id
ini_content = template.render(opt)
ini_file = os.path.join(
opt["pbs"]["run_directory"], f"{opt['simulation']['segment_id']}.ini"
)
ini_files.append(ini_file)
with open(ini_file, "w", encoding="utf-8") as f:
f.write(ini_content)
# Return the paths to the .ini files.
return ini_files
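To make the per-segment end times concrete (illustrative values only, mirroring the arithmetic above):

tFin, dT = 36000.0, 14400.0
ends = []
for job in range(1, 4):
    t = job*dT + 1  # +1 s so the final restart file is written
    ends.append(min(t, tFin + 1))
print(ends)  # [14401.0, 28801.0, 36001.0]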
@@ -442,7 +715,8 @@ def create_ini_files(options):
def convert_ini_to_xml(ini_files):
"""Convert the .ini files to XML.
Convert the .ini files describing the run to XML files.
Convert the .ini files describing the run to XML files. The intermediate
.ini files are then deleted.
Parameters
----------
@@ -474,6 +748,9 @@ def convert_ini_to_xml(ini_files):
# Add this file to the list of XML files.
xml_files.append(xml_file)
# Remove the .ini file.
os.remove(ini_file)
# Return the paths to the XML files.
return xml_files
@@ -492,40 +769,49 @@ def create_pbs_scripts(options):
-------
pbs_scripts : list of str
Paths to PBS job script.
submit_all_jobs_script : str
Path to script which submits all PBS jobs.
Raises
------
None
TypeError:
For a non-integral number of nodes requested
"""
# Read and create the template.
# Compute the number of nodes to request based on the MPI decomposition
# and the MPI ranks per node.
ni = int(options["gamera"]["iPdir"]["N"])
nj = int(options["gamera"]["jPdir"]["N"])
nk = int(options["gamera"]["kPdir"]["N"])
ranks_per_node = int(options["pbs"]["mpiprocs"])
select_nodes = ni*nj*nk/ranks_per_node
if int(select_nodes) != select_nodes:
raise TypeError(f"Requested non-integral node count ({select_nodes})!")
options["pbs"]["select"] = str(int(select_nodes))
# Read the template.
with open(PBS_TEMPLATE, "r", encoding="utf-8") as f:
template_content = f.read()
template = Template(template_content)
# Create a PBS script for each segment.
pbs_scripts = []
if int(options["pbs_numjobs"]) == 1:
pbs_content = template.render(options)
for job in range(int(options["pbs"]["num_segments"])):
opt = copy.deepcopy(options) # Need a copy of options
runid = opt["simulation"]["job_name"]
segment_id = f"{runid}-{job:02d}"
opt["simulation"]["segment_id"] = segment_id
pbs_content = template.render(opt)
pbs_script = os.path.join(
options["run_directory"], f"{options['runid']}.pbs"
opt["pbs"]["run_directory"],
f"{opt['simulation']['segment_id']}.pbs"
)
pbs_scripts.append(pbs_script)
with open(pbs_script, "w", encoding="utf-8") as f:
f.write(pbs_content)
pbs_scripts.append(pbs_script)
else:
for segment in range(int(options["pbs_numjobs"])):
pbs_content = template.render(options)
pbs_script = os.path.join(
options["run_directory"],
f"{options['runid']}-{segment:02d}.pbs"
)
pbs_scripts.append(pbs_script)
with open(pbs_script, "w", encoding="utf-8") as f:
f.write(pbs_content)
# Create a single script which will submit all of the PBS jobs in order.
path = "submit_pbs.sh"
with open(path, "w", encoding="utf-8") as f:
submit_all_jobs_script = f"{options['simulation']['job_name']}_pbs.sh"
with open(submit_all_jobs_script, "w", encoding="utf-8") as f:
s = pbs_scripts[0]
cmd = f"job_id=`qsub {s}`\n"
f.write(cmd)
@@ -538,9 +824,9 @@ def create_pbs_scripts(options):
f.write(cmd)
cmd = f"echo $job_id\n"
f.write(cmd)
# Return the paths to the PBS scripts.
return pbs_scripts
return pbs_scripts, submit_all_jobs_script
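The helper script written above chains the segments so each starts only after its predecessor finishes; the dependency flag itself falls in an elided hunk, but a sketch of the pattern (assuming PBS's qsub -W depend=afterok, with hypothetical script names) looks like:

scripts = ["helio-00.pbs", "helio-01.pbs", "helio-02.pbs"]  # hypothetical names
lines = ["job_id=`qsub %s`\n" % scripts[0]]
for s in scripts[1:]:
    # Each later segment waits for the previous job to finish successfully.
    lines.append("job_id=`qsub -W depend=afterok:$job_id %s`\n" % s)
lines.append("echo $job_id\n")
with open("helio_pbs.sh", "w", encoding="utf-8") as f:
    f.writelines(lines)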
def main():
@@ -565,11 +851,12 @@ def main():
# Parse the command-line arguments.
args = parser.parse_args()
debug = args.debug
options_path = args.options
verbose = args.verbose
if debug:
if args.debug:
print(f"args = {args}")
clobber = args.clobber
debug = args.debug
options_path = args.options_path
verbose = args.verbose
# Fetch the run options.
if options_path:
@@ -578,21 +865,21 @@ def main():
options = json.load(f)
else:
# Prompt the user for the run options.
options = get_run_options()
options = prompt_user_for_run_options(args)
if debug:
print(f"options = {options}")
# Save the options dictionary as a JSON file.
path = os.path.join(options["run_directory"], "options.json")
# Move to the run directory.
os.chdir(options["pbs"]["run_directory"])
# Save the options dictionary as a JSON file in the current directory.
path = f"{options['simulation']['job_name']}.json"
if os.path.exists(path):
if not clobber:
raise FileExistsError(f"Options file {path} exists!")
with open(path, "w", encoding="utf-8") as f:
json.dump(options, f, indent=JSON_INDENT)
# Save the current directory.
original_directory = os.getcwd()
# Move to the output directory.
os.chdir(options["run_directory"])
# Run the preprocessing steps.
if verbose:
print("Running preprocessing steps.")
@@ -615,13 +902,14 @@ def main():
# Create the PBS job script(s).
if verbose:
print("Creating PBS job script(s) for run.")
pbs_scripts = create_pbs_scripts(options)
pbs_scripts, all_jobs_script = create_pbs_scripts(options)
if verbose:
print(f"The PBS job scripts {pbs_scripts} are ready.")
# Move back to the original directory.
os.chdir(original_directory)
print(f"The PBS scripts {pbs_scripts} have been created, each with a "
"corresponding XML file. To submit the jobs with the proper "
"dependency (to ensure each segment runs in order), please run the "
f"script {all_jobs_script} like this:\n"
f"bash {all_jobs_script}")
if __name__ == "__main__":
"""Begin main program."""

File diff suppressed because it is too large

View File

@@ -1,133 +1,134 @@
## Gamera ##
[sim]
runid = {{ runid }}
doH5g = {{ gamera_sim_doH5g }}
H5Grid = {{ gamera_sim_H5Grid }}
icType = {{ gamera_sim_icType }}
pdmb = {{ gamera_sim_pdmb }}
rmeth = {{ gamera_sim_rmeth }}
[floors]
dFloor = {{ gamera_floors_dFloor }}
pFloor = {{ gamera_floors_pFloor }}
[timestep]
doCPR = {{ gamera_timestep_doCPR }}
limCPR = {{ gamera_timestep_limCPR }}
[restart]
doRes = {{ gamera_restart_doRes }}
resID = {{ gamera_restart_resID }}
nRes = {{ gamera_restart_nRes }}
[physics]
doMHD = {{ gamera_physics_doMHD }}
doBoris = {{ gamera_physics_doBoris }}
Ca = {{ gamera_physics_Ca }}
[ring]
gid = {{ gamera_ring_gid }}
doRing = {{ gamera_ring_doRing }}
[ringknobs]
doVClean = {{ gamera_ringknobs_doVClean }}
[wind]
tsfile = {{ gamera_wind_tsfile }}
[source]
doSource = {{ gamera_source_doSource }}
doWolfLim = {{ gamera_source_doWolfLim }}
doBounceDT = {{ gamera_source_doBounceDT }}
nBounce = {{ gamera_source_nBounce }}
[iPdir]
N = {{ gamera_iPdir_N }}
bcPeriodic = {{ gamera_iPdir_bcPeriodic }}
[jPdir]
N = {{ gamera_jPdir_N }}
bcPeriodic = {{ gamera_jPdir_bcPeriodic }}
[kPdir]
N = {{ gamera_kPdir_N }}
bcPeriodic = {{ gamera_kPdir_bcPeriodic }}
## VOLTRON ##
[time]
tFin = {{ voltron_time_tFin }}
tFin = {{ voltron.time.tFin }}
[spinup]
doSpin = {{ voltron_spinup_doSpin }}
tSpin = {{ voltron_spinup_tSpin }}
doSpin = {{ voltron.spinup.doSpin }}
tSpin = {{ voltron.spinup.tSpin }}
[output]
dtOut = {{ voltron_output_dtOut }}
tsOut = {{ voltron_output_tsOut }}
dtOut = {{ voltron.output.dtOut }}
tsOut = {{ voltron.output.tsOut }}
[coupling]
dtCouple = {{ voltron_coupling_dtCouple }}
rTrc = {{ voltron_coupling_rTrc }}
imType = {{ voltron_coupling_imType }}
doQkSquish = {{ voltron_coupling_doQkSquish }}
qkSquishStride = {{ voltron_coupling_qkSquishStride }}
dtCouple = {{ voltron.coupling.dtCouple }}
imType = {{ voltron.coupling.imType }}
doQkSquish = {{ voltron.coupling.doQkSquish }}
qkSquishStride = {{ voltron.coupling.qkSquishStride }}
doAsyncCoupling = {{ voltron.coupling.doAsyncCoupling }}
[restart]
dtRes = {{ voltron_restart_dtRes }}
dtRes = {{ voltron.restart.dtRes }}
[imag]
doInit = {{ voltron_imag_doInit }}
doInit = {{ voltron.imag.doInit }}
[ebsquish]
epsSquish = {{ voltron_ebsquish_epsSquish }}
[helpers]
numHelpers = {{ voltron.helpers.numHelpers }}
useHelpers = {{ voltron.helpers.useHelpers }}
doSquishHelp = {{ voltron.helpers.doSquishHelp }}
## Gamera ##
[sim]
runid = {{ simulation.job_name }}
doH5g = {{ gamera.sim.doH5g }}
H5Grid = {{ gamera.sim.H5Grid }}
icType = {{ gamera.sim.icType }}
pdmb = {{ gamera.sim.pdmb }}
rmeth = {{ gamera.sim.rmeth }}
[floors]
dFloor = {{ gamera.floors.dFloor }}
pFloor = {{ gamera.floors.pFloor }}
[timestep]
doCPR = {{ gamera.timestep.doCPR }}
limCPR = {{ gamera.timestep.limCPR }}
[restart]
doRes = {{ gamera.restart.doRes }}
resID = {{ gamera.restart.resID }}
nRes = {{ gamera.restart.nRes }}
[physics]
doMHD = {{ gamera.physics.doMHD }}
doBoris = {{ gamera.physics.doBoris }}
Ca = {{ gamera.physics.Ca }}
[ring]
gid = {{ gamera.ring.gid }}
doRing = {{ gamera.ring.doRing }}
[wind]
tsfile = {{ gamera.wind.tsfile }}
[source]
doSource = {{ gamera.source.doSource }}
doWolfLim = {{ gamera.source.doWolfLim }}
doBounceDT = {{ gamera.source.doBounceDT }}
nBounce = {{ gamera.source.nBounce }}
[iPdir]
N = {{ gamera.iPdir.N }}
bcPeriodic = {{ gamera.iPdir.bcPeriodic }}
[jPdir]
N = {{ gamera.jPdir.N }}
bcPeriodic = {{ gamera.jPdir.bcPeriodic }}
[kPdir]
N = {{ gamera.kPdir.N }}
bcPeriodic = {{ gamera.kPdir.bcPeriodic }}
[coupling]
blockHalo = {{ gamera.coupling.blockHalo }}
## CHIMP ##
[units]
uid = {{ chimp_units_uid }}
uid = {{ chimp.units.uid }}
[fields]
grType = {{ chimp_fields_grType }}
grType = {{ chimp.fields.grType }}
[domain]
dtype = {{ chimp_domain_dtype }}
dtype = {{ chimp.domain.dtype }}
[tracer]
epsds = {{ chimp_tracer_epsds }}
epsds = {{ chimp.tracer.epsds }}
## REMIX ##
[conductance]
doStarlight = {{ remix_conductance_doStarlight }}
doRamp = {{ remix_conductance_doRamp }}
doStarlight = {{ remix.conductance.doStarlight }}
apply_cap = {{ remix.conductance.apply_cap }}
[precipitation]
aurora_model_type = {{ remix_precipitation_aurora_model_type }}
alpha = {{ remix_precipitation_alpha }}
beta = {{ remix_precipitation_beta }}
aurora_model_type = {{ remix.precipitation.aurora_model_type }}
beta = {{ remix.precipitation.beta }}
doAuroralSmooth = {{ remix.precipitation.doAuroralSmooth }}
## RCM ##
[rcmdomain]
domType = {{ rcm_rcmdomain_domType }}
domType = {{ rcm.rcmdomain.domType }}
[ellipse]
xSun = {{ rcm_ellipse_xSun }}
yDD = {{ rcm_ellipse_yDD }}
xTail = {{ rcm_ellipse_xTail }}
isDynamic = {{ rcm_ellipse_isDynamic }}
xSun = {{ rcm.ellipse.xSun }}
yDD = {{ rcm.ellipse.yDD }}
xTail = {{ rcm.ellipse.xTail }}
isDynamic = {{ rcm.ellipse.isDynamic }}
[grid]
LowLat = {{ rcm_grid_LowLat }}
HiLat = {{ rcm_grid_HiLat }}
doLatStretch = {{ rcm_grid_doLatStretch }}
LowLat = {{ rcm.grid.LowLat }}
HiLat = {{ rcm.grid.HiLat }}
[plasmasphere]
isDynamic = {{ rcm_plasmasphere_isDynamic }}
initKp = {{ rcm_plasmasphere_initKp }}
doRefill = {{ rcm_plasmasphere_doRefill }}
DenPP0 = {{ rcm_plasmasphere_DenPP0 }}
tAvg = {{ rcm_plasmasphere_tAvg }}
isDynamic = {{ rcm.plasmasphere.isDynamic }}
initKp = {{ rcm.plasmasphere.initKp }}
doRefill = {{ rcm.plasmasphere.doRefill }}
tAvg = {{ rcm.plasmasphere.tAvg }}
# THERE MUST BE NO TRAILING BLANK LINES AT THE END OF THIS FILE!
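The switch from flat names like gamera_sim_doH5g to dotted names like gamera.sim.doH5g means the template is now rendered against nested dictionaries rather than a flat namespace. A minimal sketch of how Jinja2 resolves the dotted lookups (option names here are illustrative):

from jinja2 import Template

tpl = Template("tFin = {{ voltron.time.tFin }}\ndoSpin = {{ voltron.spinup.doSpin }}")
options = {"voltron": {"time": {"tFin": 3600.0}, "spinup": {"doSpin": True}}}
print(tpl.render(**options))
# tFin = 3600.0
# doSpin = True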

View File

@@ -1,12 +1,12 @@
#!/bin/bash
# This script was generated to run on {{ hpc_system }}.
# This script was generated to run on {{ simulation.hpc_system }}.
#PBS -N {{ runid }}
#PBS -A {{ pbs_account_name }}
#PBS -q {{ pbs_queue }}
#PBS -l walltime={{ pbs_walltime }}
#PBS -l select={{ pbs_select }}:ncpus={{ pbs_ncpus }}:mpiprocs={{ pbs_mpiprocs }}:ompthreads={{ pbs_ompthreads }}+{{ pbs_num_helpers }}:ncpus={{ pbs_ncpus }}:mpiprocs=1:ompthreads={{ pbs_helper_ompthreads }}
#PBS -N {{ simulation.segment_id }}
#PBS -A {{ pbs.account_name }}
#PBS -q {{ pbs.queue }}
#PBS -l walltime={{ pbs.walltime }}
#PBS -l select={{ pbs.select }}:ncpus={{ pbs.ncpus }}:mpiprocs={{ pbs.mpiprocs }}:ompthreads={{ pbs.ompthreads }}{{ pbs.other }}+{{ pbs.select2 }}:ncpus={{ pbs.ncpus }}:mpiprocs={{ pbs.helper_mpiprocs }}:ompthreads={{ pbs.helper_ompthreads }}{{ pbs.other }}
#PBS -m abe
#PBS -j oe
@@ -15,32 +15,49 @@ echo "Job $PBS_JOBID started at `date` on `hostname`."
# Specify the ID string for the run.
export RUNID=$PBS_JOBNAME
# Some HPC systems do not start the job in the correct directory.
# Move to the intended directory. This will move the job to the correct
# directory on problem systems, such as pleiades (for some users), and have no
# effect on other systems.
cd $PBS_O_WORKDIR
# Load the required modules for kaiju.
module purge
{{ pbs_module_load}}
{%- for module in pbs.modules %}
module load {{ module }}
{%- endfor %}
module list
# Define the kaiju installation location.
export KAIJU_INSTALL_DIR={{ kaijuhome }}
export KAIJU_INSTALL_DIR={{ pbs.kaiju_install_directory }}
# Set kaiju-related environment variables.
# This script sets KAIJUHOME and other environment variables.
source $KAIJU_INSTALL_DIR/scripts/setupEnvironment.sh
# Add the kaiju build bin directory to PATH.
export PATH={{ kaiju_build_directory }}/bin:$PATH
export PATH={{ pbs.kaiju_build_directory }}/bin:$PATH
# Set other environment variables.
{{ pbs_additional_environment_variables }}
{{ pbs.additional_environment_variables -}}
export MPI_TYPE_DEPTH=32
export OMP_STACKSIZE=100M
export OMP_NUM_THREADS={{ pbs.helper_ompthreads }}
echo "The active environment variables are:"
printenv
# Copy the node file for use by the placer command.
nodefile=nodefile.$PBS_JOBID
cp $PBS_NODEFILE $nodefile
# Copy the main executable.
cp {{ pbs.kaiju_build_directory }}/bin/voltron_mpi.x ./voltron_mpi.x
# Run the model. Direct output from the program is saved in a text file.
EXE=voltron_mpi.x
EXE=./voltron_mpi.x
OUTPUT_FILE="$EXE-{{ simulation.segment_id }}.out"
echo "Running $EXE on model $RUNID."
{{ pbs_mpiexec_command }} $EXE $RUNID.xml >& $RUNID.out
{{ pbs.mpiexec_command }} $EXE $RUNID.xml &>> $OUTPUT_FILE
echo "Job $PBS_JOBID ended at `date` on `hostname`."

View File

@@ -49,6 +49,7 @@ if __name__ == "__main__":
parser.add_argument('-grid',type=str,metavar="grid",default=grid,help="inGrid file to read from (default: %(default)s)")
parser.add_argument('--keep',action='store_true',default=False,help='Keep intermediate files (default: %(default)s)')
parser.add_argument('--norescale',action='store_true',default=False,help='Do not rescale (up or down) (default: %(default)s)')
parser.add_argument('--down',action='store_true',default=False,help='Downscale instead of upscale (default: %(default)s)')
@@ -68,6 +69,7 @@ if __name__ == "__main__":
grid = args.grid
doKeep = args.keep
doUp = not args.down
doRescale = not args.norescale
#Pull tiled restart, write to temp file
#Stupidly writing temp restart to reuse old code
@@ -130,26 +132,38 @@ if __name__ == "__main__":
#Close input
iH5.close()
if (doUp):
print("Upscaling data ...")
#Do upscaling
Xr,Yr,Zr = upscl.upGrid(X,Y,Z)
Gr = upscl.upGas(X,Y,Z,G,Xr.T,Yr.T,Zr.T)
FluxR = upscl.upFlux(X,Y,Z,M,Xr,Yr,Zr)
oGr = upscl.upGas(X,Y,Z,oG,Xr.T,Yr.T,Zr.T)
oFluxR = upscl.upFlux(X,Y,Z,oM,Xr,Yr,Zr)
if (doGas0):
G0r = upscl.upGas(X,Y,Z,G0,Xr.T,Yr.T,Zr.T)
if (doRescale):
if (doUp):
print("Upscaling data ...")
#Do upscaling
Xr,Yr,Zr = upscl.upGrid(X,Y,Z)
Gr = upscl.upGas(X,Y,Z,G,Xr.T,Yr.T,Zr.T)
FluxR = upscl.upFlux(X,Y,Z,M,Xr,Yr,Zr)
oGr = upscl.upGas(X,Y,Z,oG,Xr.T,Yr.T,Zr.T)
oFluxR = upscl.upFlux(X,Y,Z,oM,Xr,Yr,Zr)
if (doGas0):
G0r = upscl.upGas(X,Y,Z,G0,Xr.T,Yr.T,Zr.T)
else:
print("Downscaling data ...")
Xr,Yr,Zr = upscl.downGrid(X,Y,Z)
Gr = upscl.downGas(X,Y,Z,G,Xr.T,Yr.T,Zr.T)
FluxR = upscl.downFlux(X,Y,Z,M,Xr,Yr,Zr)
oGr = upscl.downGas(X,Y,Z,oG,Xr.T,Yr.T,Zr.T)
oFluxR = upscl.downFlux(X,Y,Z,oM,Xr,Yr,Zr)
if (doGas0):
G0r = upscl.downGas(X,Y,Z,G0,Xr.T,Yr.T,Zr.T)
else:
print("Downscaling data ...")
Xr,Yr,Zr = upscl.downGrid(X,Y,Z)
Gr = upscl.downGas(X,Y,Z,G,Xr.T,Yr.T,Zr.T)
FluxR = upscl.downFlux(X,Y,Z,M,Xr,Yr,Zr)
oGr = upscl.downGas(X,Y,Z,oG,Xr.T,Yr.T,Zr.T)
oFluxR = upscl.downFlux(X,Y,Z,oM,Xr,Yr,Zr)
#No rescale, just set variables
Xr = X.T #Adding transpose to be consistent w/ rescaling code
Yr = Y.T
Zr = Z.T
Gr = G
FluxR = M
oGr = oG
oFluxR = oM
if (doGas0):
G0r = upscl.downGas(X,Y,Z,G0,Xr.T,Yr.T,Zr.T)
G0r = G0
#Write out grid to restart
oH5.create_dataset("X",data=Xr.T)
oH5.create_dataset("Y",data=Yr.T)
@@ -167,7 +181,10 @@ if __name__ == "__main__":
oH5.close()
#Split up upscaled file
upscl.PushRestartMPI(outid,nRes,oRi,oRj,oRk,Xr.T,Yr.T,Zr.T,Gr,FluxR,oGr,oFluxR,fTmp2X,G0r)
if (doGas0):
upscl.PushRestartMPI(outid,nRes,oRi,oRj,oRk,Xr.T,Yr.T,Zr.T,Gr,FluxR,oGr,oFluxR,fTmp2X,G0r)
else:
upscl.PushRestartMPI(outid,nRes,oRi,oRj,oRk,Xr.T,Yr.T,Zr.T,Gr,FluxR,oGr,oFluxR,fTmp2X)
#Delete temp files
if (not doKeep):
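The hunk ends here; for clarity, a sketch of how the two flags map onto the three paths above (function name is illustrative, not part of the script):

def rescale_action(down, norescale):
    """Map CLI flags to the branch taken in the restart rescaler."""
    if norescale:
        return "passthrough"   # copy grid/gas unchanged (transpose only)
    return "downscale" if down else "upscale"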

View File

@@ -8,7 +8,7 @@ import xml.etree.ElementTree as et
import xml.dom.minidom
import numpy as np
presets = {"gam", "mhdrcm_iono", "mhdrcm_eq", "mhdrcm_bmin", "rcm3D"}
presets = {"gam", "mhdrcm_eq", "mhdrcm_bmin"}
def getDimInfo(h5fname,s0IDstr,preset):
@@ -197,17 +197,24 @@ if __name__ == "__main__":
rcmInfo['rcmKs'] = rcmKs
#Get variable information
#T = kh5.getTs(h5fname,sIDs)
T = np.array([])
print("Getting variable information")
Nt = len(sIDstrs)
T = np.zeros(Nt)
# Also get file info, in case any of the steps are ExternalLinks to other files
# Assume this is done at the step level
fNames_link = [""]*Nt
steps_link = [""]*Nt
with h5.File(h5fname,'r') as f5:
#T = np.array([f5[k].attrs['time'] for k in f5.keys() if "Step" in k])
for sIDstr in sIDstrs:
for i,sIDstr in enumerate(sIDstrs):
if 'time' in f5[sIDstr].attrs.keys():
T = np.append(T, f5[sIDstr].attrs['time'])
T[i] = f5[sIDstr].attrs['time']
else:
T = np.append(T, int(sIDstr.split("#")[1]))
T[i] = int(sIDstr.split("#")[1])
fNames_link[i] = f5[sIDstr].file.filename.split('/')[-1] # !!NOTE: This means the xdmf file must live in the same directory as the data files
steps_link[i] = int(f5[sIDstr].name.split('#')[1])
#steps = np.array([k for k in f5.keys() if "Step" in k])
Nt = len(T)
print("Getting Vars and RootVars")
vIds ,vLocs = kxmf.getVars(h5fname,s0str,gDims)
rvIds,rvLocs = kxmf.getRootVars(h5fname,gDims)
@@ -236,6 +243,7 @@ if __name__ == "__main__":
TGrid.set("CollectionType","Temporal")
#Loop over time slices
print("Writing info for each step")
for n in range(Nt):
nStp = sIDs[n]
Grid = et.SubElement(TGrid,"Grid")
@@ -253,9 +261,9 @@ if __name__ == "__main__":
if doAppendStep:
stepStr = sIDstrs[n]
sgVars = [os.path.join(stepStr, v) for v in gridVars]
kxmf.AddGrid(h5fname,Geom,gDimStr,sgVars)
kxmf.AddGrid(fNames_link[n],Geom,gDimStr,sgVars)
else:
kxmf.AddGrid(h5fname,Geom,gDimStr,gridVars)
kxmf.AddGrid(fNames_link[n],Geom,gDimStr,gridVars)
Time = et.SubElement(Grid,"Time")
Time.set("Value","%f"%T[n])
@@ -268,11 +276,11 @@ if __name__ == "__main__":
#--------------------------------
#Step variables
for v in range(Nv):
kxmf.AddData(Grid,h5fname,vIds[v],vLocs[v],vDimStr,nStp)
kxmf.AddData(Grid,fNames_link[n],vIds[v],vLocs[v],vDimStr,steps_link[n])
#--------------------------------
#Base grid variables
for v in range(Nrv):
kxmf.AddData(Grid,h5fname,rvIds[v],rvLocs[v],vDimStr)
kxmf.AddData(Grid,fNames_link[n],rvIds[v],rvLocs[v],vDimStr)
if doAddRCMVars:
addRCMVars(Grid, dimInfo, rcmInfo, sIDs[n])
@@ -287,4 +295,4 @@ if __name__ == "__main__":
print("Saving as {}".format(fOutXML))
with open(fOutXML,"w") as f:
f.write(xmlStr)
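The change from h5fname to fNames_link[n] matters when steps are HDF5 ExternalLinks into other files: each XDMF DataItem must then point at the file that physically holds the step. Assuming kxmf.AddData emits standard XDMF HDF DataItems, the reference it writes looks roughly like this sketch (file, step, and dimensions are illustrative):

import xml.etree.ElementTree as et

grid = et.Element("Grid")
item = et.SubElement(grid, "DataItem", Format="HDF", NumberType="Float",
                     Precision="4", Dimensions="96 96 96")
# XDMF HDF references are "<file>:<dataset path>"; the file part must be the
# file that actually stores the step, hence fNames_link[n] instead of h5fname
item.text = "msphere.0042.gam.h5:/Step#17/D"
print(et.tostring(grid, encoding="unicode"))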

View File

@@ -1,5 +1,6 @@
#!/usr/bin/env python
#Joins decomposed DB files into single
# Joins decomposed eb files generated using "Parallel In Time" into a single file
# Example: bit.ly/3OQg71F
import argparse
from argparse import RawTextHelpFormatter
@@ -8,11 +9,12 @@ import h5py
import os
import kaipy.kaiH5 as kh5
import glob
import kaipy.kdefs as kd
tEps = 1.0e-3 #Small time
#Create new file w/ same root vars/attributes as old
def createfile(fIn,fOut):
def createfile(fIn,fOut,doLink=False):
print('Creating new output file:',fOut)
iH5 = h5py.File(fIn,'r')
oH5 = h5py.File(fOut,'w')
@@ -23,13 +25,20 @@ def createfile(fIn,fOut):
aStr = str(k)
oH5.attrs.create(k,iH5.attrs[aStr])
print("\t%s"%(aStr))
#Copy root groups
#Copy root groups
print("Copying root variables ...")
for Q in iH5.keys():
sQ = str(Q)
#Skip cache, we add it later
if kd.grpTimeCache in sQ:
continue
#Don't include stuff that starts with "Step"
if "Step" not in sQ:
oH5.create_dataset(sQ,data=iH5[sQ])
if doLink:
oH5[sQ] = h5py.ExternalLink(fIn, sQ)
else:
oH5.create_dataset(sQ,data=iH5[sQ])
print("\t%s"%(sQ))
iH5.close()
@@ -49,16 +58,22 @@ if __name__ == "__main__":
parser = argparse.ArgumentParser(description=MainS, formatter_class=RawTextHelpFormatter)
parser.add_argument('-runid',type=str,metavar="runid",default=runid,help="Input run ID (default: %(default)s)")
parser.add_argument('-typeid',type=str,metavar="typeid",default=typeid,help="Input type ID (default: %(default)s)")
parser.add_argument('--link',action='store_true',help="Create links to existing files rather than copy data (default: %(default)s)")
#Finalize parsing
args = parser.parse_args()
runid = args.runid
typeid = args.typeid
dbIns = glob.glob('%s.????.%s.h5'%(runid,typeid))
doLink = args.link
globStr = '%s.????.%s.h5'%(runid,typeid)
dbIns = glob.glob(globStr)
dbIns.sort()
fOut = "%s.%s.h5"%(runid,typeid)
if doLink:
fOut = "%s.%s.link.h5"%(runid,typeid)
else:
fOut = "%s.%s.h5"%(runid,typeid)
N = len(dbIns)
print("Found %d files, writing output to %s"%(N,fOut))
@@ -66,7 +81,13 @@ if __name__ == "__main__":
print("No files found, exiting")
exit()
#Create file w/ attributes and root variables as first file
oH5 = createfile(dbIns[0],fOut)
oH5 = createfile(dbIns[0],fOut, doLink)
# Store and concat timeAttributeCache to add at the very end
timeCacheVars = {}
with h5py.File(dbIns[0], 'r') as tacF:
for k in tacF[kd.grpTimeCache].keys():
timeCacheVars[k] = np.array([], dtype=tacF[kd.grpTimeCache][k].dtype)
s0 = 0 #Current step
nowTime = 0.0
@@ -84,6 +105,13 @@ if __name__ == "__main__":
print("\tWriting to %d to %d"%(s0,s0+dN-1))
iH5 = h5py.File(fIn,'r')
# Grow timeAttributeCache
for k in iH5[kd.grpTimeCache].keys():
data = iH5[kd.grpTimeCache][k][:]
if k == 'step':
data += s0 # Cache for merged h5 file needs to remap original steps to their position in merged file
timeCacheVars[k] = np.append(timeCacheVars[k], data, axis=0)
#Loop over steps in the input file
for s in range(nS,nE+1):
#Input
@@ -100,23 +128,32 @@ if __name__ == "__main__":
#Good value, update old time
oldTime = nowTime
oH5.create_group(ogStr)
print("Copying %s to %s"%(igStr,ogStr))
if doLink:
oH5[ogStr] = h5py.ExternalLink(fIn, igStr)
else:
oH5.create_group(ogStr)
print("Copying %s to %s"%(igStr,ogStr))
#Group atts
for k in iH5[igStr].attrs.keys():
#Group atts
for k in iH5[igStr].attrs.keys():
aStr = str(k)
oH5[ogStr].attrs.create(k,iH5[igStr].attrs[aStr])
#print(aStr)
#Group vars
for Q in iH5[igStr].keys():
sQ = str(Q)
#print("\tCopying %s"%(sQ))
oH5[ogStr].create_dataset(sQ,data=iH5[igStr][sQ])
aStr = str(k)
oH5[ogStr].attrs.create(k,iH5[igStr].attrs[aStr])
#print(aStr)
#Group vars
for Q in iH5[igStr].keys():
sQ = str(Q)
oH5[ogStr].create_dataset(sQ,data=iH5[igStr][sQ])
#Update s0
s0 = s0 + 1
iH5.close()
# Write timeAttributeCache to output file
print("Writing " + kd.grpTimeCache)
tag = oH5.create_group(kd.grpTimeCache)
for k in timeCacheVars:
tag.create_dataset(k, data=timeCacheVars[k], dtype=timeCacheVars[k].dtype)
#Done
oH5.close()
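For context, h5py external links store only a (filename, dataset-path) pair; no data is copied, and the link resolves into the source file on access. A minimal self-contained sketch:

import h5py

with h5py.File("part.h5", "w") as src:
    src.create_dataset("Step#0/D", data=[1.0, 2.0, 3.0])

with h5py.File("merged.link.h5", "w") as out:
    # No data is copied; "Step#0" resolves into part.h5 when read
    out["Step#0"] = h5py.ExternalLink("part.h5", "/Step#0")

with h5py.File("merged.link.h5", "r") as out:
    print(out["Step#0/D"][:])   # [1. 2. 3.]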

View File

@@ -131,6 +131,9 @@ if __name__ == "__main__":
sQ = str(Q)
#print("\tCopying %s"%(sQ))
oH5[gOut].create_dataset(sQ,data=iH5[gIn][sQ])
for k in iH5[gIn][sQ].attrs.keys():
aStr = str(k)
oH5[gOut][sQ].attrs.create(k,iH5[gIn][sQ].attrs[aStr])
# make a new file every Nsf steps
if(n%Nsf==0 and n != 0):
oH5.close()

View File

@@ -136,7 +136,10 @@ if __name__ == "__main__":
sQ = str(Q)
#print("\tCopying %s"%(sQ))
oH5[gOut].create_dataset(sQ,data=iH5[gIn][sQ])
for k in iH5[gIn][sQ].attrs.keys():
aStr = str(k)
#print("\t\tCopying %s"%(aStr))
oH5[gOut][sQ].attrs.create(k,iH5[gIn][sQ].attrs[aStr])
#Close up
iH5.close()
oH5.close()
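Both hunks add the same fix: dataset attributes were previously dropped when copying steps to the output file. The added loop is the standard h5py idiom; an equivalent standalone sketch:

def copy_attrs(src, dst):
    """Copy every HDF5 attribute from src to dst (h5py objects)."""
    for k, v in src.attrs.items():
        dst.attrs[k] = v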

View File

@@ -28,6 +28,18 @@ from kaipy.solarWind.CUSTOM import DSCOVRNC
import datetime
from astropy.time import Time
from cdasws import CdasWs
import sys
# ANSI color codes for color output to terminal
class Color:
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BOLD = '\033[1m'
END = '\033[0m'
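These are plain ANSI escape sequences; codes combine by concatenation and must be terminated with END to reset the terminal. For example:

print(Color.BOLD + Color.YELLOW + "!!!!!!!!!! ERROR: no valid data" + Color.END)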
def bxFit(sw, fileType, filename):
def bxFitPlot(bxFit_array):
@@ -51,431 +63,513 @@ def bxFit(sw, fileType, filename):
return coef
def ChkTimes(starttime,endtime):
time_difference = endtime - starttime
hours_difference = time_difference.total_seconds()/3600.0
if (starttime > endtime) or (hours_difference < 2.0):
tsStr = starttime.strftime("%Y-%m-%dT%H:%M:%S")
teStr = endtime.strftime("%Y-%m-%dT%H:%M:%S")
sys.exit("Error! Start time (%s) must be al least 2 hours before the end time (%s)"%(tsStr,teStr))
def printErrMsg(errStr):
print(Color.BOLD+Color.YELLOW+'!!!!!!!!!! ERROR: %s'%(errStr)+ Color.END)
print(Color.BOLD+Color.YELLOW+'!!!!!!!!!! Not writing bcWind.h5 file'+ Color.END)
print(Color.BOLD+Color.YELLOW+'!!!!!!!!!! Contact model developers to proceed'+ Color.END)
sys.exit()
def getPrevDayF107(t0):
tm1 = t0-datetime.timedelta(days=1)
tm1 = tm1.replace(hour=0, minute=0, second=0, microsecond=0)
te1 = tm1.replace(hour=23, minute=59, second=59, microsecond=9999)
tm1r = tm1.strftime("%Y-%m-%dT%H:%M:%SZ")
te1r = te1.strftime("%Y-%m-%dT%H:%M:%SZ")
status,data = cdas.get_data('OMNI2_H0_MRG1HR', ['F10_INDEX1800'], tm1r,te1r)
#daily values so just return first value
prevF107 = data['F10_INDEX1800'][0]
return prevF107
if __name__ == "__main__":
fOut = "bcwind.h5"
mod = "LFM"
t0Str="2010-01-01T00:00:00"
t1Str="2010-01-01T02:00:00"
Ts = 0.0
sigma = 3.0
tOffset = 0.0
obs="OMNI"
filename=None
doBs = True
doEps = False
MainS = """ This script does several things:
1. Fetch OMNI data from CDAWeb between the specified times (must be at least 2 hours in length)
2. Generate standard plots of solar wind data
3. Write output in a model file format.
- "LFM" format will:
a. Generate coefficients for Bx Fit
b. Save a bcwind.h5 file
- "TIEGCM" format will:
a. Compute 15-minute boxcar average lagged by 5 minutes
b. Sub-sample at 5-minutes
c. Write NetCDF IMF data file
"""
fOut = "bcwind.h5"
mod = "LFM"
t0Str="2010-01-01T00:00:00"
t1Str="2010-01-01T02:00:00"
Ts = 0.0
sigma = 3.0
tOffset = 0.0
obs="OMNI"
filename=None
doBs = True
doEps = False
parser = argparse.ArgumentParser(description=MainS, formatter_class=RawTextHelpFormatter)
parser.add_argument('-t0',type=str,metavar="TStart",default=t0Str,help="Start time in 'YYYY-MM-DDThh:mm:ss' (default: %(default)s)")
parser.add_argument('-t1',type=str,metavar="TStop",default=t1Str,help="End time in 'YYYY-MM-DDThh:mm:ss' (default: %(default)s)")
parser.add_argument('-obs',type=str,metavar="OMNI",default=obs,help="Select spacecraft to obtain observations from (default: %(default)s)")
parser.add_argument('-offset',type=float,metavar="tOffset",default=tOffset,help="Minutes to offset spacecraft observation and simulation t0 (default: %(default)s)")
parser.add_argument('-o',type=str,metavar="wind.h5",default=fOut,help="Output Gamera wind file (default: %(default)s)")
parser.add_argument('-m',type=str,metavar="LFM",default=mod,help="Format to write. Options are LFM or TIEGCM (default: %(default)s)")
parser.add_argument('-TsG',type=float,metavar="GAMERA_TStart",default=Ts,help="Gamera start time [min] (default: %(default)s)")
parser.add_argument('-TsL',type=float,metavar="LFM_TStart",default=Ts,help="LFM start time [min] (default: %(default)s)")
parser.add_argument('-bx', action='store_true',default=False,help="Include Bx through ByC and BzC fit coefficients (default: %(default)s)")
parser.add_argument('-bs', action='store_false',default=True,help="Include Bowshock location (default: %(default)s)")
parser.add_argument('-interp', action='store_true',default=False,help="Include shaded region on plots where data is interpolated (default: %(default)s)")
parser.add_argument('-filter', action='store_true',default=False,help="Include additional filtering of data to remove outlier points (default: %(default)s)")
parser.add_argument('-sig',type=float,metavar="sigma",default=sigma,help="N used in N*sigma used for filtering threshold above which will be thrown out (default: %(default)s)")
parser.add_argument('-eps', action="store_true",default=False,help="Output eps figure. (default: %(default)s)")
parser.add_argument('-fn', type=str,metavar="filename",default=filename,help="Name of Wind file. Only used if obs is WINDF. (default: %(default)s)")
parser.add_argument('-f107', type=float,default=None)
parser.add_argument('-kp', type=float,default=None)
#Finalize parsing
args = parser.parse_args()
#Usually f107 above 300 is not reliable. The daily value could be distorted by flare emissions even if the flare may only last a short time during a day.
maxf107 = 300.0
minMfast = 1.5
fOut = args.o
mod = args.m
TsG = args.TsG
TsL = args.TsL
includeBx = args.bx
doBs = args.bs
plotInterped = args.interp
doCoarseFilter = args.filter
sigma = args.sig
doEps = args.eps
obs = args.obs
MainS = """ This script does several things:
1. Fetch OMNI data from CDAWeb between the specified times (must be at least 2 hours in length)
2. Generate standard plots of solar wind data
3. Write output in a model file format.
- "LFM" format will:
a. Generate coefficients for Bx Fit
b. Save a bcwind.h5 file
- "TIEGCM" format will:
a. Compute 15-minute boxcar average lagged by 5 minutes
b. Sub-sample at 5-minutes
c. Write NetCDF IMF data file
"""
if (obs == 'WINDF' and args.fn is None): raise Exception('Error: WINDF requires -fn to specify a WIND file')
if (obs == 'OMNIW' and args.fn is None): raise Exception('Error: OMNIW requires -fn to specify a WIND file')
parser = argparse.ArgumentParser(description=MainS, formatter_class=RawTextHelpFormatter)
parser.add_argument('-t0',type=str,metavar="TStart",default=t0Str,help="Start time in 'YYYY-MM-DDThh:mm:ss' (default: %(default)s)")
parser.add_argument('-t1',type=str,metavar="TStop",default=t1Str,help="End time in 'YYYY-MM-DDThh:mm:ss' (default: %(default)s)")
parser.add_argument('-obs',type=str,metavar="OMNI",default=obs,help="Select spacecraft to obtain observations from (default: %(default)s)")
parser.add_argument('-offset',type=float,metavar="tOffset",default=tOffset,help="Minutes to offset spacecraft observation and simulation t0 (default: %(default)s)")
parser.add_argument('-o',type=str,metavar="wind.h5",default=fOut,help="Output Gamera wind file (default: %(default)s)")
parser.add_argument('-m',type=str,metavar="LFM",default=mod,help="Format to write. Options are LFM or TIEGCM (default: %(default)s)")
parser.add_argument('-TsG',type=float,metavar="GAMERA_TStart",default=Ts,help="Gamera start time [min] (default: %(default)s)")
parser.add_argument('-TsL',type=float,metavar="LFM_TStart",default=Ts,help="LFM start time [min] (default: %(default)s)")
parser.add_argument('-bx', action='store_true',default=False,help="Include Bx through ByC and BzC fit coefficients (default: %(default)s)")
parser.add_argument('-bs', action='store_false',default=True,help="Include Bowshock location (default: %(default)s)")
parser.add_argument('-interp', action='store_true',default=False,help="Include shaded region on plots where data is interpolated (default: %(default)s)")
parser.add_argument('-filter', action='store_true',default=False,help="Include additional filtering of data to remove outlier points (default: %(default)s)")
parser.add_argument('-sig',type=float,metavar="sigma",default=sigma,help="N used in N*sigma used for filtering threshold above which will be thrown out (default: %(default)s)")
parser.add_argument('-eps', action="store_true",default=False,help="Output eps figure. (default: %(default)s)")
parser.add_argument('-fn', type=str,metavar="filename",default=filename,help="Name of Wind file. Only used if obs is WINDF. (default: %(default)s)")
parser.add_argument('-f107', type=float,default=None,help="Set f10.7 value to use in bcwind file. Only used if no data available. (default: %(default)s)")
parser.add_argument('-kp', type=float,default=None,help="Set Kp value to use in bcwind file. Only used if no data available. (default: %(default)s)")
parser.add_argument('-safe', action='store_true',default=False,help="Run in SAFE mode. Does not create the h5 file if certain conditions are not met (default: %(default)s)")
#Finalize parsing
args = parser.parse_args()
t0Str = args.t0
t1Str = args.t1
fOut = args.o
mod = args.m
TsG = args.TsG
TsL = args.TsL
includeBx = args.bx
doBs = args.bs
plotInterped = args.interp
doCoarseFilter = args.filter
sigma = args.sig
doEps = args.eps
obs = args.obs
f107Def = args.f107
kpDef = args.kp
inSafeMode = args.safe
tOffset = args.offset
if (obs == 'WINDF' and args.fn is None): raise Exception('Error: WINDF requires -fn to specify a WIND file')
if (obs == 'OMNIW' and args.fn is None): raise Exception('Error: OMNIW requires -fn to specify a WIND file')
fmt='%Y-%m-%dT%H:%M:%S'
t0Str = args.t0
t1Str = args.t1
t0 = datetime.datetime.strptime(t0Str,fmt)
t1 = datetime.datetime.strptime(t1Str,fmt)
t0r = t0.strftime("%Y-%m-%dT%H:%M:%SZ")
t1r = t1.strftime("%Y-%m-%dT%H:%M:%SZ")
tOffset = args.offset
cdas = CdasWs()
fmt='%Y-%m-%dT%H:%M:%S'
# calculating average F10.7 over specified time period, can be converted into a timeseries
# pulling data from CDAWeb database
print('Retrieving f10.7 data from CDAWeb')
try:
statusf107,data = cdas.get_data('OMNI2_H0_MRG1HR', ['F10_INDEX1800','KP1800'], t0r,t1r)
t0 = datetime.datetime.strptime(t0Str,fmt)
t1 = datetime.datetime.strptime(t1Str,fmt)
t0r = t0.strftime("%Y-%m-%dT%H:%M:%SZ")
t1r = t1.strftime("%Y-%m-%dT%H:%M:%SZ")
totalMin = (t1-t0).days*24.0*60.0+(t1-t0).seconds/60
tmin = np.arange(totalMin)
t107 = data['Epoch']
t107min = np.zeros(len(t107))
for i in range(len(t107)):
t107min[i]=(t107[i]-t0).days*24.0*60.0+(t107[i]-t0).seconds/60
ChkTimes(t0,t1)
f107=data['F10_INDEX1800']
f107[f107 == 999.9] = np.nan # removing bad values from mean calculation
avgF107 = np.mean(f107[f107 != 999.9])
print("Average f10.7: ", avgF107)
if (f107[0] == 999.9):
f107[0] = avgF107
print('!!!!!!!!!! Warning: f10.7 starts with a bad value, setting to average value: %d !!!!!!!!!!'%(avgF107))
kp = data['KP1800']
#Linearly interpolating and converting hourly cadence to minutes
f107min = np.interp(tmin, t107min[f107 != 999.9], f107[f107 != 999.9])
if (np.all(kp == 99)):
kpmin = np.interp(tmin, t107min, kp) # if no good values, setting all to bad values
else:
kpmin = np.interp(tmin, t107min[kp != 99], kp[kp!=99]/10.0)
except:
totalMin = (t1-t0).days*24.0*60.0+(t1-t0).seconds/60
tmin = np.arange(totalMin)
totalMin = totalMin-1
if args.f107 is None:
print('No f10.7 is specified. Setting to default value: %d !!!!!' %(100.))
f107 = 100.
else:
f107 = args.f107
f107min = np.ones(int(totalMin))*f107
if args.kp is None:
print("!!!!!!!!!! Warning: No valid Kp data, setting all values in array to 99 !!!!!!!!!!")
kp = 99
else:
kp = args.kp
kpmin = np.ones(int(totalMin))*kp
cdas = CdasWs()
if (obs == 'OMNI'):
fileType = 'OMNI'
filename = 'OMNI_HRO_1MIN.txt'
#obtain 1 minute resolution observations from OMNI dataset
print('Retrieving solar wind data from CDAWeb')
status,fIn = cdas.get_data(
'OMNI_HRO_1MIN',
['BX_GSE','BY_GSE','BZ_GSE',
'Vx','Vy','Vz',
'proton_density','T',
'AE_INDEX','AL_INDEX','AU_INDEX','SYM_H',
'BSN_x','BSN_y','BSN_z'],
t0r,t1r)
# Read the solar wind data into 'sw' object and interpolate over the bad data.
if (doCoarseFilter): print(f"Using Coarse Filtering, removing values {sigma} sigma from the mean")
sw = eval('kaipy.solarWind.'+fileType+'.'+fileType)(fIn,doFilter=doCoarseFilter,sigmaVal=sigma)
# calculating average F10.7 over specified time period, can be converted into a timeseries
# pulling data from CDAWeb database
print('Retrieving f10.7 data from CDAWeb')
elif (obs == 'WIND'):
# CDAS tips.
# use CDAweb to get the name of the spacecraft variables you want, such as "C4_CP_FGM_SPIN"
# then use cdas.get_variables('sp_phys','C4_CP_FGM_SPIN') to get a list of variables
# variable names do not exactly match the cdaweb outputs so check to make sure variables
fileType = 'WIND'
filename = 'WIND'
tBuffer = 20 # Extra padding for propagation
fOMNI = cdas.get_data(
'sp_phys',
'OMNI_HRO_1MIN',
t0-datetime.timedelta(minutes=tBuffer),
t1,#+datetime.timedelta(minutes=tBuffer),
['BX_GSE,BY_GSE,BZ_GSE,Vx,Vy,Vz,proton_density,T,AE_INDEX,AL_INDEX,AU_INDEX,SYM_H,BSN_x']
)
fMFI = cdas.get_data(
'sp_phys',
'WI_H0_MFI',
t0-datetime.timedelta(minutes=tOffset+tBuffer),
t1+datetime.timedelta(minutes=tOffset),
['BGSE']
)
fSWE = cdas.get_data(
'sp_phys',
'WI_K0_SWE',
t0-datetime.timedelta(minutes=tOffset+tBuffer),
t1+datetime.timedelta(minutes=tOffset),
['Np,V_GSE,QF_V,QF_Np,THERMAL_SPD']
)
# What is our X location in km? Approximate C4 location at shock
xloc = 98000.
sw = eval('kaipy.solarWind.'+fileType+'.'+fileType)(fSWE,fMFI,fOMNI,xloc,tOffset,t0,t1)
elif (obs == 'WINDF'):
# Using a WIND file instead of CDAweb
# We still use cdas to get the other OMNI stuff. We'll just grab them inside the class
# Doing it this way, we can specify t0 and t1 based on file input
fileType = 'WIND'
fileType2 = 'WINDF'
filename = args.fn
sw = eval('kaipy.solarWind.'+fileType+'.'+fileType2)(filename)
elif (obs == 'OMNIW'):
fileType = 'OMNI'
fileType2 = 'OMNIW'
filename = args.fn
doBs = True
print("Working with OMNIW algorithm")
# Read the solar wind data into 'sw' object and interpolate over the bad data.
sw = eval('kaipy.solarWind.'+fileType+'.'+fileType2)(filename)
filename = 'OMNIW_'+filename
elif (obs == 'CUSTOM'):
fileType = 'CUSTOM'
fileType2 = 'DSCOVR'
filename = args.fn
doBs = True
sw = eval('kaipy.solarWind.'+fileType+'.'+fileType2)(t0,filename)
filename = fileType2+'_'+filename
try:
statusf107,data = cdas.get_data('OMNI2_H0_MRG1HR', ['F10_INDEX1800','KP1800'], t0r,t1r)
elif (obs == 'DSCOVRNC'):
fileType = 'CUSTOM'
fileType2 = 'DSCOVRNC'
doBs = False
sw = eval('kaipy.solarWind.'+fileType+'.'+fileType2)(t0,t1)
filename = fileType2
else:
raise Exception('Error: Not able to obtain dataset from spacecraft. Please select another mission.')
totalMin = (t1-t0).days*24.0*60.0+(t1-t0).seconds/60
tmin = np.arange(totalMin)
t107 = data['Epoch']
t107min = np.zeros(len(t107))
for i in range(len(t107)):
t107min[i]=(t107[i]-t0).days*24.0*60.0+(t107[i]-t0).seconds/60
f107=data['F10_INDEX1800']
if (np.all(f107 > maxf107)): #bad values set to 999.9 by cdas
if inSafeMode:
printErrMsg('No valid f10.7 data')
# Do output format-specific tasks:
if (mod == 'TIEGCM'):
# Write TIEGCM IMF solar wind file
#FIXME: need to update when want to include, example code in pyLTR.SolarWind.Writer.TIEGCM
raise Exception('Error: Cannot currently produce TIEGCM output.')
elif (mod == 'LFM'):
if (includeBx):
print("\tUsing Bx fields")
# Bx Fit
bCoef=bxFit( sw, fileType, filename)
# Setting Bx0 to zero to enforce a planar front with no Bx offset
bCoef[0] = 0.0
print(Color.GREEN+'!!!!!!!!!! Warning: No valid f10.7 data, attempting to take value from previous day !!!!!!!!!!'+Color.END)
prevF107 = getPrevDayF107(t0)
if(prevF107<=maxf107):
print(Color.GREEN+'\tSuccessful. Setting f10.7 to %f'%(prevF107)+Color.END)
f107[:] = prevF107
else:
print("\tNot using Bx fields")
bCoef = [0.0, 0.0, 0.0]
# Interpolate to one minute:
time_1minute = range(int(sw.data.getData('time_min').min()),
int(sw.data.getData('time_min').max()) )
n = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('n'))
tp = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('t'))
vx = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('vx'))
vy = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('vy'))
vz = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('vz'))
cs = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('cs'))
va = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('va'))
bx = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('bx'))
by = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('by'))
bz = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('bz'))
b = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('b'))
try:
ae = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('ae'))
except:
ae = np.zeros(len(time_1minute))
try:
al = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('al'))
except:
al = np.zeros(len(time_1minute))
try:
au = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('au'))
except:
au = np.zeros(len(time_1minute))
try:
symh = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('symh'))
except:
symh = np.zeros(len(time_1minute))
if doBs:
bsx = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('xBS'))
bsy = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('yBS'))
bsz = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('zBS'))
#grab info on where data is interpolated to include on plots if wanted
interped = np.zeros((11,len(symh)))
interped[0,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isBxInterped'))
interped[1,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isByInterped'))
interped[2,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isBzInterped'))
interped[3,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isVxInterped'))
interped[4,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isVyInterped'))
interped[5,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isVzInterped'))
interped[6,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isNInterped'))
try:
interped[7,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isCsInterped'))
except:
interped[7,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isTInterped'))
if doBs:
interped[8,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isxBSInterped'))
interped[9,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isyBSInterped'))
interped[10,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('iszBSInterped'))
#finding locations where any variable is interpolated
isInterp=np.any(interped,axis=0)
pltInterp = np.zeros(len(isInterp),dtype=bool)
if (plotInterped):
pltInterp = isInterp
# calculating fast magnetosonic mach number
mfast = np.sqrt((vx**2+vy**2+vz**2)/(cs**2+va**2))
#initialize matrix to hold solar wind data
if doBs:
lfmD = np.zeros((n.shape[0],21))
else:
lfmD = np.zeros((n.shape[0],18))
date = sw.data.getData('meta')['Start date']
nSub = 0
for i,time in enumerate(time_1minute):
# Convert relevant quantities to SM Coordinates
v_sm = sw._gsm2sm(date+datetime.timedelta(minutes=time), vx[i],vy[i],vz[i])
b_sm = sw._gsm2sm(date+datetime.timedelta(minutes=time), bx[i],by[i],bz[i])
if doBs:
bs_sm = sw._gsm2sm(date+datetime.timedelta(minutes=time), bsx[i],bsy[i],bsz[i])
tilt = sw._getTiltAngle(date+datetime.timedelta(minutes=time))
if doBs:
lfmD[i] = [time,n[i],v_sm[0],v_sm[1],v_sm[2],cs[i],b_sm[0],b_sm[1],b_sm[2],b[i],tilt,ae[i],al[i],au[i],symh[i],tp[i],va[i],mfast[i],bs_sm[0],bs_sm[1],bs_sm[2]]
if f107Def is not None:
print(Color.GREEN+'!!!!!!!!!! Warning: No valid f10.7 data on previous day either. Setting f10.7 to %f!!!!!!!!!!'%(f107Def)+Color.END)
f107[:] = f107Def
else:
lfmD[i] = [time,n[i],v_sm[0],v_sm[1],v_sm[2],cs[i],b_sm[0],b_sm[1],b_sm[2],b[i],tilt,ae[i],al[i],au[i],symh[i],tp[i],va[i],mfast[i]]
if mfast[i] < 2:
nSub += 1
sys.exit(Color.YELLOW+'!!!!!!!!!! Error: No valid f10.7 data on previous day either. Set f10.7 to use with -f107 flag !!!!!!!!!!'+Color.END)
elif (f107[0] > maxf107):
indF = np.where(f107<maxf107)[0][0]
F107start = f107[indF]
f107[0] = F107start
print(Color.GREEN+'!!!!!!!!!! Warning: f10.7 starts with a bad value (>%d), setting initial value to first good value: %f !!!!!!!!!!'%(maxf107,F107start)+Color.END)
if nSub > 0:
print()
print("!!!!!!!!!! WARNING LOW MACH NUMBER: Mfast < 2 for %d minutes !!!!!!!!!!"%(nSub))
print()
#Linearly interpolating and converting hourly cadence to minutes
f107min = np.interp(tmin, t107min[f107 < maxf107], f107[f107 < maxf107] )
print("Converting to Gamera solar wind file")
Nt,Nv = lfmD.shape
print("\tFound %d variables and %d lines"%(Nv,Nt))
kp = data['KP1800']
if (np.all(kp == 99)):
if inSafeMode:
printErrMsg('No valid Kp data')
#Convert LFM time to seconds and reset to start at 0
print("\tOffsetting from LFM start (%5.2f min) to Gamera start (%5.2f min)"%(TsL,TsG))
T0 = lfmD[:,0].min()
T = (lfmD[:,0]-TsL+TsG)*60
#Calculating time in UT
UT = []
[UT.append(np.string_(date+datetime.timedelta(seconds=i)).strip()) for i in T]
#Calculating time in MJD
MJD = []
mjdRef=Time(date).mjd
[MJD.append(mjdRef+i/86400.0) for i in T]
#Density, temperature, magnetic field, and tilt don't require scaling
D = lfmD[:,1]
ThT = lfmD[:,10]
Bx = lfmD[:,6] # overwritten by Gamera using the coefficients
By = lfmD[:,7]
Bz = lfmD[:,8]
#Activity indices do not require scaling
AE = lfmD[:,11]
AL = lfmD[:,12]
AU = lfmD[:,13]
SYMH = lfmD[:,14]
# scaling Temperature from kK->K
Temp = lfmD[:,15]*1.0e+3
#Velocity
vScl = 1.0e+3 #km/s->m/s
Vx = vScl*lfmD[:,2]
Vy = vScl*lfmD[:,3]
Vz = vScl*lfmD[:,4]
Cs = vScl*lfmD[:,5] #km/s->m/s
Va = vScl*lfmD[:,16]
Mfast = lfmD[:,17]
#Bowshock position
if doBs:
xBS = lfmD[:,18]
yBS = lfmD[:,19]
zBS = lfmD[:,20]
# Save a plot of the solar wind data.
if doEps:
swPlotFilename = os.path.basename(filename) + '.eps'
if kpDef is not None:
print(Color.BLUE+"!!!!!!!!!! Warning: No valid Kp data, setting all values in array to %d!!!!!!!!!!"%(kpDef)+Color.END)
kp[:] = kpDef
kpmin = np.interp(tmin, t107min, kp) # if no good values, setting all to bad values
else:
swPlotFilename = os.path.basename(filename) + '.png'
print('Saving "%s"' % swPlotFilename)
if doBs:
kaipy.solarWind.swBCplots.swQuickPlot(UT,D,Temp,Vx,Vy,Vz,Bx,By,Bz,SYMH,pltInterp,swPlotFilename,xBS,yBS,zBS,doEps=doEps)
else:
kaipy.solarWind.swBCplots.swQuickPlot(UT,D,Temp,Vx,Vy,Vz,Bx,By,Bz,SYMH,pltInterp,swPlotFilename,doEps=doEps)
print("Writing Gamera solar wind to %s"%(fOut))
with h5py.File(fOut,'w') as hf:
hf.create_dataset("T" ,data=T)
hf.create_dataset("UT",data=UT)
hf.create_dataset("MJD",data=MJD)
hf.create_dataset("D" ,data=D)
hf.create_dataset("Temp" ,data=Temp)
hf.create_dataset("Vx",data=Vx)
hf.create_dataset("Vy",data=Vy)
hf.create_dataset("Vz",data=Vz)
hf.create_dataset("Bx",data=Bx)
hf.create_dataset("By",data=By)
hf.create_dataset("Bz",data=Bz)
hf.create_dataset("tilt",data=ThT)
hf.create_dataset("ae",data=AE)
hf.create_dataset("al",data=AL)
hf.create_dataset("au",data=AU)
hf.create_dataset("symh",data=SYMH)
hf.create_dataset("Interped",data=1*isInterp)
hf.create_dataset("f10.7",data=f107min)
hf.create_dataset("Kp",data=kpmin)
hf.create_dataset("Bx0",data=bCoef[0])
hf.create_dataset("ByC",data=bCoef[1])
hf.create_dataset("BzC",data=bCoef[2])
hf.create_dataset("Va",data=Va)
hf.create_dataset("Cs",data=Cs)
if doBs:
hf.create_dataset("xBS",data=xBS)
hf.create_dataset("yBS",data=yBS)
hf.create_dataset("zBS",data=zBS)
hf.create_dataset("Magnetosonic Mach",data=Mfast)
sys.exit(Color.YELLOW+'!!!!!!!!!! Error: No valid Kp data. Set Kp to use with -kp flag !!!!!!!!!!'+Color.END)
else:
raise Exception('Error: Misunderstood output file format.')
if (kp[0] == 99):
indF = np.where(kp!=99)[0][0]
KpStart = kp[indF]
kp[0] = KpStart
print(Color.BLUE+'!!!!!!!!!! Warning: Kp starts with a bad value, setting to first good value: %d !!!!!!!!!!'%(KpStart)+Color.END)
kpmin = np.interp(tmin, t107min[kp != 99], kp[kp!=99]/10.0)
except Exception as e:
if isinstance(e, SystemExit):
raise # Re-raise SystemExit exception
else:
if inSafeMode:
printErrMsg('Issue pulling f10.7 and kp data from OMNI, need to be set manually.')
print(Color.DARKCYAN+"+'!!!!!!!!!! Issue pulling f10.7 and kp data from OMNI, setting manually"+Color.END)
totalMin = (t1-t0).days*24.0*60.0+(t1-t0).seconds/60
tmin = np.arange(totalMin)
totalMin = totalMin-1
if f107Def is None:
sys.exit(Color.YELLOW+'!!!!!!!!!! Error: Default f10.7 is not set. Update using -f107 flag at execution !!!!!!!!!!'+Color.END)
else:
print(Color.DARKCYAN+'\tSetting f10.7 to: %f !!!!!' %(f107Def)+Color.END)
f107min = np.ones(int(totalMin))*f107Def
if kpDef is None:
sys.exit(Color.YELLOW+'!!!!!!!!!! Error: Default Kp is not set. Update using -kp flag at execution !!!!!!!!!!'+Color.END)
else:
print(Color.DARKCYAN+'Setting kp to: %f (can be changed with -kp flag at execution) !!!!!' %(kpDef)+Color.END)
kpmin = np.ones(int(totalMin))*kpDef
if (obs == 'OMNI'):
fileType = 'OMNI'
filename = 'OMNI_HRO_1MIN.txt'
#obtain 1 minute resolution observations from OMNI dataset
print('Retrieving solar wind data from CDAWeb')
status,fIn = cdas.get_data(
'OMNI_HRO_1MIN',
['BX_GSE','BY_GSE','BZ_GSE',
'Vx','Vy','Vz',
'proton_density','T',
'AE_INDEX','AL_INDEX','AU_INDEX','SYM_H',
'BSN_x','BSN_y','BSN_z'],
t0r,t1r)
# Read the solar wind data into 'sw' object and interpolate over the bad data.
if (doCoarseFilter): print(f"Using Coarse Filtering, removing values {sigma} sigma from the mean")
sw = eval('kaipy.solarWind.'+fileType+'.'+fileType)(fIn,doFilter=doCoarseFilter,sigmaVal=sigma)
elif (obs == 'WIND'):
# CDAS tips.
# use CDAweb to get the name of the spacecraft variables you want, such as "C4_CP_FGM_SPIN"
# then use cdas.get_variables('sp_phys','C4_CP_FGM_SPIN') to get a list of variables
# variable names do not exactly match the cdaweb outputs so check to make sure variables
fileType = 'WIND'
filename = 'WIND'
tBuffer = 20 # Extra padding for propagation
fOMNI = cdas.get_data(
'sp_phys',
'OMNI_HRO_1MIN',
t0-datetime.timedelta(minutes=tBuffer),
t1,#+datetime.timedelta(minutes=tBuffer),
['BX_GSE,BY_GSE,BZ_GSE,Vx,Vy,Vz,proton_density,T,AE_INDEX,AL_INDEX,AU_INDEX,SYM_H,BSN_x']
)
fMFI = cdas.get_data(
'sp_phys',
'WI_H0_MFI',
t0-datetime.timedelta(minutes=tOffset+tBuffer),
t1+datetime.timedelta(minutes=tOffset),
['BGSE']
)
fSWE = cdas.get_data(
'sp_phys',
'WI_K0_SWE',
t0-datetime.timedelta(minutes=tOffset+tBuffer),
t1+datetime.timedelta(minutes=tOffset),
['Np,V_GSE,QF_V,QF_Np,THERMAL_SPD']
)
# What is our X location in km? Approximate C4 location at shock
xloc = 98000.
sw = eval('kaipy.solarWind.'+fileType+'.'+fileType)(fSWE,fMFI,fOMNI,xloc,tOffset,t0,t1)
elif (obs == 'WINDF'):
# Using a WIND file instead of CDAweb
# We still use cdas to get the other OMNI stuff. We'll just grab them inside the class
# Doing it this way, we can specify t0 and t1 based on file input
fileType = 'WIND'
fileType2 = 'WINDF'
filename = args.fn
sw = eval('kaipy.solarWind.'+fileType+'.'+fileType2)(filename)
elif (obs == 'OMNIW'):
fileType = 'OMNI'
fileType2 = 'OMNIW'
filename = args.fn
doBs = True
print("Working with OMNIW algorithm")
# Read the solar wind data into 'sw' object and interpolate over the bad data.
sw = eval('kaipy.solarWind.'+fileType+'.'+fileType2)(filename)
filename = 'OMNIW_'+filename
elif (obs == 'CUSTOM'):
fileType = 'CUSTOM'
fileType2 = 'DSCOVR'
filename = args.fn
doBs = True
sw = eval('kaipy.solarWind.'+fileType+'.'+fileType2)(t0,filename)
filename = fileType2+'_'+filename
elif (obs == 'DSCOVRNC'):
fileType = 'CUSTOM'
fileType2 = 'DSCOVRNC'
doBs = False
sw = eval('kaipy.solarWind.'+fileType+'.'+fileType2)(t0,t1)
filename = fileType2
else:
raise Exception('Error: Not able to obtain dataset from spacecraft. Please select another mission.')
# Do output format-specific tasks:
if (mod == 'TIEGCM'):
# Write TIEGCM IMF solar wind file
#FIXME: need to update when want to include, example code in pyLTR.SolarWind.Writer.TIEGCM
raise Exception('Error: Cannot currently produce TIEGCM output.')
elif (mod == 'LFM'):
if (includeBx):
print("\tUsing Bx fields")
# Bx Fit
bCoef=bxFit( sw, fileType, filename)
# Setting Bx0 to zero to enforce a planar front with no Bx offset
bCoef[0] = 0.0
else:
print("\tNot using Bx fields")
bCoef = [0.0, 0.0, 0.0]
# Interpolate to one minute:
time_1minute = range(int(sw.data.getData('time_min').min()),
int(sw.data.getData('time_min').max()) )
n = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('n'))
tp = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('t'))
vx = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('vx'))
vy = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('vy'))
vz = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('vz'))
cs = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('cs'))
va = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('va'))
bx = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('bx'))
by = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('by'))
bz = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('bz'))
b = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('b'))
try:
ae = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('ae'))
except:
ae = np.zeros(len(time_1minute))
try:
al = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('al'))
except:
al = np.zeros(len(time_1minute))
try:
au = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('au'))
except:
au = np.zeros(len(time_1minute))
try:
symh = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('symh'))
except:
symh = np.zeros(len(time_1minute))
if doBs:
bsx = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('xBS'))
bsy = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('yBS'))
bsz = np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('zBS'))
#grab info on where data is interpolated to include on plots if wanted
interped = np.zeros((11,len(symh)))
interped[0,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isBxInterped'))
interped[1,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isByInterped'))
interped[2,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isBzInterped'))
interped[3,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isVxInterped'))
interped[4,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isVyInterped'))
interped[5,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isVzInterped'))
interped[6,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isNInterped'))
try:
interped[7,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isCsInterped'))
except:
interped[7,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isTInterped'))
if doBs:
interped[8,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isxBSInterped'))
interped[9,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('isyBSInterped'))
interped[10,:] =np.interp(time_1minute, sw.data.getData('time_min'), sw.data.getData('iszBSInterped'))
#finding locations where any variable is interpolated
isInterp=np.any(interped,axis=0)
pltInterp = np.zeros(len(isInterp),dtype=bool)
if (plotInterped):
pltInterp = isInterp
# calculating fast magnetosonic mach number
mfast = np.sqrt((vx**2+vy**2+vz**2)/(cs**2+va**2))
#initialize matrix to hold solar wind data
if doBs:
lfmD = np.zeros((n.shape[0],21))
else:
lfmD = np.zeros((n.shape[0],18))
date = sw.data.getData('meta')['Start date']
nSub = 0
vxSub = []
for i,time in enumerate(time_1minute):
# Convert relevant quantities to SM Coordinates
v_sm = sw._gsm2sm(date+datetime.timedelta(minutes=time), vx[i],vy[i],vz[i])
b_sm = sw._gsm2sm(date+datetime.timedelta(minutes=time), bx[i],by[i],bz[i])
if doBs:
bs_sm = sw._gsm2sm(date+datetime.timedelta(minutes=time), bsx[i],bsy[i],bsz[i])
tilt = sw._getTiltAngle(date+datetime.timedelta(minutes=time))
if doBs:
lfmD[i] = [time,n[i],v_sm[0],v_sm[1],v_sm[2],cs[i],b_sm[0],b_sm[1],b_sm[2],b[i],tilt,ae[i],al[i],au[i],symh[i],tp[i],va[i],mfast[i],bs_sm[0],bs_sm[1],bs_sm[2]]
else:
lfmD[i] = [time,n[i],v_sm[0],v_sm[1],v_sm[2],cs[i],b_sm[0],b_sm[1],b_sm[2],b[i],tilt,ae[i],al[i],au[i],symh[i],tp[i],va[i],mfast[i]]
if mfast[i] < minMfast:
nSub += 1
vxSub.append(v_sm[0])
if nSub > 0:
import kaipy.gamera.gamGrids as gg
#Pull default LFM grid
gIn = "./lfmG"
Nc0 = 8 #Number of outer i cells to cut out from LFM grid (OCT)
xx0,yy0 = gg.LoadTabG(gIn,Nc0)
#Calculate Rout in sunward direction from grid
Rout = np.sqrt(xx0[-1,0]**2.0 + yy0[-1,0]**2.0) #[Re]
Re_km = 6378.1
maxVsub = abs(max(vxSub))
nSubCrit = (Rout*Re_km)/maxVsub/60.0 # mins
if inSafeMode and (nSub > nSubCrit):
printErrMsg("Low Mach number solar wind persists for too long (%d minutes)"%(nSub))
print()
print(Color.CYAN+"!!!!!!!!!! WARNING LOW MACH NUMBER: Mfast < %.3f for %d minutes, may want to extend grid !!!!!!!!!!"%(minMfast,nSub)+Color.END)
print()
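The threshold is a crossing-time estimate: the time for the fastest sub-magnetosonic parcel to traverse the sunward extent of the grid. A worked example, assuming an illustrative outer boundary of 30 Re and |Vx| = 400 km/s:

Rout = 30.0        # [Re], illustrative sunward grid extent
Re_km = 6378.1     # [km]
maxVsub = 400.0    # [km/s], fastest low-Mach solar wind seen
nSubCrit = (Rout * Re_km) / maxVsub / 60.0
print(nSubCrit)    # ~8.0 minutes; longer low-Mach intervals trigger the SAFE-mode error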
print("Converting to Gamera solar wind file")
Nt,Nv = lfmD.shape
print("\tFound %d variables and %d lines"%(Nv,Nt))
#Convert LFM time to seconds and reset to start at 0
print("\tOffsetting from LFM start (%5.2f min) to Gamera start (%5.2f min)"%(TsL,TsG))
T0 = lfmD[:,0].min()
T = (lfmD[:,0]-TsL+TsG)*60
#Calculating time in UT
UT = []
[UT.append(np.string_(date+datetime.timedelta(seconds=i)).strip()) for i in T]
#Calculating time in MJD
MJD = []
mjdRef=Time(date).mjd
[MJD.append(mjdRef+i/86400.0) for i in T]
#Density, temperature, magnetic field, and tilt don't require scaling
D = lfmD[:,1]
ThT = lfmD[:,10]
Bx = lfmD[:,6] # overwritten by Gamera using the coefficients
By = lfmD[:,7]
Bz = lfmD[:,8]
#Activity indices do not require scaling
AE = lfmD[:,11]
AL = lfmD[:,12]
AU = lfmD[:,13]
SYMH = lfmD[:,14]
# scaling Temperature from kK->K
Temp = lfmD[:,15]*1.0e+3
#Velocity
vScl = 1.0e+3 #km/s->m/s
Vx = vScl*lfmD[:,2]
Vy = vScl*lfmD[:,3]
Vz = vScl*lfmD[:,4]
Cs = vScl*lfmD[:,5] #km/s->m/s
Va = vScl*lfmD[:,16]
Mfast = lfmD[:,17]
#Bowshock position
if doBs:
xBS = lfmD[:,18]
yBS = lfmD[:,19]
zBS = lfmD[:,20]
# Save a plot of the solar wind data.
if doEps:
swPlotFilename = os.path.basename(filename) + '.eps'
else:
swPlotFilename = os.path.basename(filename) + '.png'
print('Saving "%s"' % swPlotFilename)
if doBs:
kaipy.solarWind.swBCplots.swQuickPlot(UT,D,Temp,Vx,Vy,Vz,Bx,By,Bz,SYMH,pltInterp,swPlotFilename,xBS,yBS,zBS,doEps=doEps)
else:
kaipy.solarWind.swBCplots.swQuickPlot(UT,D,Temp,Vx,Vy,Vz,Bx,By,Bz,SYMH,pltInterp,swPlotFilename,doEps=doEps)
print("Writing Gamera solar wind to %s"%(fOut))
with h5py.File(fOut,'w') as hf:
hf.create_dataset("T" ,data=T)
hf.create_dataset("UT",data=UT)
hf.create_dataset("MJD",data=MJD)
hf.create_dataset("D" ,data=D)
hf.create_dataset("Temp" ,data=Temp)
hf.create_dataset("Vx",data=Vx)
hf.create_dataset("Vy",data=Vy)
hf.create_dataset("Vz",data=Vz)
hf.create_dataset("Bx",data=Bx)
hf.create_dataset("By",data=By)
hf.create_dataset("Bz",data=Bz)
hf.create_dataset("tilt",data=ThT)
hf.create_dataset("ae",data=AE)
hf.create_dataset("al",data=AL)
hf.create_dataset("au",data=AU)
hf.create_dataset("symh",data=SYMH)
hf.create_dataset("Interped",data=1*isInterp)
hf.create_dataset("f10.7",data=f107min)
hf.create_dataset("Kp",data=kpmin)
hf.create_dataset("Bx0",data=bCoef[0])
hf.create_dataset("ByC",data=bCoef[1])
hf.create_dataset("BzC",data=bCoef[2])
hf.create_dataset("Va",data=Va)
hf.create_dataset("Cs",data=Cs)
if doBs:
hf.create_dataset("xBS",data=xBS)
hf.create_dataset("yBS",data=yBS)
hf.create_dataset("zBS",data=zBS)
hf.create_dataset("Magnetosonic Mach",data=Mfast)
else:
raise Exception('Error: Misunderstood output file format.')
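A quick way to sanity-check the bcwind file written in the LFM branch above, using only datasets it actually creates (a minimal sketch):

import h5py

with h5py.File("bcwind.h5", "r") as hf:
    print(sorted(hf.keys()))
    T = hf["T"][:]            # seconds, offset to the Gamera start time
    D = hf["D"][:]            # number density
    print(f"{len(T)} records spanning {T[0]:.0f}-{T[-1]:.0f} s, mean n = {D.mean():.2f}")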

View File

@@ -37,6 +37,7 @@ if __name__ == "__main__":
L_kt = 10
wolfP1 = 3
wolfP2 = 1
maxKp = 6
plotChoices = ['none', 'spec', 'vs']
@@ -57,6 +58,7 @@ if __name__ == "__main__":
parser.add_argument('--nop',action='store_true',default=False,help="Do not add zero loss first channel (default: %(default)s)")
parser.add_argument('--noWaveModel',action='store_true',default=False, help="Don't use wave models in the electron/ion loss (default: %(default)s)")
parser.add_argument('--addWM', action='store_true',default=False, help="Add wave models to an existing rcmconfig file, input file needed to be presented (default: %(default)s)")
parser.add_argument('-maxKp', type=int,default=maxKp, help="Max. Kp index allowed in the electron wave model, integer only (default: %(default)s)")
parser.add_argument('-i', type=str,default=fOut,metavar="fIn", help="Input file name when addWM is true (default: %(default)s)")
@@ -74,12 +76,17 @@ if __name__ == "__main__":
wolfP2 = args.p2
addWM = args.addWM
noWaveModel = args.noWaveModel
maxKp = args.maxKp
fIn = args.i
plotType = args.plotType
if maxKp >= 7:
print ("Maximum Kp allowed is 6. Please re-enter a valid number.")
exit()
if addWM:
tauParams = wmParams(dim = 4, nKp = 7, nMLT = 25, nL = 41, nEk = 155)
genWM.genh5(fIn,fOut,tauParams,useWM = True)
tauParams = wmParams(dim = 4, nKp = maxKp, nMLT = 97, nL = 41, nEk = 155)
genWM.genh5(fIn,fOut,tauParams)
else:
# Determine proton channel limits based on resolving a certain (proton) temperature at given L
bVol = kT.L_to_bVol(L_kt)
@@ -106,8 +113,8 @@ if __name__ == "__main__":
fileIO.saveRCMConfig(alamData,params=alamParams,fname=fOut)
# Add data needed for wavemodel
if not noWaveModel:
tauParams = wmParams(dim = 4, nKp = 7, nMLT = 25, nL = 41, nEk = 155, dimTDS = 1, nEkTDS = 109)
genWM.genh5(fOut,fOut,tauParams,useWM = True)
tauParams = wmParams(dim = 4, nKp = maxKp, nMLT = 97, nL = 41, nEk = 155)
genWM.genh5(fOut,fOut,tauParams)
print("Wrote RCM configuration to %s"%(fOut))

View File

@@ -41,6 +41,7 @@ if __name__ == "__main__":
tpad = args.tpad
swfname = args.swfile
doDPS = args.dps
ftag = args.id
#UT formats for plotting
isotfmt = '%Y-%m-%dT%H:%M:%S.%f'
@@ -86,9 +87,9 @@ if __name__ == "__main__":
gs = gridspec.GridSpec(1,1,hspace=0.05,wspace=0.05)
ax=fig.add_subplot(gs[0,0])
ax.plot(ut_symh,dstD,label="SYM-H",linewidth=2*LW)
ax.plot(ut,BSDst,label="BSDst",linewidth=LW)
ax.plot(ut,BSDst,label="Biot-Savart Dst",linewidth=LW)
if doDPS:
ax.plot(ut,DPSDst,label="DPSDst",linewidth=LW)
ax.plot(ut,DPSDst,label="Dessler-Parker-Sckopke Dst",linewidth=LW)
ax.legend(loc='upper right',fontsize="small",ncol=2)
ax.axhline(color='magenta',linewidth=0.5*LW)
ax.xaxis_date()

233
scripts/quicklook/gamerrVid.py Executable file
View File

@@ -0,0 +1,233 @@
#!/usr/bin/env python
#Make video of error between two Gamera cases
import argparse
from argparse import RawTextHelpFormatter
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import kaipy.kaiViz as kv
import matplotlib.gridspec as gridspec
import numpy as np
import kaipy.gamera.msphViz as mviz
import kaipy.gamera.magsphere as msph
import kaipy.gamera.rcmpp as rcmpp
from alive_progress import alive_bar
import kaipy.kdefs as kdefs
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import os
import errno
import subprocess
import shutil
cLW = 0.25
def makeMovie(frame_dir,movie_name):
frame_pattern = frame_dir + "/vid.%04d.png"
movie_file = os.getcwd() + "/" + movie_name + ".mp4"
ffmpegExe = "ffmpeg"
if shutil.which(ffmpegExe) is None:
ffmpegExe = "ffmpeg4"
if shutil.which(ffmpegExe) is None:
print("Could not find any ffmpeg executable. Video will not be generated.")
return
cmd = [
ffmpegExe, "-nostdin", "-i", frame_pattern,
"-vcodec", "libx264", "-crf", "14", "-profile:v", "high", "-pix_fmt", "yuv420p",
movie_file,"-y"
]
subprocess.run(cmd, check=True)
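Usage is just the frame directory and an output stem; the frames must follow the vid.%04d.png pattern, and -crf 14 gives near-lossless H.264. For example:

makeMovie(os.path.join(os.getcwd(), "vid2D"), "gamerr")
# -> writes ./gamerr.mp4 from vid2D/vid.0000.png, vid2D/vid.0001.png, ...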
if __name__ == "__main__":
#Defaults
fdir1 = os.getcwd()
ftag1 = "msphere"
fdir2 = os.getcwd()
ftag2 = "msphere"
oDir = "vid2D"
ts = 0 #[min]
te = 200 #[min]
dt = 60.0 #[sec]
Nblk = 1 #Number of blocks
nID = 1 #Block ID of this job
noMPI = False # Don't add MPI tiling
noLog = False
fieldNames = "Bx, By, Bz"
doVerb = False
skipMovie = False
MainS = """Creates simple multi-panel figure for Gamera magnetosphere run
Left Panel - Residual vertical magnetic field
Right Panel - Pressure (or density) and hemispherical insets
"""
parser = argparse.ArgumentParser(description=MainS, formatter_class=RawTextHelpFormatter)
parser.add_argument('-d1',type=str,metavar="directory",default=fdir1,help="Directory to read first dataset from (default: %(default)s)")
parser.add_argument('-id1',type=str,metavar="runid",default=ftag1,help="RunID of first dataset (default: %(default)s)")
parser.add_argument('-d2',type=str,metavar="directory",default=fdir2,help="Directory to read second dataset from (default: %(default)s)")
parser.add_argument('-id2',type=str,metavar="runid",default=ftag2,help="RunID of second dataset (default: %(default)s)")
parser.add_argument('-o',type=str,metavar="directory",default=oDir,help="Subdirectory to write to (default: %(default)s)")
parser.add_argument('-ts' ,type=int,metavar="tStart",default=ts,help="Starting time [min] (default: %(default)s)")
parser.add_argument('-te' ,type=int,metavar="tEnd" ,default=te,help="Ending time [min] (default: %(default)s)")
parser.add_argument('-dt' ,type=int,metavar="dt" ,default=dt,help="Cadence [sec] (default: %(default)s)")
parser.add_argument('-Nblk' ,type=int,metavar="Nblk",default=Nblk,help="Number of job blocks (default: %(default)s)")
parser.add_argument('-nID' ,type=int,metavar="nID" ,default=nID,help="Block ID of this job [1-Nblk] (default: %(default)s)")
parser.add_argument('-f',type=str,metavar="fieldnames",default=fieldNames,help="Comma-separated fields to plot (default: %(default)s)")
parser.add_argument('-linear',action='store_true', default=noLog,help="Use linear scaling for the error line plot instead of logarithmic (default: %(default)s)")
parser.add_argument('-v',action='store_true', default=doVerb,help="Do verbose output (default: %(default)s)")
parser.add_argument('-skipMovie',action='store_true', default=skipMovie,help="Skip automatic movie generation afterwards (default: %(default)s)")
#parser.add_argument('-nompi', action='store_true', default=noMPI,help="Don't show MPI boundaries (default: %(default)s)")
mviz.AddSizeArgs(parser)
#Finalize parsing
args = parser.parse_args()
fdir1 = args.d1
ftag1 = args.id1
fdir2 = args.d2
ftag2 = args.id2
ts = args.ts
te = args.te
dt = args.dt
oSub = args.o
Nblk = args.Nblk
nID = args.nID
fieldNames = args.f
noLog = args.linear
doVerb = args.v
skipMovie = args.skipMovie
#noMPI = args.noMPI
fnList = [item.strip() for item in fieldNames.split(',')]
#Setup timing info
tOut = np.arange(ts*60.0,te*60.0,dt)
Nt = len(tOut)
vO = np.arange(0,Nt)
print("Writing %d outputs between minutes %d and %d"%(Nt,ts,te))
if (Nblk>1):
#Figure out work bounds
dI = (Nt//Nblk)
i0 = (nID-1)*dI
i1 = i0+dI
if (nID == Nblk):
i1 = Nt #Make sure we get last bit
print("\tBlock #%d: %d to %d"%(nID,i0,i1))
else:
i0 = 0
i1 = Nt
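#Worked example of the block split: Nt=100 frames over Nblk=3 jobs gives
#dI=33, so blocks cover [0,33), [33,66), and [66,100); the last block
#absorbs the remainder so no frames are dropped.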
#Setup output directory
oDir = os.getcwd() + "/" + oSub
print("Writing output to %s"%(oDir))
#Check/create directory if necessary
if (not os.path.exists(oDir)):
try:
print("Creating directory %s"%(oDir))
os.makedirs(oDir)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(oDir):
pass
else:
raise
#Get domain size
xyBds = mviz.GetSizeBds(args)
#---------
#Figure parameters
figSz = (12,7.5)
#======
#Init data
gsph1 = msph.GamsphPipe(fdir1,ftag1)
gsph2 = msph.GamsphPipe(fdir2,ftag2)
#======
#Setup figure
fig = plt.figure(figsize=figSz)
gs = gridspec.GridSpec(5,2,height_ratios=[20,5,1,5,9],hspace=0.025)
AxTL = fig.add_subplot(gs[0,0])
AxTR = fig.add_subplot(gs[0,1])
AxB = fig.add_subplot(gs[-1,0:2])
AxB2 = AxB.twinx() # second plot on bottom axis
AxCT = fig.add_subplot(gs[2,0:2])
errTimes = []
errListRel = []
errListAbs = []
relColor = "tab:blue"
absColor = "tab:orange"
#Loop over sub-range
titstr = "Comparing '%s' to '%s'"%(fdir1,fdir2)
with alive_bar(i1-i0,title=titstr.ljust(kdefs.barLab),length=kdefs.barLen,disable=doVerb) as bar:
for i in range(i0,i1):
#Convert time (in seconds) to Step #
nStp = np.abs(gsph1.T-tOut[i]).argmin()+gsph1.s0
if doVerb:
print("Minute = %5.2f / Step = %d"%(tOut[i]/60.0,nStp))
npl = vO[i]
AxTL.clear()
AxTR.clear()
AxB.clear()
AxB2.clear()
#plot upper left msph error
mviz.PlotEqErrRel(gsph1,gsph2,nStp,xyBds,AxTL,fnList,AxCB=AxCT,doVerb=doVerb)
AxTL.set_title("Equatorial Slice of Relative Error")
#plot upper right k-axis error
mviz.PlotLogicalErrRel(gsph1,gsph2,nStp,AxTR,fnList,2,doVerb=doVerb)
AxTR.set_title("Per-Cell Relative Error along K-Axis")
if (not noMPI):
#plot I-MPI decomp on logical plot
if(gsph2.Ri > 1):
for im in range(gsph2.Ri):
iB = im*gsph2.dNi #Block boundary in i (avoid clobbering work bounds i0/i1)
AxTR.plot([iB, iB],[0, gsph2.Nj],"deepskyblue",linewidth=0.25,alpha=0.5)
#plot J-MPI decomp on logical plot
if (gsph2.Rj>1):
for jm in range(1,gsph2.Rj):
j0 = jm*gsph2.dNj
AxTR.plot([0, gsph2.Ni],[j0, j0],"deepskyblue",linewidth=0.25,alpha=0.5)
#plot bottom line plot
errTimes.append(tOut[i]/60.0)
errListRel.append(mviz.CalcTotalErrRel(gsph1,gsph2,nStp,fnList,doVerb=doVerb))
errListAbs.append(mviz.CalcTotalErrAbs(gsph1,gsph2,nStp,fnList,doVerb=doVerb))
if noLog:
AxB.plot(errTimes, errListRel,color=relColor)
AxB2.plot(errTimes, errListAbs,color=absColor)
else:
AxB.semilogy(errTimes, errListRel,color=relColor)
AxB2.semilogy(errTimes, errListAbs,color=absColor)
AxB.set_xlabel('Time (min)')
AxB.set_ylabel('Per-Cell Mean Relative Error',color=relColor)
AxB.tick_params(axis='y',which='both',colors=relColor,left=True,right=True,labelleft=True,labelright=False)
AxB2.set_ylabel('Per-Cell Mean Absolute Error',color=absColor)
#AxB2.yaxis.tick_right()
AxB2.tick_params(axis='y',which='both',colors=absColor,left=True,right=True,labelleft=False,labelright=True)
AxB.set_title("'" + fieldNames + "' Per-Cell Error Over Time")
gsph1.AddTime(nStp,AxTL,xy=[0.025,0.84],fs="x-large")
#Add MPI decomp
if (not noMPI):
mviz.PlotMPI(gsph2,AxTL)
fOut = oDir+"/vid.%04d.png"%(npl)
kv.savePic(fOut,bLenX=45)
bar()
if not skipMovie:
makeMovie(oDir,oSub)

99
scripts/quicklook/gamerrpic.py Executable file
View File

@@ -0,0 +1,99 @@
#!/usr/bin/env python
#Make single image of error between two Gamera cases
import argparse
from argparse import RawTextHelpFormatter
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import kaipy.kaiViz as kv
import matplotlib.gridspec as gridspec
import numpy as np
import kaipy.gamera.msphViz as mviz
import kaipy.gamera.magsphere as msph
import kaipy.gamera.rcmpp as rcmpp
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import os
import errno
cLW = 0.25
if __name__ == "__main__":
#Defaults
fdir1 = os.getcwd()
ftag1 = "msphere"
fdir2 = os.getcwd()
ftag2 = "msphere"
nStp=1
fieldNames = "Bx, By, Bz"
doMPI = False #[Add MPI tiling]
noMPI = False
MainS = """Creates simple multi-panel figure for Gamera magnetosphere run
Left Panel - Residual vertical magnetic field
Right Panel - Pressure (or density) and hemispherical insets
"""
parser = argparse.ArgumentParser(description=MainS, formatter_class=RawTextHelpFormatter)
parser.add_argument('-d1',type=str,metavar="directory1",default=fdir1,help="Directory to read first dataset from (default: %(default)s)")
parser.add_argument('-id1',type=str,metavar="runid1",default=ftag1,help="RunID of first dataset (default: %(default)s)")
parser.add_argument('-d2',type=str,metavar="directory2",default=fdir2,help="Directory to read second dataset from (default: %(default)s)")
parser.add_argument('-id2',type=str,metavar="runid2",default=ftag2,help="RunID of second dataset (default: %(default)s)")
parser.add_argument('-n',type=int,metavar="nStp",default=nStp,help="Step number to plot (default: %(default)s)")
parser.add_argument('-f',type=str,metavar="fieldnames",default=fieldNames,help="Comma-separated fields to plot (default: %(default)s)")
#parser.add_argument('-nompi', action='store_true', default=noMPI,help="Don't show MPI boundaries (default: %(default)s)")
mviz.AddSizeArgs(parser)
#Finalize parsing
args = parser.parse_args()
fdir1 = args.d1
ftag1 = args.id1
fdir2 = args.d2
ftag2 = args.id2
nStp = args.n
fieldNames = args.f
oName = "gamErrPic.png"
#Get domain size
xyBds = mviz.GetSizeBds(args)
#---------
#Figure parameters
figSz = (12,7.5)
#======
#Init data
gsph1 = msph.GamsphPipe(fdir1,ftag1)
gsph2 = msph.GamsphPipe(fdir2,ftag2)
#======
#Setup figure
fig = plt.figure(figsize=figSz)
gs = gridspec.GridSpec(2,2,height_ratios=[20,1],hspace=0.025)
AxL = fig.add_subplot(gs[0,0])
AxR = fig.add_subplot(gs[0,1])
AxCL = fig.add_subplot(gs[-1,0])
AxCR = fig.add_subplot(gs[-1,1])
fnList = [item.strip() for item in fieldNames.split(',')]
AxL.clear()
AxR.clear()
mviz.PlotEqErrRel(gsph1,gsph2,nStp,xyBds,AxL,fnList,AxCB=AxCL)
mviz.PlotEqErrAbs(gsph1,gsph2,nStp,xyBds,AxR,fnList,AxCB=AxCR)
gsph1.AddTime(nStp,AxL,xy=[0.025,0.89],fs="x-large")
#Add MPI decomp
if (doMPI):
mviz.PlotMPI(gsph2,AxL)
mviz.PlotMPI(gsph2,AxR)
kv.savePic(oName,bLenX=45)

View File

@@ -63,9 +63,13 @@ import subprocess
# Import supplemental modules.
import astropy.time
from astropy.coordinates import SkyCoord
import astropy.units as u
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import spacepy.datamodel as dm
from sunpy.coordinates import frames
# Import project-specific modules.
from kaipy import cdaweb_utils
@@ -93,7 +97,7 @@ default_last_step = -1
default_pictype = "pic1"
# Valid plot type strings.
valid_pictypes = ("pic1", "pic2", "pic3", "pic4", "pic5")
valid_pictypes = ("pic1", "pic2", "pic3", "pic4", "pic5", "pic6", "pic7")
# Default movie format.
default_movie_format = "mp4"
@@ -113,6 +117,8 @@ figure_sizes = {
"pic3": (10, 6.5),
"pic4": (10, 6),
"pic5": (12, 12),
"pic6": (12.5, 12.5),
"pic7": (10, 12.5),
}
# List of colors to use for spacecraft position dots.
@@ -142,49 +148,58 @@ def create_command_line_parser():
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument(
"--clobber", action="store_true", default=False,
"--clobber", action="store_true",
help="Overwrite existing frame and movie files (default: %(default)s)."
)
parser.add_argument(
"--debug", action="store_true", default=False,
"--debug", action="store_true",
help="Print debugging output (default: %(default)s)."
)
parser.add_argument(
"-d", "--directory", type=str, metavar="directory",
"--directory", "-d", type=str, metavar="directory",
default=os.getcwd(),
help="Directory containing data to read (default: %(default)s)"
)
parser.add_argument(
"-f", "--movie_format", type=str, metavar="movie_format",
default=default_movie_format,
help="Output movie format (default: %(default)s)"
)
parser.add_argument(
"-id", "--runid", type=str, metavar="runid", default=default_runid,
help="Run ID of data (default: %(default)s)"
)
parser.add_argument(
"-n0", "--first_step", type=int, metavar="n0",
"--first_step", "-n0", type=int, metavar="n0",
default=default_first_step,
help="First time step to plot (default: %(default)s)"
)
parser.add_argument(
"-n1", "--last_step", type=int, metavar="n1",
"--hgsplot", action="store_true",
help="Plot in the Heliographic Stonyhurst frame corresponding to the "
"date of the plot (default: %(default)s)."
)
parser.add_argument(
"--jslice", "-jslice", type=int, metavar="jSlice", default=None,
help="Index of j-slice for pic7 (default: Nj/2-1)"
)
parser.add_argument(
"--last_step", "-n1", type=int, metavar="n1",
default=default_last_step,
help="Last time step to plot (default: %(default)s)"
)
parser.add_argument(
"-p", "--pictype", type=str, metavar="pictype",
"--movie_format", type=str, metavar="movie_format",
default=default_movie_format,
help="Output movie format (default: %(default)s)"
)
parser.add_argument(
"--pictype", "-p", type=str, metavar="pictype",
default=default_pictype,
help="Code for plot type (default: %(default)s)"
)
parser.add_argument(
"--runid", "-id", type=str, metavar="runid", default=default_runid,
help="Run ID of data (default: %(default)s)"
)
parser.add_argument(
"--spacecraft", type=str, metavar="spacecraft", default=None,
help="Names of spacecraft to plot positions, separated by commas"
" (default: %(default)s)"
)
parser.add_argument(
"-v", "--verbose", action="store_true", default=False,
"--verbose", "-v", action="store_true", default=False,
help="Print verbose output (default: %(default)s)."
)
return parser
@@ -230,7 +245,7 @@ def fetch_spacecraft_trajectories(spacecraft_names, gsph):
MJDc = scutils.read_MJDc(fname)
# Fetch the trajectory of each spacecraft from CDAWeb.
for (i_sc, sc_id) in enumerate(spacecraft_names):
for sc_id in spacecraft_names:
# Fetch the spacecraft trajectory in whatever frame is available
# from CDAWeb.
@@ -288,7 +303,10 @@ def assemble_frames_into_gif(frame_files, args):
# Create the movie.
cmd = ["convert", "-delay", "10", "-loop", "0"]
cmd += frame_files
movie_file = os.path.join(movie_directory, f"{pictype}.gif")
if args.hgsplot:
movie_file = os.path.join(movie_directory, f"{pictype}-HGS.gif")
else:
movie_file = os.path.join(movie_directory, f"{pictype}.gif")
cmd.append(movie_file)
# NOTE: movie_file is overwritten by default.
subprocess.run(cmd, check=True)
@@ -324,8 +342,12 @@ def assemble_frames_into_mp4(frame_files, args):
# Create the movie.
frame_directory = os.path.split(frame_files[0])[0]
frame_pattern = os.path.join(frame_directory, f"{pictype}-%06d.png")
movie_file = os.path.join(movie_directory, f"{pictype}.mp4")
if args.hgsplot:
movie_file = os.path.join(movie_directory, f"{pictype}-HGS.mp4")
frame_pattern = os.path.join(frame_directory, f"{pictype}-HGS-%06d.png")
else:
movie_file = os.path.join(movie_directory, f"{pictype}.mp4")
frame_pattern = os.path.join(frame_directory, f"{pictype}-%06d.png")
cmd = [
"ffmpeg", "-r", "4", "-s", "1920x1080",
"-i", frame_pattern,
@@ -376,6 +398,65 @@ def assemble_frames_into_movie(frame_files, args):
return movie_file
def GHtoHGS(mjd_gh, x_gh, y_gh, z_gh, mjd_hgs):
"""Convert Cartesin GH coordinates to HGS.
Convert Cartesian coordinates in the gamhelio frame at time mjdc to
the Heliographic Sonyhurst frame at time mjd.
NOTE: The gamhelio frame at time t is related to the Heliographic
Stonyhurst frame at time t by the reflection of the x- and y-axes:
x_gh(t) = -x_hgs(t)
y_gh(t) = -y_hgs(t)
z_gh(t) = z_hgs(t)
Since HGS is a time-dependent frame, a time must be provided for each set
of coordinates.
Parameters
----------
mjd_gh : float
MJD of source gamhelio frame
x_gh, y_gh, z_gh : np.array of float (any shape) or scalar float
Cartesian coordinates in GH(mjdc) frame. All three arrays x, y, z must
have identical shapes.
mjd_hgs : float
MJD of target HGS frame
Returns
-------
x_hgs, y_hgs, z_hgs : np.array of float (same shape as x_gh, y_gh, z_gh)
Cartesian coordinates converted to HGS(mjd) frame.
Raises
------
None
"""
# Load the source coordinates (originally in the GH(mjd_gh) frame) into
# the equivalent HGS(mjd_gh) frame.
c_gh = SkyCoord(
-x_gh*u.Rsun, -y_gh*u.Rsun, z_gh*u.Rsun,
frame=frames.HeliographicStonyhurst,
obstime=ktools.MJD2UT(mjd_gh),
representation_type="cartesian"
)
# Create the target Heliographic Stonyhurst frame.
hgs_frame = frames.HeliographicStonyhurst(
obstime=ktools.MJD2UT(mjd_hgs)
)
# Convert the coordinates from GH(mjd_gh) to HGS(mjd_hgs).
c_hgs = c_gh.transform_to(hgs_frame)
# Extract and return the converted coordinates.
x_hgs = dm.dmarray(c_hgs.cartesian.x)
y_hgs = dm.dmarray(c_hgs.cartesian.y)
z_hgs = dm.dmarray(c_hgs.cartesian.z)
return x_hgs, y_hgs, z_hgs
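# Minimal usage sketch (MJD values are illustrative): convert one point from
# the gamhelio frame at MJDc to HGS at a later plot time. Inputs may be
# scalars or arrays of identical shape; shapes are preserved.
# x_hgs, y_hgs, z_hgs = GHtoHGS(60000.0, 215.0, 0.0, 0.0, 60010.0)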
def create_pic1_movie(args):
"""Create a pic1-style gamhelio movie.
@@ -397,6 +478,7 @@ def create_pic1_movie(args):
"""
# Extract arguments.
debug = args.debug
hgsplot=args.hgsplot
pictype = args.pictype
spacecraft = args.spacecraft
verbose = args.verbose
@@ -414,9 +496,6 @@ def create_pic1_movie(args):
if debug:
print(f"figsize = {figsize}")
# Create figures in a memory buffer.
mpl.use("Agg")
# Create the figure.
fig = plt.figure(figsize=figsize)
if debug:
@@ -477,7 +556,10 @@ def create_pic1_movie(args):
)
# Compute the path to the frame directory.
frame_directory = os.path.join(fdir, f"frames-{pictype}")
if hgsplot:
frame_directory = os.path.join(fdir, f"frames-{pictype}-HGS")
else:
frame_directory = os.path.join(fdir, f"frames-{pictype}")
try:
os.mkdir(frame_directory)
except FileExistsError as e:
@@ -486,6 +568,10 @@ def create_pic1_movie(args):
else:
raise e
# Get the MJDc value for use in computing the gamhelio frame.
fname = gsph.f0
MJDc = scutils.read_MJDc(fname)
# Create and save frame images for each step.
first_step = args.first_step
last_step = args.last_step
@@ -506,13 +592,19 @@ def create_pic1_movie(args):
print(f"mjd = {mjd}")
# Create the individual plots for this frame.
hviz.PlotEqMagV(gsph, i_step, plot_limits, ax_v, ax_cb_v)
hviz.PlotEqD(gsph, i_step, plot_limits, ax_n, ax_cb_n)
hviz.PlotEqTemp(gsph, i_step, plot_limits, ax_T, ax_cb_T)
hviz.PlotEqBr(gsph, i_step, plot_limits, ax_Br, ax_cb_Br)
# Add time in the upper left.
gsph.AddTime(i_step, ax_v, xy=[0.025, 0.875], fs="x-large")
hviz.PlotEqMagV(gsph, i_step, plot_limits, ax_v, ax_cb_v,
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotEqD(gsph, i_step, plot_limits, ax_n, ax_cb_n,
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotEqTemp(gsph, i_step, plot_limits, ax_T, ax_cb_T,
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotEqBr(gsph, i_step, plot_limits, ax_Br, ax_cb_Br,
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
if hgsplot:
fig.suptitle("Heliographic Stonyhurst frame for "
f"{ktools.MJD2UT(mjd)}")
else:
fig.suptitle(f"GAMERA-Helio frame for {ktools.MJD2UT(mjd)}")
# Overlay spacecraft positions (optional).
if spacecraft:
@@ -526,7 +618,11 @@ def create_pic1_movie(args):
t_sc = mjd
x_sc = np.interp(t_sc, sc_t[sc_id], sc_x[sc_id])
y_sc = np.interp(t_sc, sc_t[sc_id], sc_y[sc_id])
# z_sc = np.interp(t_sc, sc_t[sc_id], sc_z[sc_id])
z_sc = np.interp(t_sc, sc_t[sc_id], sc_z[sc_id])
# If needed, convert the position to HGS(mjd).
if hgsplot:
x_sc, y_sc, z_sc = GHtoHGS(MJDc, x_sc, y_sc, z_sc, mjd)
# Plot the spacecraft position as a colored circle with black
# outline and a label.
@@ -541,7 +637,10 @@ def create_pic1_movie(args):
c="black", horizontalalignment="center")
# Save the figure to a file.
path = os.path.join(frame_directory, f"{pictype}-{i_step:06d}.png")
if hgsplot:
path = os.path.join(frame_directory, f"{pictype}-HGS-{i_step:06d}.png")
else:
path = os.path.join(frame_directory, f"{pictype}-{i_step:06d}.png")
if debug:
print(f"path = {path}")
kv.savePic(path, bLenX=45)
@@ -582,6 +681,7 @@ def create_pic2_movie(args):
"""
# Extract arguments.
debug = args.debug
hgsplot=args.hgsplot
pictype = args.pictype
spacecraft = args.spacecraft
verbose = args.verbose
@@ -591,9 +691,6 @@ def create_pic2_movie(args):
if debug:
print(f"plot_limits = {plot_limits}")
# Create all plot images in a memory buffer.
mpl.use("Agg")
# Fetch the figure size.
figsize = figure_sizes[pictype]
if debug:
@@ -662,7 +759,10 @@ def create_pic2_movie(args):
)
# Compute the path to the frame directory.
frame_directory = os.path.join(fdir, f"frames-{pictype}")
if hgsplot:
frame_directory = os.path.join(fdir, f"frames-HGS-{pictype}-HGS")
else:
frame_directory = os.path.join(fdir, f"frames-{pictype}")
try:
os.mkdir(frame_directory)
except FileExistsError as e:
@@ -671,6 +771,10 @@ def create_pic2_movie(args):
else:
raise e
# Get the MJDc value for use in computing the gamhelio frame.
fname = gsph.f0
MJDc = scutils.read_MJDc(fname)
# Create and save frame images for each step.
first_step = args.first_step
last_step = args.last_step
@@ -691,13 +795,19 @@ def create_pic2_movie(args):
print(f"mjd = {mjd}")
# Create the individual plots for this frame.
hviz.PlotMerMagV(gsph, i_step, plot_limits, ax_v, ax_cb_v)
hviz.PlotMerDNorm(gsph, i_step, plot_limits, ax_n, ax_cb_n)
hviz.PlotMerTemp(gsph, i_step, plot_limits, ax_T, ax_cb_T)
hviz.PlotMerBrNorm(gsph, i_step, plot_limits, ax_Br, ax_cb_Br)
# Add time in the upper left.
gsph.AddTime(i_step, ax_v, xy=[0.025, 0.875], fs="x-large")
hviz.PlotMerMagV(gsph, i_step, plot_limits, ax_v, ax_cb_v,
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotMerDNorm(gsph, i_step, plot_limits, ax_n, ax_cb_n,
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotMerTemp(gsph, i_step, plot_limits, ax_T, ax_cb_T,
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotMerBrNorm(gsph, i_step, plot_limits, ax_Br, ax_cb_Br,
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
if hgsplot:
fig.suptitle("Heliographic Stonyhurst frame for "
f"{ktools.MJD2UT(mjd)}")
else:
fig.suptitle(f"GAMERA-Helio frame for {ktools.MJD2UT(mjd)}")
# Overlay spacecraft positions (optional).
if spacecraft:
@@ -710,9 +820,13 @@ def create_pic2_movie(args):
# Interpolate the spacecraft position at the time for the plot.
t_sc = mjd
x_sc = np.interp(t_sc, sc_t[sc_id], sc_x[sc_id])
# y_sc = np.interp(t_sc, sc_t[sc_id], sc_y[sc_id])
y_sc = np.interp(t_sc, sc_t[sc_id], sc_y[sc_id])
z_sc = np.interp(t_sc, sc_t[sc_id], sc_z[sc_id])
# If needed, convert the position to HGS(mjd).
if hgsplot:
x_sc, y_sc, z_sc = GHtoHGS(MJDc, x_sc, y_sc, z_sc, mjd)
# Plot the spacecraft position as a colored circle with black
# outline and a label.
x_nudge = 0.0
@@ -726,7 +840,10 @@ def create_pic2_movie(args):
c="black", horizontalalignment="center")
# Save the figure to a file.
path = os.path.join(frame_directory, f"{pictype}-{i_step:06d}.png")
if hgsplot:
path = os.path.join(frame_directory, f"{pictype}-HGS-{i_step:06d}.png")
else:
path = os.path.join(frame_directory, f"{pictype}-{i_step:06d}.png")
if debug:
print(f"path = {path}")
kv.savePic(path, bLenX=45)
@@ -767,6 +884,7 @@ def create_pic3_movie(args):
"""
# Extract arguments.
debug = args.debug
hgsplot=args.hgsplot
pictype = args.pictype
spacecraft = args.spacecraft
verbose = args.verbose
@@ -776,9 +894,6 @@ def create_pic3_movie(args):
if debug:
print(f"plot_limits = {plot_limits}")
# Create all plot images in a memory buffer.
mpl.use("Agg")
# Fetch the figure size.
figsize = figure_sizes[pictype]
if debug:
@@ -847,7 +962,10 @@ def create_pic3_movie(args):
)
# Compute the path to the frame directory.
frame_directory = os.path.join(fdir, f"frames-{pictype}")
if hgsplot:
frame_directory = os.path.join(fdir, f"frames-HGS-{pictype}-HGS")
else:
frame_directory = os.path.join(fdir, f"frames-{pictype}")
try:
os.mkdir(frame_directory)
except FileExistsError as e:
@@ -856,6 +974,10 @@ def create_pic3_movie(args):
else:
raise e
# Get the MJDc value for use in computing the gamhelio frame.
fname = gsph.f0
MJDc = scutils.read_MJDc(fname)
# Create and save frame images for each step.
first_step = args.first_step
last_step = args.last_step
@@ -876,13 +998,26 @@ def create_pic3_movie(args):
print(f"mjd = {mjd}")
# Create the individual plots for this frame.
hviz.PlotiSlMagV(gsph, i_step, plot_limits, ax_v, ax_cb_v)
hviz.PlotiSlD(gsph, i_step, plot_limits, ax_n, ax_cb_n)
hviz.PlotiSlTemp(gsph, i_step, plot_limits, ax_T, ax_cb_T)
hviz.PlotiSlBr(gsph, i_step, plot_limits, ax_Br, ax_cb_Br)
# Add time in the upper left.
gsph.AddTime(i_step, ax_v, xy=[0.015, 0.82], fs="small")
AU_RSUN = 215.0
radius = AU_RSUN
hviz.PlotiSlMagV(gsph, i_step, plot_limits, ax_v, ax_cb_v, idx=radius,
idx_is_radius=True,
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotiSlD(gsph, i_step, plot_limits, ax_n, ax_cb_n, idx=radius,
idx_is_radius=True,
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotiSlTemp(gsph, i_step, plot_limits, ax_T, ax_cb_T, idx=radius,
idx_is_radius=True,
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotiSlBr(gsph, i_step, plot_limits, ax_Br, ax_cb_Br, idx=radius,
idx_is_radius=True,
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
if hgsplot:
fig.suptitle("Heliographic Stonyhurst frame at 1 AU for "
f"{ktools.MJD2UT(mjd)}")
else:
fig.suptitle("GAMERA-Helio frame at 1 AU for "
f"{ktools.MJD2UT(mjd)}")
# Overlay spacecraft positions (optional).
if spacecraft:
@@ -895,6 +1030,13 @@ def create_pic3_movie(args):
# Interpolate the spacecraft position at the time for the plot.
t_sc = mjd
# If needed, convert the position to HGS(mjd).
if hgsplot:
sc_x[sc_id], sc_y[sc_id], sc_z[sc_id] = (
GHtoHGS(MJDc, sc_x[sc_id], sc_y[sc_id], sc_z[sc_id],
mjd)
)
# Convert Cartesian location to heliocentric lon/lat.
rxy = np.sqrt(sc_x[sc_id]**2 + sc_y[sc_id]**2)
theta = np.arctan2(rxy, sc_z[sc_id])
@@ -917,7 +1059,10 @@ def create_pic3_movie(args):
c="black", horizontalalignment="center")
# Save the figure to a file.
path = os.path.join(frame_directory, f"{pictype}-{i_step:06d}.png")
if hgsplot:
path = os.path.join(frame_directory, f"{pictype}-HGS-{i_step:06d}.png")
else:
path = os.path.join(frame_directory, f"{pictype}-{i_step:06d}.png")
if debug:
print(f"path = {path}")
kv.savePic(path, bLenX=45)
@@ -1240,6 +1385,54 @@ def create_pic5_movie(args):
return movie_file
def create_pic6_movie(args):
"""Create a pic6-style gamhelio movie.
Create a pic6-style gamhelio movie.
Parameters
----------
args : dict
Dictionary of command-line arguments.
Returns
-------
movie_file : str
Path to movie file.
Raises
------
None
"""
# Return the path to the movie file.
movie_file = None
return movie_file
def create_pic7_movie(args):
"""Create a pic7-style gamhelio movie.
Create a pic7-style gamhelio movie.
Parameters
----------
args : dict
Dictionary of command-line arguments.
Returns
-------
movie_file : str
Path to movie file.
Raises
------
None
"""
# Return the path to the movie file.
movie_file = None
return movie_file
def create_gamhelio_movie(args):
"""Create a gamhelio movie.
@@ -1248,7 +1441,7 @@ def create_gamhelio_movie(args):
Parameters
----------
args : dict
Dictionary of command-line options.
Dictionary of command-line arguments.
Returns
-------
@@ -1263,7 +1456,7 @@ def create_gamhelio_movie(args):
debug = args.debug
pictype = args.pictype
# Check that a valid plot code was provided.
# Check that a valid picture type code was provided.
if pictype not in valid_pictypes:
raise TypeError(f"Invalid plot type ({pictype})!")
@@ -1278,6 +1471,10 @@ def create_gamhelio_movie(args):
movie_file = create_pic4_movie(args)
elif pictype == "pic5":
movie_file = create_pic5_movie(args)
elif pictype == "pic6":
movie_file = create_pic6_movie(args)
elif pictype == "pic7":
movie_file = create_pic7_movie(args)
else:
raise TypeError(f"Invalid plot type ({pictype})!")
if debug:
@@ -1295,17 +1492,14 @@ def main():
# Parse the command-line arguments.
args = parser.parse_args()
debug = args.debug
verbose = args.verbose
if debug:
if args.debug:
print(f"args = {args}")
# Create the movie based on the selected picture type.
movie_file = create_gamhelio_movie(args)
if verbose:
if args.verbose:
print(f"The movie is available in {movie_file}.")
if __name__ == "__main__":
"""Begin main program."""
main()

View File

@@ -1,30 +1,72 @@
#!/usr/bin/env python
"""Make a quick figure of a Gamera heliosphere run.
"""Make a quick-look plot for a gamhelio run.
Make a quick figure of a Gamera heliosphere run.
Make a quick-look plot for a gamhelio run.
Five different sets of plots are supported, and are distinguished by the
Several different sets of plots are supported, and are distinguished by the
value of the "pic" argument.
pic1 (default): A 4-panel display showing radial speed, number
density*(r/r0)**2, temperature*(r/r0), and radial magnetic field*(r/r0)**2.
These plots are done in the XY plane of the gamhelio frame (which is a
Heliographic Stonyhurst (HGS) frame, modified with the +x reversed from
the usual HGS definition)
pic1 (default): A 4-panel display showing pcolormesh plots in the z = 0
(equatorial) plane of the gamhelio frame used in the simulation. The plots
are (r0 is the inner radius of the grid, which should be 21.5 Rsun):
pic2:
Upper left: Solar wind speed (km/s)
Upper right: Solar wind number density scaled by (r/r0)**2 (cm**-3)
Lower left: Solar wind temperature scaled by r/r0 (MK)
Lower right: Solar wind radial magnetic field scaled by (r/r0)**2 (nT)
pic3:
pic2: A 4-panel display showing pcolormesh plots in the y = 0 (meridional,
containing Earth) plane of the gamhelio frame used in the simulation. The
plots are (r0 is the inner radius of the grid, which should be 21.5 Rsun):
pic4:
Upper left: Solar wind speed (km/s)
Upper right: Solar wind number density scaled by (r/r0)**2 (cm**-3)
Lower left: Solar wind temperature scaled by r/r0 (MK)
Lower right: Solar wind radial magnetic field scaled by (r/r0)**2 (nT)
pic5:
pic3: A 4-panel display showing pcolormesh plots in the r = 1 AU slice of the
gamhelio frame used in the simulation. The plots are:
Author
------
Upper left: Solar wind speed (km/s)
Upper right: Solar wind number density (cm**-3)
Lower left: Solar wind temperature (MK)
Lower right: Solar wind radial magnetic field (nT)
pic4: A pcolormesh plot in the innermost radial slice (r = 22 Rsun) of the
gamhelio frame used in the simulation. The plot shows the radial magnetic
field in nT, in a coordinate frame rotating with the Sun. - SCALED?
pic5: A 3-panel display showing solar wind variables as a function of radius,
22 Rsun <= r <= 220 Rsun. The plots are:
Upper left: Solar wind number density (cm**-3) - SCALED?
Upper right: Solar wind speed (km/s) - SCALED?
Lower left: Solar wind radial momentum flux (km**2/s**2/cm**3) - SCALED?
pic6: A 4-panel display showing components of the solar wind magnetic field
in the solar equatorial plane (z=0), for -200 Rsun <= X, Y <= +200 Rsun.
Upper left: Radial component of magnetic field (nT)
Upper right: x-component of magnetic field (nT)
Lower left: y-component of magnetic field (nT)
Lower right: z-component of magnetic field (nT)
pic7: A 4-panel display showing pcolormesh plots in a j-slice. A j-slice is
a slice through the gamhelio data cube at a fixed colatitude. j = 0 corresponds
to the YZ plane of the gamhelio frame used in the simulation. The j = Nj/2-1
slice corresponds to the equatorial plane. The plots are:
Upper left: Solar wind speed (km/s)
Upper right: Solar wind number density scaled by (r/r0)**2 (cm**-3)
Lower left: Solar wind temperature scaled by r/r0 (MK)
Lower right: Solar wind radial magnetic field scaled by r/r0 (nT)
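Example invocations (script name, directory, and run ID are illustrative):
helioqkpic.py -d ./run -id wsa -pic pic1 --hgsplot
helioqkpic.py -id wsa -pic pic7 -jslice 64 --nslice 0:10:2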
Authors
-------
Elena Provornikova (elena.provornikova@jhuapl.edu)
Andrew McCubbin (andrew.mccubbin@jhuapl.edu)
Eric Winter (eric.winter@jhuapl.edu)
"""
@@ -36,10 +78,14 @@ import time
# Import supplemental modules.
import astropy
from astropy.coordinates import SkyCoord
import astropy.units as u
import matplotlib as mpl
from matplotlib import gridspec
import matplotlib.pyplot as plt
import numpy as np
import spacepy.datamodel as dm
from sunpy.coordinates import frames
# Import project-specific modules.
from kaipy import cdaweb_utils
@@ -48,34 +94,29 @@ import kaipy.gamhelio.heliosphere as hsph
import kaipy.kaiH5 as kh5
import kaipy.kaiTools as ktools
import kaipy.kaiViz as kv
import kaipy.kdefs as kdefs
from kaipy.satcomp import scutils
# Program constants and defaults
# Program description.
description = """Creates multi-panel figure for Gamera heliosphere run
Upper left - Solar wind speed
Upper right - Solar wind number density
Lower left - Solar wind temperature
Lower right - Solar wind radial magnetic field
"""
DESCRIPTION = "Make a quicklook plot for a gamhelio run."
# Default identifier for results to read.
default_runid = "wsa"
DEFAULT_RUNID = "wsa"
# List of steps
default_steps = "-1"
DEFAULT_STEPS = "1"
# Default slices
default_slice = "1:2:1"
DEFAULT_SLICE = None
# Code for default picture type.
default_pictype = "pic1"
DEFAULT_PICTYPE = "pic1"
# Colors to use for spacecraft position symbols.
SPACECRAFT_COLORS = list(mpl.colors.TABLEAU_COLORS.keys())
# Color to use for spacecraft position symbols.
SPACECRAFT_COLOR = "red"
def create_command_line_parser():
"""Create the command-line argument parser.
@@ -90,67 +131,100 @@ def create_command_line_parser():
-------
parser : argparse.ArgumentParser
Command-line argument parser for this script.
Raises
------
None
"""
parser = argparse.ArgumentParser(
description=description,
formatter_class=argparse.RawTextHelpFormatter
)
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument(
"--debug", action="store_true", default=False,
"--debug", action="store_true",
help="Print debugging output (default: %(default)s)."
)
parser.add_argument(
"-d", type=str, metavar="directory", default=os.getcwd(),
"--directory", "-d", type=str, metavar="directory",
default=os.getcwd(),
help="Directory containing data to read (default: %(default)s)"
)
parser.add_argument(
"-id", type=str, metavar="runid", default=default_runid,
"--hgsplot", action="store_true",
help="Plot in the Heliographic Stonyhurst frame corresponding to the "
"date of the plot (default: %(default)s)."
)
parser.add_argument(
"-id", type=str, metavar="runid", default=DEFAULT_RUNID,
help="Run ID of data (default: %(default)s)"
)
parser.add_argument(
"--nlist", type=lambda n: [int(item) for item in n.split(',')], metavar="list of steps", default=default_steps,
help="List of time slice(s) to plot (default: %(default)s)"
)
parser.add_argument(
"--nslice", type=lambda n: [int(item) for item in n.split(':')], metavar="step slice", default=default_slice,
help="Slice for range of time slice(s) to plot (default: %(default)s)"
)
parser.add_argument(
"-nompi", action="store_true", default=False,
help="Don't show MPI boundaries (default: %(default)s)."
)
parser.add_argument(
"-pic", type=str, metavar="pictype", default=default_pictype,
help="Code for plot type (default: %(default)s)"
)
parser.add_argument(
"--spacecraft", type=str, metavar="spacecraft", default=None,
help="Names of spacecraft to plot positions, separated by commas (default: %(default)s)"
)
parser.add_argument(
"-v", "--verbose", action="store_true", default=False,
help="Print verbose output (default: %(default)s)."
)
parser.add_argument(
"-o", "--outfile", type=str, metavar="outFile", default=fOut,
help="Output file name (default: %(default)s)."
)
parser.add_argument(
"-p", "--parallel", action="store_true", default=False,
help="Read from HDF5 in parallel (default: %(default)s)."
)
parser.add_argument(
"-nw", "--nworkers", type=int, metavar="nworkers", default=4,
help="Number of parallel workers (default: %(default)s)"
"-jslice", type=int, metavar="jSlice", default=None,
help="Index of j-slice for pic7 (default: Nj/2-1)"
)
parser.add_argument(
"-lon", type=float, metavar="lon", default=0.0,
help="Longitude of meridian slice (pic2) (default: %(default)s)"
)
parser.add_argument(
"--nlist", type=lambda n: [int(item) for item in n.split(',')],
metavar="list of steps", default=DEFAULT_STEPS,
help="List of time slice(s) n1,n2,... to plot (default: %(default)s)"
)
parser.add_argument(
"--nslice", type=lambda n: [int(item) for item in n.split(':')],
metavar="step slice", default=DEFAULT_SLICE,
help="Slice for range of time slice(s) n1:n2 to plot "
"(default: %(default)s)"
)
parser.add_argument(
"--nworkers", "-nw", type=int, metavar="nworkers", default=4,
help="Number of parallel workers (default: %(default)s)"
)
parser.add_argument(
"--parallel", "-p", action="store_true",
help="Read from HDF5 in parallel (default: %(default)s)."
)
parser.add_argument(
"-pic", type=str, metavar="pictype",
default=DEFAULT_PICTYPE,
help="Code for plot type (pic1-pic7) (default: %(default)s)"
)
parser.add_argument(
"--spacecraft", type=str, metavar="spacecraft", default=None,
help="Names of spacecraft to plot positions, separated by commas "
"(default: %(default)s)"
)
parser.add_argument(
"--verbose", "-v", action="store_true",
help="Print verbose output (default: %(default)s)."
)
parser.add_argument(
"--inner", action="store_true",
help="Plot inner i-slice for pic3 (default: %(default)s)."
)
# Return the parser.
return parser
def initFig(pic):
# Determine figure size (width, height) (inches) based on the picture type.
"""Create the matplotlib figure for a plot.
Determine figure size (width, height) (inches) based on the pic type.
Parameters
----------
pic : str
String representing picture type.
Returns
-------
fig : mpl.Figure
Matplotlib Figure to use for plots.
Raises
------
None
"""
# Figure dimensions are in inches.
if pic == "pic1" or pic == "pic2" or pic == "pic7":
figSz = (10, 12.5)
elif pic == "pic3":
@@ -159,84 +233,188 @@ def initFig(pic):
figSz = (10, 6)
elif pic == "pic5":
figSz = (12, 12)
elif pic == "pic6":
figSz = (12.5, 12.5)
elif pic == "pic6":
figSz = (10, 12.5)
# Create the figure.
fig = plt.figure(figsize=figSz)
fig = plt.figure(figsize=figSz, layout="constrained")
return fig
# Name of plot output file.
def fOut(id, pic, nStp):
return "qkpic_{}_{}_n{}.png".format(id, pic, nStp)
if __name__ == "__main__":
from alive_progress import alive_bar
def fOut(runid, pic, nStp, hgsplot):
"""Compute the name of the output file.
"""Make a quick figure of a Gamera heliosphere run."""
Compute the name of the output file.
Parameters
----------
runid : str
ID string for run
pic : str
String representing picture type.
nStp : int
Simulation step number used in plot.
hgsplot : bool
True if plot is in HGS frame at the date of the plot
Returns
-------
s : str
Name of file to receive the plot.
Raises
------
None
"""
if hgsplot:
s = f"qkpic_{runid}_{pic}_HGS_n{nStp}.png"
else:
s = f"qkpic_{runid}_{pic}_n{nStp}.png"
return s
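# Example (hypothetical values): fOut("wsa", "pic3", 120, True) returns
# "qkpic_wsa_pic3_HGS_n120.png"; with hgsplot=False the HGS tag is dropped.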
def GHtoHGS(mjd_gh, x_gh, y_gh, z_gh, mjd_hgs):
"""Convert Cartesin GH coordinates to HGS.
Convert Cartesian coordinates in the gamhelio frame at time mjdc to
the Heliographic Sonyhurst frame at time mjd.
NOTE: The gamhelio frame at time t is related to the Heliographic
Stonyhurst frame at time t by the reflection of the x- and y-axes:
x_gh(t) = -x_hgs(t)
y_gh(t) = -y_hgs(t)
z_gh(t) = z_hgs(t)
Since HGS is a time-dependent frame, a time must be provided for each set
of coordinates.
Parameters
----------
mjd_gh : float
MJD of source gamhelio frame
x_gh, y_gh, z_gh : np.array of float (any shape) or scalar float
Cartesian coordinates in GH(mjdc) frame. All three arrays x, y, z must
have identical shapes.
mjd_hgs : float
MJD of target HGS frame
Returns
-------
x_hgs, y_hgs, z_hgs : np.array of float (same shape as x_gh, y_gh, z_gh)
Cartesian coordinates converted to HGS(mjd) frame.
Raises
------
None
"""
# Load the source coordinates (originally in the GH(mjd_gh) frame) into
# the equivalent HGS(mjd_gh) frame.
c_gh = SkyCoord(
-x_gh*u.Rsun, -y_gh*u.Rsun, z_gh*u.Rsun,
frame=frames.HeliographicStonyhurst,
obstime=ktools.MJD2UT(mjd_gh),
representation_type="cartesian"
)
# Create the target Heliographic Stonyhurst frame.
hgs_frame = frames.HeliographicStonyhurst(
obstime=ktools.MJD2UT(mjd_hgs)
)
# Convert the coordinates from GH(mjd_gh) to HGS(mjd_hgs).
c_hgs = c_gh.transform_to(hgs_frame)
# Extract and return the converted coordinates.
x_hgs = dm.dmarray(c_hgs.cartesian.x)
y_hgs = dm.dmarray(c_hgs.cartesian.y)
z_hgs = dm.dmarray(c_hgs.cartesian.z)
return x_hgs, y_hgs, z_hgs
def main():
"""Make a quick-look plot for a gamhelio run."""
# Set up the command-line parser.
parser = create_command_line_parser()
# Parse the command-line arguments.
args = parser.parse_args()
if args.debug:
print(f"args = {args}")
debug = args.debug
fdir = args.d
fdir = args.directory
hgsplot = args.hgsplot
ftag = args.id
noMPI = args.nompi
jslice = args.jslice
pic2lon = args.lon
steps = args.nlist
slices = args.nslice
nWorkers = args.nworkers
doParallel = args.parallel
pic = args.pic
spacecraft = args.spacecraft
verbose = args.verbose
fOut = args.outfile
pic = args.pic
doParallel = args.parallel
nWorkers = args.nworkers
pic2lon = args.lon
if debug:
print("args = %s" % args)
if slices:
print("Slice selected {}".format(slice(slices[0],slices[1],slices[2])))
# Invert the MPI flag for convenience.
doMPI = not noMPI
inner = args.inner
if slices:
print(f"Slice selected {slice(slices[0], slices[1], slices[2])}")
tic = time.perf_counter()
# Fetch the plot domain based on the picture type.
tic = time.perf_counter()
xyBds = hviz.GetSizeBds(pic)
toc = time.perf_counter()
print(xyBds)
print(f"Get bounds took {toc-tic} s")
print(f"Get bounds took {toc - tic} s")
# Do work?
doFast = False
# Open a pipe to the results data.
tic = time.perf_counter()
gsph = hsph.GamsphPipe(fdir, ftag, doFast=doFast, doParallel=doParallel,
nWorkers=nWorkers)
toc = time.perf_counter()
print(f"Open pipe took {toc-tic} s")
# Compute the range of time steps to use.
if slices and steps[0] == 1:
steps = range(gsph.s0,gsph.sFin + 1)[slice(slices[0], slices[1], slices[2])]
print(f"steps = {steps}")
# Get the MJDc value for use in computing the gamhelio frame.
fname = gsph.f0
MJDc = scutils.read_MJDc(fname)
# Split the list into individual spacecraft names.
if spacecraft:
spacecraft = spacecraft.split(',')
# Create figures in a memory buffer.
mpl.use("Agg")
# Open a pipe to the results data.
tic = time.perf_counter()
gsph = hsph.GamsphPipe(fdir, ftag, doFast=doFast, doParallel=doParallel, nWorkers=nWorkers)
toc = time.perf_counter()
print(f"Open pipe took {toc-tic} s")
# Make a plot for each time step in the list of time steps.
for nStp in steps:
if debug:
print(f"nStp = {nStp}")
if(slices and steps[0] == -1):
steps = range(gsph.sFin)[slice(slices[0],slices[1],slices[2])]
nsteps = len(steps)
for nStp in steps:
tic = time.perf_counter()
print(f"Generating {pic} for time step {nStp}")
print(f"Generating {pic} for time step {nStp}.")
fig = initFig(pic)
# Extract the MJD for the step.
mjd = gsph.MJDs[nStp]
if any(gsph.MJDs):
mjd = gsph.MJDs[nStp-gsph.s0]
time_stamp = ktools.MJD2UT(mjd)
else:
mjd = gsph.T[nStp-gsph.s0]/(3600./gsph.tScl)
time_stamp = f"{mjd:0.2f} [hr]"
if debug:
print(f"mjd = {mjd}")
# Lay out the subplots.
if pic == "pic1" or pic == "pic2" or pic == "pic3" or pic == "pic6" or pic == "pic7":
gs = gridspec.GridSpec(4, 6, height_ratios=[20, 1, 20, 1])
if pic in ["pic1", "pic2", "pic3", "pic6", "pic7"]:
gs = gridspec.GridSpec(4, 6, height_ratios=[20, 1, 20, 1], figure=fig)
# Axes for plots.
AxL0 = fig.add_subplot(gs[0, 0:3])
AxR0 = fig.add_subplot(gs[0, 3:])
@@ -248,121 +426,155 @@ if __name__ == "__main__":
AxC1_1 = fig.add_subplot(gs[3, 0:3])
AxC2_1 = fig.add_subplot(gs[3, 3:])
elif pic == "pic4":
gs = gridspec.GridSpec(2, 1, height_ratios=[20, 1])
gs = gridspec.GridSpec(2, 1, height_ratios=[20, 1], figure=fig)
Ax = fig.add_subplot(gs[0, 0])
AxC = fig.add_subplot(gs[1, 0])
elif pic == "pic5":
gs = gridspec.GridSpec(2, 2)
gs = gridspec.GridSpec(2, 2, figure=fig)
Ax = fig.add_subplot(gs[0, 0])
AxC = fig.add_subplot(gs[0, 1])
AxC1 = fig.add_subplot(gs[1, 0])
else:
raise TypeError(f"Invalid figure type: {pic}!")
# If the step is -1, use the last step.
if nStp < 0:
nStp = gsph.sFin
print("Using Step %d" % nStp)
print(f"Using Step {nStp}")
# Now create the actual plots.
if pic == "pic1":
# These are all equatorial plots in the XY plane of the HGS frame
# used by gamhelio.
hviz.PlotEqMagV(gsph, nStp, xyBds, AxL0, AxC1_0)
hviz.PlotEqD(gsph, nStp, xyBds, AxR0, AxC2_0)
hviz.PlotEqTemp(gsph, nStp, xyBds, AxL1, AxC1_1)
hviz.PlotEqBr(gsph, nStp, xyBds, AxR1, AxC2_1)
# Equatorial plots in the XY plane of the modified HGS frame used
# by gamhelio. If hgsplot is True, then the plot frame is the true
# HGS frame at the time of the plot.
hviz.PlotEqMagV(gsph, nStp, xyBds, AxL0, AxC1_0,
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotEqD(gsph, nStp, xyBds, AxR0, AxC2_0,
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotEqTemp(gsph, nStp, xyBds, AxL1, AxC1_1,
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotEqBr(gsph, nStp, xyBds, AxR1, AxC2_1,
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
if hgsplot:
fig.suptitle("Heliographic Stonyhurst frame for "
f"{time_stamp}")
else:
fig.suptitle(f"GAMERA-Helio frame for {time_stamp}")
elif pic == "pic2":
hviz.PlotMerMagV(gsph ,nStp, xyBds, AxL0, AxC1_0,indx=(None,pic2lon))
hviz.PlotMerDNorm(gsph, nStp, xyBds, AxR0, AxC2_0,indx=(None,pic2lon))
hviz.PlotMerTemp(gsph, nStp, xyBds, AxL1, AxC1_1,indx=(None,pic2lon))
hviz.PlotMerBrNorm(gsph, nStp, xyBds, AxR1, AxC2_1,indx=(None,pic2lon))
# Meridional plots in the XZ plane of the modified HGS frame used
# by gamhelio. If hgsplot is True, then the plot frame is the true
# HGS frame at the time of the plot.
hviz.PlotMerMagV(gsph, nStp, xyBds, AxL0, AxC1_0,
indx=(None, pic2lon),
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotMerDNorm(gsph, nStp, xyBds, AxR0, AxC2_0,
indx=(None, pic2lon),
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotMerTemp(gsph, nStp, xyBds, AxL1, AxC1_1,
indx=(None, pic2lon),
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotMerBrNorm(gsph, nStp, xyBds, AxR1, AxC2_1,
indx=(None, pic2lon),
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
if hgsplot:
fig.suptitle("Heliographic Stonyhurst frame for "
f"{time_stamp}")
else:
fig.suptitle(f"GAMERA-Helio frame for {time_stamp}")
elif pic == "pic3":
hviz.PlotiSlMagV(gsph, nStp, xyBds, AxL0, AxC1_0,idx=0)
hviz.PlotiSlD(gsph, nStp, xyBds, AxR0, AxC2_0,idx=0)
hviz.PlotiSlTemp(gsph, nStp, xyBds, AxL1, AxC1_1,idx=0)
hviz.PlotiSlBr(gsph, nStp, xyBds, AxR1, AxC2_1,idx=0)
# Lat/lon plot at 1 AU (the outer edge of the gamhelio grid), in
# the modified HGS frame rotating with the Sun.
AU_RSUN = 215.0
radius = AU_RSUN
I_RSUN = 21.5
if inner: radius = I_RSUN
hviz.PlotiSlMagV(gsph, nStp, xyBds, AxL0, AxC1_0, idx=radius,
idx_is_radius=True,
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotiSlD(gsph, nStp, xyBds, AxR0, AxC2_0, idx=radius,
idx_is_radius=True,
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd,
use_outer_range=(not inner) )
hviz.PlotiSlTemp(gsph, nStp, xyBds, AxL1, AxC1_1, idx=radius,
idx_is_radius=True,
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd,
use_outer_range=(not inner))
hviz.PlotiSlBr(gsph, nStp, xyBds, AxR1, AxC2_1, idx=radius,
idx_is_radius=True,
hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd,
use_outer_range=(not inner))
if hgsplot:
fig.suptitle(f"Heliographic Stonyhurst frame at {radius} [RE] for {time_stamp}")
else:
fig.suptitle(f"GAMERA-Helio frame at {radius} [RE] for {time_stamp}")
elif pic == "pic4":
# Plot at 1 AU in frame rotating with Sun.
hviz.PlotiSlBrRotatingFrame(gsph, nStp, xyBds, Ax, AxC)
elif pic == "pic5":
hviz.PlotDensityProf(gsph, nStp, xyBds, Ax)
hviz.PlotSpeedProf(gsph, nStp, xyBds, AxC)
hviz.PlotFluxProf(gsph, nStp, xyBds, AxC1)
elif pic == "pic6":
hviz.PlotEqBr(gsph, nStp, xyBds, AxL0, AxC1_0)
hviz.PlotEqBx(gsph, nStp, xyBds, AxR0, AxC2_0)
hviz.PlotEqBy(gsph, nStp, xyBds, AxL1, AxC1_1)
hviz.PlotEqBz(gsph, nStp, xyBds, AxR1, AxC2_1)
hviz.PlotEqBr(gsph, nStp, xyBds, AxL0, AxC1_0, hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotEqBx(gsph, nStp, xyBds, AxR0, AxC2_0, hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotEqBy(gsph, nStp, xyBds, AxL1, AxC1_1, hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotEqBz(gsph, nStp, xyBds, AxR1, AxC2_1, hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
fig.suptitle("GAMERA-Helio frame for "
f"{time_stamp}")
elif pic == "pic7":
hviz.PlotjMagV(gsph, nStp, xyBds, AxL0, AxC1_0,jidx=448)
hviz.PlotjD(gsph, nStp, xyBds, AxR0, AxC2_0,jidx=448)
hviz.PlotjTemp(gsph, nStp, xyBds, AxL1, AxC1_1,jidx=448)
hviz.PlotjBr(gsph, nStp, xyBds, AxR1, AxC2_1,jidx=448)
if jslice is None:
jidx = gsph.Nj//2 - 1
else:
jidx = jslice
hviz.PlotjMagV(gsph, nStp, xyBds, AxL0, AxC1_0, jidx=jidx, hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotjD(gsph, nStp, xyBds, AxR0, AxC2_0, jidx=jidx, hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotjTemp(gsph, nStp, xyBds, AxL1, AxC1_1, jidx=jidx, hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
hviz.PlotjBr(gsph, nStp, xyBds, AxR1, AxC2_1, jidx=jidx, hgsplot=hgsplot, MJDc=MJDc, MJD_plot=mjd)
fig.suptitle("GAMERA-Helio frame for "
f"{time_stamp}")
else:
print ("Pic is empty. Choose pic1 or pic2 or pic3")
raise TypeError(f"Invalid figure type: {pic}!")
# Add time in the upper left.
if pic == "pic1" or pic == "pic2" or pic == "pic6" or pic == "pic7":
gsph.AddTime(nStp, AxL0, xy=[0.025, 0.875], fs="x-large")
elif pic == "pic3":
gsph.AddTime(nStp, AxL0, xy=[0.015, 0.82], fs="small")
elif pic == "pic4" or pic == "pic5":
# Add time in the upper left (if not in figure title).
# if pic == "pic1" or pic == "pic2" or pic == "pic3" or pic == "pic6":
if pic == "pic4" or pic == "pic5":
gsph.AddTime(nStp, Ax, xy=[0.015, 0.92], fs="small")
else:
print ("Pic is empty. Choose pic1 or pic2 or pic3")
# Overlay the spacecraft trajectory, if needed.
# Overlay the spacecraft positions.
if spacecraft:
print("Overplotting spacecraft trajectories of %s." % spacecraft)
# Split the list into individual spacecraft names.
spacecraft = spacecraft.split(',')
if debug:
print("spacecraft = %s" % spacecraft)
# Fetch the MJD start and end time of the model results.
fname = gsph.f0
if debug:
print("fname = %s" % fname)
MJD_start = kh5.tStep(fname, 0, aID="MJD")
if debug:
print("MJD_start = %s" % MJD_start)
# Fetch the MJD at start and end of the model results.
MJD_start = kh5.tStep(fname, gsph.s0, aID="MJD")
MJD_end = kh5.tStep(fname, gsph.sFin, aID="MJD")
if debug:
print("MJD_end = %s" % MJD_end)
# Convert the start and stop MJD to a datetime object in UT.
ut_start = ktools.MJD2UT(MJD_start)
if debug:
print("ut_start = %s" % ut_start)
ut_end = ktools.MJD2UT(MJD_end)
if debug:
print("ut_end = %s" % ut_end)
# Get the MJDc value for use in computing the gamhelio frame.
MJDc = scutils.read_MJDc(fname)
if debug:
print("mjdc = %s" % MJDc)
# Fetch and plot the trajectory of each spacecraft from CDAWeb.
# Fetch the trajectory of each spacecraft from CDAWeb. Then
# interpolate the position at the time of the plot, and plot the
# spacecraft at the interpolated position.
for (i_sc, sc_id) in enumerate(spacecraft):
if verbose:
print("Fetching trajectory for %s." % sc_id)
print(f"Fetching trajectory for {sc_id}.")
# Fetch the spacecraft trajectory in whatever frame is available
# from CDAWeb.
# Fetch the spacecraft trajectory in whatever frame is
# available from CDAWeb.
sc_data = cdaweb_utils.fetch_helio_spacecraft_trajectory(
sc_id, ut_start, ut_end
)
if sc_data is None:
print("No trajectory found for %s." % sc_id)
print(f"No trajectory found for {sc_id}.")
continue
# Ingest the trajectory by converting it to the GH(MJDc) frame.
if verbose:
print("Converting ephemeris for %s into gamhelio format." % sc_id)
print(f"Converting ephemeris for {sc_id} to the gamhelio "
f"frame at MJD {MJDc}.")
t_strings = np.array([str(t) for t in sc_data["Epoch"]])
t = astropy.time.Time(t_strings, scale='utc').mjd
x, y, z = cdaweb_utils.ingest_helio_spacecraft_trajectory(sc_id, sc_data, MJDc)
if debug:
print("t, x, y, z = %s, %s, %s, %s" % (t, x, y, z))
x, y, z = cdaweb_utils.ingest_helio_spacecraft_trajectory(
sc_id, sc_data, MJDc)
# Interpolate the spacecraft position at the time for the plot.
t_sc = mjd
@@ -370,7 +582,14 @@ if __name__ == "__main__":
y_sc = np.interp(t_sc, t, y)
z_sc = np.interp(t_sc, t, z)
# If needed, compute heliocentric spherical coordinates.
# If needed, convert the position to HGS(mjd).
if hgsplot:
x_sc, y_sc, z_sc = GHtoHGS(MJDc, x_sc, y_sc, z_sc, mjd)
# If needed, compute heliocentric spherical coordinates
# for the interpolated spacecraft position. Longitude is in
# the -180 to +180 range. Convert to 0-360 if not using
# hgsplot.
if pic == "pic3" or pic == "pic4":
rxy = np.sqrt(x_sc**2 + y_sc**2)
theta = np.arctan2(rxy, z_sc)
@@ -379,10 +598,12 @@ if __name__ == "__main__":
lon = np.degrees(phi)
lat_sc = lat
lon_sc = lon
if not hgsplot:
if lon_sc < 0:
lon_sc += 360
# Plot a position of the spacecraft.
# Left plot
SPACECRAFT_COLORS = list(mpl.colors.TABLEAU_COLORS.keys())
# Plot the position of the spacecraft at the plot time. Each
# spacecraft is plotted as a colored dot with a black outline.
color = SPACECRAFT_COLORS[i_sc % len(SPACECRAFT_COLORS)]
x_nudge = 0.0
y_nudge = 8.0
@@ -390,18 +611,19 @@ if __name__ == "__main__":
for ax in (AxL0, AxR0, AxL1, AxR1):
ax.plot(x_sc, y_sc, 'o', c=color)
ax.plot(x_sc, y_sc, 'o', c="black", fillstyle="none")
ax.text(x_sc + x_nudge, y_sc+ y_nudge, sc_id,
ax.text(x_sc + x_nudge, y_sc + y_nudge, sc_id,
c="black", horizontalalignment="center")
elif pic == "pic2":
for ax in (AxL0, AxR0, AxL1, AxR1):
ax.plot(x_sc, y_sc, 'o', c=color)
ax.plot(x_sc, y_sc, 'o', c="black", fillstyle="none")
ax.text(x_sc + x_nudge, y_sc+ y_nudge, sc_id,
ax.plot(x_sc, z_sc, 'o', c=color)
ax.plot(x_sc, z_sc, 'o', c="black", fillstyle="none")
ax.text(x_sc + x_nudge, z_sc + y_nudge, sc_id,
c="black", horizontalalignment="center")
elif pic == "pic3":
for ax in (AxL0, AxR0, AxL1, AxR1):
ax.plot(lon_sc, lat_sc, 'o', c=color)
ax.plot(lon_sc, lat_sc, 'o', c="black", fillstyle="none")
ax.plot(lon_sc, lat_sc, 'o', c="black",
fillstyle="none")
ax.text(lon_sc + x_nudge, lat_sc + y_nudge, sc_id,
c="black", horizontalalignment="center")
elif pic == "pic4":
@@ -412,10 +634,24 @@ if __name__ == "__main__":
c="black", horizontalalignment="center")
elif pic == "pic5":
pass
elif pic == "pic6":
for ax in (AxL0, AxR0, AxL1, AxR1):
ax.plot(x_sc, y_sc, 'o', c=color)
ax.plot(x_sc, y_sc, 'o', c="black", fillstyle="none")
ax.text(x_sc + x_nudge, y_sc + y_nudge, sc_id,
c="black", horizontalalignment="center")
elif pic == "pic7":
raise TypeError("Spacecraft not supported for pic7!")
else:
raise TypeError(f"Invalid plot code: {pic}!")
# Save the figure to a file.
path = os.path.join(fdir, fOut(ftag, pic, nStp))
path = os.path.join(fdir, fOut(ftag, pic, nStp, hgsplot))
kv.savePic(path, bLenX=40)
fig.clear()
plt.close()
toc = time.perf_counter()
print(f"Step {nStp} took {toc-tic} s")
if __name__ == "__main__":
main()

View File

@@ -9,12 +9,32 @@ Author
------
Kareem Sorathia (kareem.sorathia@jhuapl.edu)
Eric Winter (eric.winter@jhuapl.edu)
Example PBS script to use multiprocessing to make a video on Lou Data Analysis Nodes:
#!/bin/bash
#PBS -N MixVid
#PBS -j oe
#PBS -lselect=1:ncpus=36:mem=750GB
#PBS -lwalltime=2:00:00
#PBS -q ldan
#PBS -W group_list=s2521
export RUNID="geospace"
# load modules and set python environment
source ~/.bashrc
setPy
cd $PBS_O_WORKDIR
mixpic.py -id $RUNID -vid --ncpus 36
"""
# Import standard modules.
import argparse
import sys
import os
# Import supplemental modules.
from astropy.time import Time
@@ -23,12 +43,18 @@ import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import h5py as h5
from alive_progress import alive_it
from multiprocessing import Pool
from psutil import cpu_count
# Import project-specific modules.
import kaipy.cdaweb_utils as cdaweb_utils
import kaipy.kaiH5 as kaiH5
import kaipy.kaiTools as ktools
import kaipy.remix.remix as remix
import kaipy.kaiViz as kv
import kaipy.kdefs as kd
# Program constants and defaults
@@ -48,6 +74,9 @@ default_step = -1
# Color to use for magnetic footprint positions.
FOOTPRINT_COLOR = 'red'
# Coordinate system for plotting
default_coord = 'SM'
def create_command_line_parser():
"""Create the command-line argument parser.
@@ -95,67 +124,42 @@ def create_command_line_parser():
"-v", "--verbose", action="store_true", default=False,
help="Print verbose output (default: %(default)s)."
)
parser.add_argument(
'-GTYPE', action='store_true', default=False,
help="Show RCM grid type in the eflx plot (default: %(default)s)"
)
parser.add_argument(
'-PP', action='store_true', default=False,
help="Show plasmapause (10/cc) in the eflx/nflx plot (default: %(default)s)"
)
parser.add_argument(
'-vid', action='store_true', default=False,
help="Make a video and store in mixVid directory (default: %(default)s)"
)
parser.add_argument(
'-overwrite', action='store_true', default=False,
help="Overwrite existing vid files (default: %(default)s)"
)
parser.add_argument(
'--ncpus', type=int, metavar="ncpus", default=1,
help="Number of threads to use with --vid (default: %(default)s)"
)
parser.add_argument(
'-nohash', action='store_true', default=False,
help="Don't display branch/hash info (default: %(default)s)"
)
parser.add_argument(
'--coord', type=str, metavar="coord", default=default_coord,
help="Coordinate system to use (default: %(default)s)"
)
return parser
if __name__ == "__main__":
"""Plot the ground magnetic field perturbations."""
def makePlot(i, remixFile, nStp):
# Set up the command-line parser.
parser = create_command_line_parser()
# Parse the command-line arguments.
args = parser.parse_args()
debug = args.debug
runid = args.id
nStp = args.n
do_nflux = args.nflux
do_print = args.print
spacecraft = args.spacecraft
verbose = args.verbose
if debug:
print("args = %s" % args)
# Construct the name of the REMIX results file.
remixFile = runid + '.mix.h5'
if debug:
print("remixFile = %s" % remixFile)
# Split the original string into a list of spacecraft IDs.
if spacecraft:
spacecraft = spacecraft.split(',')
if debug:
print("spacecraft = %s" % spacecraft)
# Enumerate the steps in the results file.
nsteps, sIds = kaiH5.cntSteps(remixFile)
if debug:
print("nsteps = %s" % nsteps)
print("sIds = %s" % sIds)
# Check that the requested step exists.
if nStp >= 0 and not nStp in sIds: # ANY nStp<0 gets last step.
raise TypeError(f"Step #{nStp} not found in {remixFile}!")
# Get the times from the result file.
T = kaiH5.getTs(remixFile, sIds, aID='MJD')
if debug:
print("T = %s" % T)
if do_print:
for i, tt in enumerate(T):
print('Step#%06d: ' % sorted(sIds)[i], Time(tt, format='mjd').iso)
sys.exit(0)
# Find the time for the specified step.
if nStp == -1:
# Take the last step.
nStp = sorted(sIds)[-1]
if debug:
print("nStp = %s" % nStp)
idxStp = np.where(sIds==nStp)[0][0]
if debug:
print("idxStp = %s" % idxStp)
foundT = T[idxStp]
with h5.File(remixFile, 'r') as f5:
foundT = f5['Step#'+str(nStp)].attrs['MJD']
#foundT = T[idxStp]
if debug:
print("foundT = %s" % foundT)
print('Found time:', Time(foundT, format='mjd').iso)
@@ -163,23 +167,38 @@ if __name__ == "__main__":
if debug:
print("utS = %s" % utS)
# Create the plots in a memory buffer.
mpl.use('Agg')
# If both N and S files exist, skip it
if do_vid:
filenameN = "{}.{:0>{n}d}.png".format("remix_n", i, n=n_pad)
outPathN = os.path.join(outDir, filenameN)
filenameS = "{}.{:0>{n}d}.png".format("remix_s", i, n=n_pad)
outPathS = os.path.join(outDir, filenameS)
if not do_overwrite and os.path.exists(outPathN) and os.path.exists(outPathS):
return
# Set global plot font options.
mpl.rc('mathtext', fontset='stixsans', default='regular')
mpl.rc('font', size=10)
# Read the data into the remix object.
ion = remix.remix(remixFile, nStp)
if debug:
print("ion = %s" % ion)
# Make separate plots for the northern and southern hemispheres.
for h in ['NORTH', 'SOUTH']:
for h in ['NORTH','SOUTH']:
if h == 'NORTH':
hemi = 'n'
if h == 'SOUTH':
hemi = 's'
# Create the figure for the plot.
fig = plt.figure(figsize=(12, 7.5))
if do_vid:
filename = "{}.{:0>{n}d}.png".format("remix_"+hemi, i, n=n_pad)
outPath = os.path.join(outDir, filename)
else:
outPath = "remix_"+hemi+".png"
# Skip this file if it already exists and we're not supposed to overwrite
if not do_overwrite and os.path.exists(outPath) and do_vid:
continue
plt.clf()
# Add a label.
plt.figtext(
@@ -203,9 +222,9 @@ if __name__ == "__main__":
axs[3] = ion.plot('joule', gs=gs[1, 0])
axs[4] = ion.plot('energy', gs=gs[1, 1])
if do_nflux:
axs[5] = ion.plot('flux', gs=gs[1, 2])
axs[5] = ion.plot('flux', gs=gs[1, 2],doGTYPE=do_GTYPE,doPP=do_PP)
else:
axs[5] = ion.plot('eflux', gs=gs[1, 2])
axs[5] = ion.plot('eflux', gs=gs[1, 2],doGTYPE=do_GTYPE,doPP=do_PP)
# If requested, plot the magnetic footprints for the specified
# spacecraft.
@@ -256,8 +275,115 @@ if __name__ == "__main__":
r_nudge = 0.0
ax.text(fp_theta + theta_nudge, fp_r + r_nudge, sc)
# Save the plot for the current hemisphere to a file.
if h.lower() == 'north':
plt.savefig('remix_n.png', dpi=300)
# Add Branch and Hash info
if do_hash:
fig.text(0.1,0.95,f"branch/commit: {branch}/{githash}", fontsize=6)
# Save to file
kv.savePic(outPath, dpiQ=300)
#plt.close(fig)
if __name__ == "__main__":
"""Plot remix data, either a single time step or as a movie"""
# Set up the command-line parser.
parser = create_command_line_parser()
# Parse the command-line arguments.
args = parser.parse_args()
debug = args.debug
runid = args.id
nStp = args.n
do_nflux = args.nflux
do_print = args.print
spacecraft = args.spacecraft
verbose = args.verbose
do_GTYPE = args.GTYPE
do_PP = args.PP
do_vid = args.vid
do_overwrite = args.overwrite
do_hash = not args.nohash
ncpus = args.ncpus
if debug:
print("args = %s" % args)
# Construct the name of the REMIX results file.
remixFile = runid + '.mix.h5'
if debug:
print("remixFile = %s" % remixFile)
# Get branch/hash info
if do_hash:
branch = kaiH5.GetBranch(remixFile)
githash = kaiH5.GetHash(remixFile)
if debug:
print(f'branch/commit: {branch}/{githash}')
# Split the original string into a list of spacecraft IDs.
if spacecraft:
spacecraft = spacecraft.split(',')
if debug:
print("spacecraft = %s" % spacecraft)
# Enumerate the steps in the results file.
nsteps, sIds = kaiH5.cntSteps(remixFile)
sIds = sorted(sIds)
if debug:
print("nsteps = %s" % nsteps)
print("sIds = %s" % sIds)
# Check that the requested step exists.
if nStp >= 0 and nStp not in sIds: # ANY nStp<0 gets last step.
raise TypeError(f"Step #{nStp} not found in {remixFile}!")
# Get the times from the result file.
if do_print:
T = kaiH5.getTs(remixFile, sIds, aID='MJD')
if debug:
print("T = %s" % T)
for i, tt in enumerate(T):
print('Step#%06d: ' % sorted(sIds)[i], Time(tt, format='mjd').iso)
sys.exit(0)
# Create the plots in a memory buffer.
mpl.use('Agg')
# Set global plot font options.
mpl.rc('mathtext', fontset='stixsans', default='regular')
mpl.rc('font', size=10)
# Init figure
fig = plt.figure(figsize=(12, 7.5))
if not do_vid: # Then we are making a single image, keep original functionality
# Find the time for the specified step.
if nStp == -1:
# Take the last step.
nStp = sorted(sIds)[-1]
if debug:
print("nStp = %s" % nStp)
makePlot(nStp, remixFile, nStp)
else: # Then we make a video, i.e. series of images saved to mixVid
outDir = 'mixVid'
kaiH5.CheckDirOrMake(outDir)
# How many 0's do we need for filenames?
n_pad = int(np.log10((len(sIds)))) + 1
if ncpus == 1:
for i, nStp in enumerate(alive_it(sIds,length=kd.barLen,bar=kd.barDef)):
makePlot(i,remixFile, nStp)
else:
plt.savefig('remix_s.png', dpi=300)
# Make list of parallel arguments
ag = ((i,remixFile,nStp) for i, nStp in enumerate(sIds) )
# Check we're not exceeding cpu_count on computer
ncpus = min(int(ncpus),cpu_count(logical=False))
print('Doing multiprocessing on', ncpus, 'processes')
# Do parallel job
with Pool(processes=ncpus) as pl:
pl.starmap(makePlot,ag)
print("Done making all the images. Go to mixVid folder")

View File

@@ -20,7 +20,11 @@ import os
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import warnings
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from multiprocessing import Pool
from psutil import cpu_count
# Import project-specific modules.
from kaipy import cdaweb_utils
@@ -51,9 +55,6 @@ default_runid = "msphere"
# Plot the last step by default.
default_step = -1
# Name of plot output file.
fOut = "qkpic.png"
# Color to use for spacecraft position symbols.
SPACECRAFT_COLOR = 'red'
@@ -136,78 +137,44 @@ def create_command_line_parser():
"--spacecraft", type=str, metavar="spacecraft", default=None,
help="Names of spacecraft to plot positions, separated by commas (default: %(default)s)"
)
parser.add_argument(
'-vid', action='store_true', default=False,
help="Make a video and store in mixVid directory (default: %(default)s)"
)
parser.add_argument(
'-overwrite', action='store_true', default=False,
help="Overwrite existing vid files (default: %(default)s)"
)
parser.add_argument(
'--ncpus', type=int, metavar="ncpus", default=1,
help="Number of threads to use with --vid (default: %(default)s)"
)
parser.add_argument(
'-nohash', action='store_true', default=False,
help="Don't display branch/hash info (default: %(default)s)"
)
# Add an option for plot domain size.
mviz.AddSizeArgs(parser)
return parser
def makePlot(i,spacecraft,nStp):
if __name__ == "__main__":
"""Make a quick figure of a Gamera magnetosphere run."""
# Disable some warning spam if not debug
if not debug:
warnings.filterwarnings("ignore", message="The input coordinates to pcolor are interpreted as cell centers.*")
# Set up the command-line parser.
parser = create_command_line_parser()
# Name of plot output file.
if do_vid:
fOut = "{}.{:0>{n}d}.png".format("msphpic", i, n=n_pad)
outPath = os.path.join(outDir, fOut)
else:
# Name of plot output file.
fOut = "qkmsphpic.png"
outPath = fOut
# Parse the command-line arguments.
args = parser.parse_args()
debug = args.debug
verbose = args.verbose
fdir = args.d
ftag = args.id
nStp = args.n
doDen = args.den
noIon = args.noion
noMPI = args.nompi
doMPI = not noMPI
doJy = args.jy
doEphi = args.ephi
doSrc = args.src
doBz = args.bz
noRCM = args.norcm
doBigRCM = args.bigrcm
spacecraft = args.spacecraft
if debug:
print("args = %s" % args)
# Get the domain size in Re.
xyBds = mviz.GetSizeBds(args)
if debug:
print("xyBds = %s" % xyBds)
# Set figure parameters.
doFast = False
doIon = not noIon
figSz = (12, 7.5)
# Open the gamera results pipe.
gsph = msph.GamsphPipe(fdir, ftag, doFast=doFast)
# If needed, fetch the number of the last step.
if nStp < 0:
nStp = gsph.sFin
print("Using Step %d" % nStp)
# Check for the presence of RCM results.
rcmChk = os.path.join(fdir, "%s.mhdrcm.h5" % ftag)
doRCM = os.path.exists(rcmChk)
if debug:
print("rcmChk = %s" % rcmChk)
print("doRCM = %s" % doRCM)
# Check for the presence of remix results.
rmxChk = os.path.join(fdir, "%s.mix.h5" % ftag)
doMIX = os.path.exists(rmxChk)
if debug:
print("rmxChk = %s" % rmxChk)
print("doMIX = %s" % doMIX)
# Open RCM data if available, and initialize visualization.
if doRCM:
print("Found RCM data")
rcmdata = gampp.GameraPipe(fdir, ftag + ".mhdrcm")
mviz.vP = kv.genNorm(1.0e-2, 100.0, doLog=True)
rcmpp.doEll = not doBigRCM
if debug:
print("rcmdata = %s" % rcmdata)
# Skip this file if it already exists and we're not supposed to overwrite
if not do_overwrite and os.path.exists(outPath) and do_vid:
return
# Open remix data if available.
if doMIX:
@@ -216,9 +183,6 @@ if __name__ == "__main__":
if debug:
print("ion = %s" % ion)
# Setup the figure.
mpl.use('Agg') # Plot in memory buffer.
fig = plt.figure(figsize=figSz)
gs = gridspec.GridSpec(3, 6, height_ratios=[20, 1, 1], hspace=0.025)
if debug:
print("fig = %s" % fig)
@@ -297,7 +261,7 @@ if __name__ == "__main__":
fname = gsph.f0
if debug:
print("fname = %s" % fname)
MJD_start = kh5.tStep(fname, gSph.s0, aID="MJD")
MJD_start = kh5.tStep(fname, gsph.s0, aID="MJD")
if debug:
print("MJD_start = %s" % MJD_start)
MJD_end = kh5.tStep(fname, gsph.sFin, aID="MJD")
@@ -355,5 +319,130 @@ if __name__ == "__main__":
z_nudge = 1.0
AxR.text(sc_x_Re[-1] + x_nudge, sc_z_Re[-1] + z_nudge, sc, c=color)
# Add Branch and Hash info
if do_hash:
fig.text(0.1,0.87,f"branch/commit: {branch}/{githash}", fontsize=6)
# Save the plot to a file.
kv.savePic(fOut, bLenX=45)
kv.savePic(outPath, bLenX=45)
if __name__ == "__main__":
"""Make a quick figure of a Gamera magnetosphere run."""
# Set up the command-line parser.
parser = create_command_line_parser()
# Parse the command-line arguments.
args = parser.parse_args()
debug = args.debug
verbose = args.verbose
fdir = args.d
ftag = args.id
nStp = args.n
doDen = args.den
noIon = args.noion
noMPI = args.nompi
doMPI = not noMPI
doJy = args.jy
doEphi = args.ephi
doSrc = args.src
doBz = args.bz
noRCM = args.norcm
doBigRCM = args.bigrcm
do_vid = args.vid
do_overwrite = args.overwrite
do_hash = not args.nohash
ncpus = args.ncpus
spacecraft = args.spacecraft
if debug:
print("args = %s" % args)
# Get the domain size in Re.
xyBds = mviz.GetSizeBds(args)
if debug:
print("xyBds = %s" % xyBds)
# Set figure parameters.
doFast = False
doIon = not noIon
figSz = (12, 7.5)
# Open the gamera results pipe.
gsph = msph.GamsphPipe(fdir, ftag, doFast=doFast)
# Check for the presence of RCM results.
rcmChk = os.path.join(fdir, "%s.mhdrcm.h5" % ftag)
doRCM = os.path.exists(rcmChk)
if debug:
print("rcmChk = %s" % rcmChk)
print("doRCM = %s" % doRCM)
# Check for the presence of remix results.
rmxChk = os.path.join(fdir, "%s.mix.h5" % ftag)
doMIX = os.path.exists(rmxChk)
if debug:
print("rmxChk = %s" % rmxChk)
print("doMIX = %s" % doMIX)
# Get branch/hash info
if doMIX:
branch = kh5.GetBranch(rmxChk)
githash = kh5.GetHash(rmxChk)
if debug:
print(f'branch/commit: {branch}/{githash}')
# Open RCM data if available, and initialize visualization.
if doRCM:
print("Found RCM data")
rcmdata = gampp.GameraPipe(fdir, ftag + ".mhdrcm")
mviz.vP = kv.genNorm(1.0e-2, 100.0, doLog=True)
rcmpp.doEll = not doBigRCM
if debug:
print("rcmdata = %s" % rcmdata)
else:
rcmdata = None
# Setup the figure.
mpl.use('Agg') # Plot in memory buffer.
# Set global plot font options.
mpl.rc('mathtext', fontset='stixsans', default='regular')
mpl.rc('font', size=10)
# Init figure
fig = plt.figure(figsize=figSz)
if not do_vid: # If we are making a single image, keep original functionality
# If needed, fetch the number of the last step.
if nStp < 0:
nStp = gsph.sFin
print("Using Step %d" % nStp)
makePlot(nStp,spacecraft,nStp)
else: # then we make a video, i.e. series of images saved to msphVid
# Get video loop parameters
s0 = max(gsph.s0,1) # Skip Step#0
sFin = gsph.sFin
nsteps = sFin - s0
sIds = np.array(range(s0,sFin))
outDir = 'msphVid'
kh5.CheckDirOrMake(outDir)
# How many 0's do we need for filenames?
n_pad = int(np.log10(nsteps)) + 1
if ncpus == 1:
for i, nStp in enumerate(sIds):
makePlot(i, spacecraft, nStp)
else:
# Make list of parallel arguments
ag = ((i,spacecraft,nStp) for i, nStp in enumerate(sIds) )
# Check we're not exceeding cpu_count on computer
ncpus = min(int(ncpus),cpu_count(logical=False))
print('Doing multiprocessing on', ncpus, 'processes')
# Do parallel job
with Pool(processes=ncpus) as pl:
pl.starmap(makePlot,ag)
print("Done making all the images. Go to mixVid folder")

View File

@@ -23,6 +23,9 @@ import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import sys
from multiprocessing import Pool
from psutil import cpu_count
import warnings
# Import project-specific modules.
import kaipy.cdaweb_utils as cdaweb_utils
@@ -32,6 +35,7 @@ import kaipy.kaiH5 as kh5
import kaipy.kaiTools as ktools
import kaipy.kaiViz as kv
import kaipy.kdefs as kdefs
import kaipy.kdefs as kd
# Program constants and defaults
@@ -48,10 +52,6 @@ default_runid = "msphere"
# Plot the last step by default.
default_step = -1
# Name of plot output file.
fOut = "qkrcmpic.png"
def create_command_line_parser():
"""Create the command-line argument parser.
@@ -130,84 +130,48 @@ def create_command_line_parser():
'-wgt', action='store_true', default=False,
help="Show wRCM instead of FTE (default: %(default)s)"
)
parser.add_argument(
'-vid', action='store_true', default=False,
help="Make a video and store in mixVid directory (default: %(default)s)"
)
parser.add_argument(
'-overwrite', action='store_true', default=False,
help="Overwrite existing vid files (default: %(default)s)"
)
parser.add_argument(
'--ncpus', type=int, metavar="ncpus", default=1,
help="Number of threads to use with --vid (default: %(default)s)"
)
parser.add_argument(
'-nohash', action='store_true', default=False,
help="Don't display branch/hash info (default: %(default)s)"
)
return parser
def makePlot(i,rcmdata,nStp):
if __name__ == "__main__":
"""Make a quick figure of a Gamera magnetosphere run."""
if not debug:
# Suppress the warning
warnings.filterwarnings("ignore", category=UserWarning, module="matplotlib")
warnings.filterwarnings("ignore", message="The input coordinates to pcolor are interpreted as cell centers.*")
warnings.filterwarnings("ignore", message="Log scale: values of z <= 0 have been masked.*")
warnings.filterwarnings("ignore", message="No contour levels were found within the data range.*")
# Set up the command-line parser.
parser = create_command_line_parser()
# Name of plot output file.
if do_vid:
fOut = "{}.{:0>{n}d}.png".format("rcmpic", i, n=n_pad)
outPath = os.path.join(outDir, fOut)
else:
outPath = "qkrcmpic.png"
# Parse the command-line arguments.
args = parser.parse_args()
doBeta = args.beta
doBig = args.big
doBMin = args.bmin
fdir = args.d
debug = args.debug
doElec = args.elec
doFAC = args.fac
ftag = args.id + ".mhdrcm"
doT = args.kt
nStp = args.n
spacecraft = args.spacecraft
doTb = args.tbnc
verbose = args.verbose
doVol = args.vol
doWgt = args.wgt
if debug:
print("args = %s" % args)
# Skip this file if it already exists and we're not supposed to overwrite
if not do_overwrite and os.path.exists(outPath) and do_vid:
return
# Defaults from external modules.
MHDCol = rcmpp.MHDCol
MHDLW = rcmpp.MHDLW
rcmpp.doEll = not doBig
# Figure parameters
xTail = -20.0
xSun = 10.0
yMax = 15.0
xyBds = [xTail, xSun, -yMax, yMax]
figSz = (12, 6)
eCol = "slategrey"
eLW = 0.15
cLW = 0.5
vP = kv.genNorm(1.0e-1, 1.0e+2, doLog=True)
vS = kv.genNorm(0.0, 0.25)
vW = kv.genNorm(0, 1)
vV = kv.genNorm(1.0e-2, 1.0, doLog=True)
vT = kv.genNorm(0, 50)
vB = kv.genNorm(1.0e-2, 1.0e+2, doLog=True)
vI = kv.genNorm(0, 180)
vBM = kv.genNorm(0, 100)
vFAC = kv.genNorm(2.0)
Nc = 10
nMin = 1.0
nMax = 1.0e+3
vD = kv.genNorm(nMin, nMax, doLog=True)
cVals = np.logspace(1.0, 3.0, Nc)
pCMap = "viridis"
sCMap = "terrain"
dCMap = "cool"
wCMap = "bwr_r"
vCMap = "gnuplot2"
# Read the RCM results.
rcmdata = gampp.GameraPipe(fdir, ftag)
if debug:
print("rcmdata = %s" % rcmdata)
if nStp < 0: # ANY negative index gets the last step.
nStp = rcmdata.sFin
print("Using Step %d"%(nStp))
if debug:
print("nStp = %s" % nStp)
# Create the figure.
fig = plt.figure(figsize=figSz)
plt.clf()
# Create the grid for laying out the subplots.
gs = gridspec.GridSpec(3, 3, height_ratios=[20, 1.0, 1.0], hspace=0.025)
gs = gridspec.GridSpec(2, 3, height_ratios=[20, 1.0], hspace=0.025)
# Create the Axes objects for the individual plots.
AxL = fig.add_subplot(gs[0, 0])
@@ -219,6 +183,16 @@ if __name__ == "__main__":
AxC2 = fig.add_subplot(gs[-1, 1])
AxC3 = fig.add_subplot(gs[-1, -1])
# Adjust the positions of the individual subplots.
AxL.set_position([0.05, 0.1, 0.25, 1.0])
AxM.set_position([0.35, 0.1, 0.25, 1.0])
AxR.set_position([0.65, 0.1, 0.25, 1.0])
# Update the subplot parameters for visibility.
AxL.tick_params(axis='both', which='both', bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True)
AxM.tick_params(axis='both', which='both', bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True)
AxR.tick_params(axis='both', which='both', bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True)
# Create the colorbars.
kv.genCB(AxC1, vP, "Pressure [nPa]", cM=pCMap)
kv.genCB(AxC2, vD, "Density [#/cc]", cM=dCMap)
@@ -263,38 +237,42 @@ if __name__ == "__main__":
print("No closed field region in RCM, exiting ...")
exit()
# This is not working yet. A blank bar still shows up
doVerb = debug
doVerb = True
# Fetch the data to plot.
if doElec:
Prcm = rcmpp.GetVarMask(rcmdata, nStp, "Pe", I)
Prcm = rcmpp.GetVarMask(rcmdata, nStp, "Pe", I, doVerb=doVerb)
else:
Prcm = rcmpp.GetVarMask(rcmdata, nStp, "P", I)
Nrcm = rcmpp.GetVarMask(rcmdata, nStp, "N", I)
Pmhd = rcmpp.GetVarMask(rcmdata, nStp, "Pmhd", I)
Nmhd = rcmpp.GetVarMask(rcmdata, nStp, "Nmhd", I)
S = rcmpp.GetVarMask(rcmdata,nStp, "S", I)
toMHD = rcmpp.GetVarMask(rcmdata, nStp, "toMHD", I)
pot, pVals = rcmpp.GetPotential(rcmdata, nStp, I)
Prcm = rcmpp.GetVarMask(rcmdata, nStp, "P", I, doVerb=doVerb)
Nrcm = rcmpp.GetVarMask(rcmdata, nStp, "N", I, doVerb=doVerb)
Pmhd = rcmpp.GetVarMask(rcmdata, nStp, "Pmhd", I, doVerb=doVerb)
Nmhd = rcmpp.GetVarMask(rcmdata, nStp, "Nmhd", I, doVerb=doVerb)
S = rcmpp.GetVarMask(rcmdata,nStp, "S", I, doVerb=doVerb)
toMHD = rcmpp.GetVarMask(rcmdata, nStp, "toMHD", I, doVerb=doVerb)
pot, pVals = rcmpp.GetPotential(rcmdata, nStp, I, doVerb=doVerb)
wRCM = None
if doWgt:
wRCM = rcmpp.GetVarMask(rcmdata, nStp, "wIMAG", I)
wRCM = rcmpp.GetVarMask(rcmdata, nStp, "wIMAG", I, doVerb=doVerb)
bVol = None
if doVol:
bVol = rcmpp.GetVarMask(rcmdata, nStp, "bVol", I)
bVol = rcmpp.GetVarMask(rcmdata, nStp, "bVol", I, doVerb=doVerb)
beta = None
if doBeta:
beta = rcmpp.GetVarMask(rcmdata, nStp, "beta", I)
beta = rcmpp.GetVarMask(rcmdata, nStp, "beta", I, doVerb=doVerb)
Tb = None
if doTb:
Tb = rcmpp.GetVarMask(rcmdata,nStp,"Tb", I)
Tb = rcmpp.GetVarMask(rcmdata,nStp,"Tb", I, doVerb=doVerb)
Bmin = None
if doBMin:
Bmin = rcmpp.GetVarMask(rcmdata, nStp, "bMin", I)
Bmin = rcmpp.GetVarMask(rcmdata, nStp, "bMin", I, doVerb=doVerb)
toRCM = None
if doBig:
toRCM = rcmpp.GetVarMask(rcmdata, nStp, "IOpen", I)
toRCM = rcmpp.GetVarMask(rcmdata, nStp, "IOpen", I, doVerb=doVerb)
jBirk = None
if doFAC:
jBirk = rcmpp.GetVarMask(rcmdata, nStp, "birk", I)
jBirk = rcmpp.GetVarMask(rcmdata, nStp, "birk", I, doVerb=doVerb)
if debug:
print("Prcm = %s" % Prcm)
print("Nrcm = %s" % Nrcm)
@@ -314,7 +292,7 @@ if __name__ == "__main__":
# Read the dates the data file.
fStr = os.path.join(fdir, ftag + '.h5')
if debug:
if debug or do_vid:
print("fStr = %s" % fStr)
MJD = kh5.tStep(fStr, nStp, aID="MJD")
if debug:
@@ -375,14 +353,14 @@ if __name__ == "__main__":
# Assemble the left-hand plot.
AxL.set_title("RCM Pressure")
AxL.pcolor(bmX, bmY, Prcm, norm=vP, cmap=pCMap)
AxL.pcolor(bmX, bmY, Prcm, norm=vP, cmap=pCMap, shading='auto')
AxL.contour(bmX, bmY, pot, pVals, colors='grey', linewidths=cLW)
kv.addEarth2D(ax=AxL)
kv.SetAx(xyBds, AxL)
# Assemble the middle plot.
AxM.set_title("MHD Pressure")
AxM.pcolor(bmX, bmY, Pmhd, norm=vP, cmap=pCMap)
AxM.pcolor(bmX, bmY, Pmhd, norm=vP, cmap=pCMap, shading='auto')
AxM.contour(bmX, bmY, Nmhd, cVals, norm=vD, cmap=dCMap, linewidths=cLW)
kv.addEarth2D(ax=AxM)
kv.SetAx(xyBds, AxM)
@@ -409,29 +387,29 @@ if __name__ == "__main__":
# Assemble the right-hand plot.
if doWgt:
AxR.set_title("RCM Weight")
AxR.pcolor(bmX, bmY, wRCM, norm=vW, cmap=wCMap)
AxR.pcolor(bmX, bmY, wRCM, norm=vW, cmap=wCMap, shading='auto')
elif doVol:
AxR.set_title("Flux-tube Volume")
AxR.pcolor(bmX, bmY, bVol, norm=vV, cmap=vCMap)
AxR.pcolor(bmX, bmY, bVol, norm=vV, cmap=vCMap, shading='auto')
elif doT:
kT = 6.25*Prcm/Nrcm
AxR.set_title("RCM Temperature")
AxR.pcolor(bmX, bmY, kT, norm=vT, cmap=vCMap)
AxR.pcolor(bmX, bmY, kT, norm=vT, cmap=vCMap, shading='auto')
elif doBeta:
AxR.set_title("Average Beta")
AxR.pcolor(bmX, bmY, beta, norm=vB, cmap=wCMap)
AxR.pcolor(bmX, bmY, beta, norm=vB, cmap=wCMap, shading='auto')
elif doTb:
AxR.set_title("Ingestion timescale")
AxR.pcolor(bmX, bmY, Tb, norm=vI, cmap=sCMap)
AxR.pcolor(bmX, bmY, Tb, norm=vI, cmap=sCMap, shading='auto')
elif doBMin:
AxR.set_title("B Minimum")
AxR.pcolor(bmX, bmY, 1.0e+9*Bmin, norm=vBM, cmap=sCMap)
AxR.pcolor(bmX, bmY, 1.0e+9*Bmin, norm=vBM, cmap=sCMap, shading='auto')
elif doFAC:
AxR.set_title("Vasyliunas FAC")
AxR.pcolor(bmX, bmY, jBirk, norm=vFAC, cmap=wCMap)
AxR.pcolor(bmX, bmY, jBirk, norm=vFAC, cmap=wCMap, shading='auto')
else:
AxR.set_title("Flux-Tube Entropy")
AxR.pcolor(bmX, bmY, S, norm=vS, cmap=sCMap)
AxR.pcolor(bmX, bmY, S, norm=vS, cmap=sCMap, shading='auto')
AxR.plot(bmX, bmY, color=eCol, linewidth=eLW)
AxR.plot(bmX.T, bmY.T, color=eCol, linewidth=eLW)
kv.addEarth2D(ax=AxR)
@@ -478,9 +456,150 @@ if __name__ == "__main__":
y_nudge = 1.0
AxR.text(sc_X[i_sc][-1] + x_nudge, sc_Y[i_sc][-1] + y_nudge, sc, c=color)
# Set left labels for subplots.
ylabel = 'X [R$_E$]'
AxL.set_ylabel(ylabel)
# Set bottom labels for subplots.
xlabel = 'Y [R$_E$]'
AxL.set_xlabel(xlabel)
AxM.set_xlabel(xlabel)
AxR.set_xlabel(xlabel)
# Create the title for the complete figure.
tStr = "\n\n\n" + utDT.strftime("%m/%d/%Y, %H:%M:%S")
plt.suptitle(tStr, fontsize="x-large")
# Add Branch and Hash info
if do_hash:
fig.text(0.1,0.85,f"branch/commit: {branch}/{githash}", fontsize=4)
# Adjust layout to reduce white space
plt.subplots_adjust(top=0.9, bottom=0.1, hspace=0.025)
# Save the figure to a file.
kv.savePic(fOut)
kv.savePic(outPath, dpiQ=300)
if __name__ == "__main__":
"""Make a quick figure of a Gamera magnetosphere run."""
# Set up the command-line parser.
parser = create_command_line_parser()
# Parse the command-line arguments.
args = parser.parse_args()
doBeta = args.beta
doBig = args.big
doBMin = args.bmin
fdir = args.d
debug = args.debug
doElec = args.elec
doFAC = args.fac
ftag = args.id + ".mhdrcm"
doT = args.kt
nStp = args.n
spacecraft = args.spacecraft
doTb = args.tbnc
verbose = args.verbose
doVol = args.vol
doWgt = args.wgt
do_vid = args.vid
do_overwrite = args.overwrite
do_hash = not args.nohash
ncpus = args.ncpus
if debug:
print("args = %s" % args)
# Defaults from external modules.
MHDCol = rcmpp.MHDCol
MHDLW = rcmpp.MHDLW
rcmpp.doEll = not doBig
# Figure parameters
xTail = -20.0
xSun = 10.0
yMax = 15.0
xyBds = [xTail, xSun, -yMax, yMax]
figSz = (12, 6)
eCol = "slategrey"
eLW = 0.15
cLW = 0.5
vP = kv.genNorm(1.0e-1, 1.0e+2, doLog=True)
vS = kv.genNorm(0.0, 0.25)
vW = kv.genNorm(0, 1)
vV = kv.genNorm(1.0e-2, 1.0, doLog=True)
vT = kv.genNorm(0, 50)
vB = kv.genNorm(1.0e-2, 1.0e+2, doLog=True)
vI = kv.genNorm(0, 180)
vBM = kv.genNorm(0, 100)
vFAC = kv.genNorm(2.0)
Nc = 10
nMin = 1.0
nMax = 1.0e+3
vD = kv.genNorm(nMin, nMax, doLog=True)
cVals = np.logspace(1.0, 3.0, Nc)
pCMap = "viridis"
#sCMap = "terrain"
sCMap = "turbo"
dCMap = "cool"
wCMap = "bwr_r"
vCMap = "gnuplot2"
# Read the RCM results.
rcmdata = gampp.GameraPipe(fdir, ftag)
fnrcm = os.path.join(fdir, f'{ftag}.h5')
# Get branch/hash info
if do_hash:
branch = kh5.GetBranch(fnrcm)
githash = kh5.GetHash(fnrcm)
if debug:
print(f'branch/commit: {branch}/{githash}')
if debug:
print("rcmdata = %s" % rcmdata)
# Set up Figure Parameters
# Set global plot font options.
mpl.rc('mathtext', fontset='stixsans', default='regular')
mpl.rc('font', size=10)
# Init figure.
fig = plt.figure(figsize=figSz)
if not do_vid: # Then we are making a single image, keep original functionality
if nStp < 0: # ANY negative index gets the last step.
nStp = rcmdata.sFin
print("Using Step %d"%(nStp))
if debug:
print("nStp = %s" % nStp)
makePlot(nStp,rcmdata,nStp)
else: # Then we make a video, i.e. series of images saved to rcmVid
# Get video loop parameters
s0 = max(rcmdata.s0,1) # Skip Step#0
sFin = rcmdata.sFin
nsteps = sFin - s0
sIds = np.array(range(s0,sFin))
outDir = 'rcmVid'
kh5.CheckDirOrMake(outDir)
# How many 0's do we need for filenames?
n_pad = int(np.log10(nsteps)) + 1
if ncpus == 1:
for i, nStp in enumerate(sIds):
makePlot(i,rcmdata, nStp)
else:
# Make list of parallel arguments
ag = ((i,rcmdata,nStp) for i, nStp in enumerate(sIds) )
# Check we're not exceeding cpu_count on computer
ncpus = min(int(ncpus),cpu_count(logical=False))
print('Doing multiprocessing on', ncpus, 'processes')
# Do parallel job
with Pool(processes=ncpus) as pl:
pl.starmap(makePlot,ag)
print("Done making all the images. Go to mixVid folder")

View File

@@ -48,15 +48,28 @@ if __name__ == "__main__":
fOut = swtag.split('.')[0]+'.'+imgtype
# pulling UT variable for plotting
t0Fmt = "%Y-%m-%d %H:%M:%S"
t0Fmts = ["%Y-%m-%d %H:%M:%S","%Y-%m-%dT%H:%M:%S.%f"]
utfmt='%H:%M \n%Y-%m-%d'
UTall = kh5.PullVar(swIn,"UT")
#Identify the correct time format
t0Fmt = None
for tfmt in t0Fmts:
try:
datetime.datetime.strptime(UTall[1].decode('utf-8'),tfmt)
t0Fmt = tfmt
break # datetime parse succeeded
except ValueError:
pass # datetime parse failed
if t0Fmt is None:
print("Time format in bcwind.h5 did not match any expected format.")
sys.exit()
utall = []
for n in range(len(UTall)):
utall.append(datetime.datetime.strptime(UTall[n].decode('utf-8'),t0Fmt))
# pulling the solar wind values from the table
varlist = kh5.getRootVars(swIn)
D = kh5.PullVar(swIn,"D")
@@ -74,6 +87,6 @@ if __name__ == "__main__":
else:
pltInterp = 0*D
doEps = False
swBCplots.swQuickPlot(UTall,D,Temp,Vx,Vy,Vz,Bx,By,Bz,SYMH,pltInterp,fOut,doEps=doEps,doTrim=doTrim)
swBCplots.swQuickPlot(UTall,D,Temp,Vx,Vy,Vz,Bx,By,Bz,SYMH,pltInterp,fOut,doEps=doEps,doTrim=doTrim,t0fmt=t0Fmt)

View File

@@ -26,5 +26,5 @@ else
endif
setenv PATH ${PATH}:${KAIJUHOME}/scripts:${KAIJUHOME}/scripts/datamodel:${KAIJUHOME}/scripts/helio:${KAIJUHOME}/scripts/legacy:${KAIJUHOME}/scripts/preproc:${KAIJUHOME}/scripts/postproc:${KAIJUHOME}/scripts/quicklook
setenv PATH ${PATH}:${KAIJUHOME}/scripts:${KAIJUHOME}/scripts/datamodel:${KAIJUHOME}/scripts/helio:${KAIJUHOME}/scripts/legacy:${KAIJUHOME}/scripts/preproc:${KAIJUHOME}/scripts/postproc:${KAIJUHOME}/scripts/quicklook:$KAIJUHOME/scripts/makeitso:$KAIJUHOME/scripts/makeitso-gamhelio

View File

@@ -16,5 +16,4 @@ ROOT_DIR="$(echo "$SCRIPT_DIR" | sed 's:/scripts$::')"
# using only export= and not export+= to work with bash and zsh
export KAIJUHOME="$ROOT_DIR"
export PYTHONPATH="$PYTHONPATH:$KAIJUHOME"
export PATH="$PATH:$KAIJUHOME/scripts:$KAIJUHOME/scripts/datamodel:$KAIJUHOME/scripts/helio:$KAIJUHOME/scripts/legacy:$KAIJUHOME/scripts/preproc:$KAIJUHOME/scripts/postproc:$KAIJUHOME/scripts/quicklook"
export PATH="$PATH:$KAIJUHOME/scripts:$KAIJUHOME/scripts/datamodel:$KAIJUHOME/scripts/helio:$KAIJUHOME/scripts/legacy:$KAIJUHOME/scripts/preproc:$KAIJUHOME/scripts/postproc:$KAIJUHOME/scripts/quicklook:$KAIJUHOME/scripts/makeitso:$KAIJUHOME/scripts/makeitso-gamhelio:$KAIJUHOME/scripts/makeitso-gamhelio"

View File

@@ -1,2 +1,2 @@
file(GLOB base_srcs *.F90 defs/*.F90 types/*.F90)
file(GLOB base_srcs *.F90 defs/*.F90 types/*.F90 shellutils/*.F90)
add_library(baselib ${base_srcs})

View File

@@ -16,9 +16,9 @@ module cmidefs
!Default spinup time [s]
real(rp), parameter :: tSpinDef = 14400.0
real(rp), parameter :: defD0 = dalton*1.e6 !AMU/cc -> kg/m3
real(rp), parameter :: defV0 = 100.e3 ! 100 km/s in m/s
real(rp), parameter :: defB0 = sqrt(Mu0*defD0)*defV0*1.0e+9 !T->nT
real(rp), parameter :: defP0 = defD0*defV0*defV0*1.0e+9 !P->nPa
real(rp), parameter :: defD0 = dalton*1.0D+6 !AMU/cc -> kg/m3
real(rp), parameter :: defV0 = 1.0D+5 ! 100 km/s in m/s
real(rp), parameter :: defB0 = sqrt(Mu0*defD0)*defV0*1.0D+9 !T->nT
real(rp), parameter :: defP0 = defD0*defV0*defV0*1.0D+9 !P->nPa
end module cmidefs
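The move from 1.e6-style literals to 1.0D+6 matters because these constants seed the CMI normalization, and single-precision literals silently truncate when rp is double precision. A quick numerical check of the implied unit values (standard dalton and Mu0 values assumed; this snippet is illustrative, not repository code):
# Quick check of the cmidefs normalization constants.
import math
dalton = 1.66053906660e-27   # kg, assumed standard value
Mu0 = 4.0e-7 * math.pi       # H/m, assumed standard value
defD0 = dalton * 1.0e6                          # 1 AMU/cc in kg/m^3
defV0 = 1.0e5                                   # 100 km/s in m/s
defB0 = math.sqrt(Mu0 * defD0) * defV0 * 1.0e9  # T -> nT
defP0 = defD0 * defV0**2 * 1.0e9                # Pa -> nPa
print(f"defD0 = {defD0:.4e} kg/m^3")  # ~1.66e-21
print(f"defB0 = {defB0:.3f} nT")      # ~4.57 nT
print(f"defP0 = {defP0:.4e} nPa")     # ~1.66e-02 nPa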

View File

@@ -4,9 +4,9 @@ module mixdefs
use kdefs
implicit none
integer, parameter :: nVars = 25 ! change together wiht the enumerator below
integer, parameter :: nVars = 27 ! change together with the enumerator below
enum, bind(C)
enumerator :: POT=1,FAC,SIGMAP,SIGMAH,SOUND_SPEED,DENSITY,AVG_ENG,NUM_FLUX,NEUTRAL_WIND,EFIELD,IM_EAVG,IM_EFLUX,IM_IAVG,IM_IFLUX,Z_EAVG,Z_NFLUX,CRPOT,TPOT,IM_GTYPE,AUR_TYPE,IM_BETA,IM_EDEN,IM_EPRE,IM_ENFLX,IM_INFLX
enumerator :: POT=1,FAC,SIGMAP,SIGMAH,SOUND_SPEED,DENSITY,AVG_ENG,NUM_FLUX,NEUTRAL_WIND,EFIELD,IM_EAVG,IM_EFLUX,IM_IAVG,IM_IFLUX,Z_EAVG,Z_NFLUX,CRPOT,TPOT,IM_GTYPE,AUR_TYPE,IM_BETA,IM_EDEN,IM_EPRE,IM_ENFLX,IM_INFLX,DELTAE,IM_NPSP
end enum
! enumerator for MHD->MIX variables
@@ -19,16 +19,12 @@ module mixdefs
enumerator :: MHDPSI=1
end enum
enum, bind(C)
enumerator :: NORTH=1,SOUTH
end enum
enum, bind(C)
enumerator :: AMIE=1,MOEN_BREKKE,LOMPE
end enum
enum, bind(C)
enumerator :: FEDDER=1,ZHANG,RCMHD,RCMONO,RCMFED
enumerator :: FEDDER=1,ZHANG,LINMRG
end enum
enum, bind(C)
@@ -36,7 +32,7 @@ module mixdefs
end enum
enum, bind(C)
enumerator :: AT_MHD=1,AT_RCM,AT_RMnoE,AT_RMfnE,AT_RMono
enumerator :: AT_NoPre=0,AT_MHD,AT_RCM,AT_RMnoE,AT_RMfnE,AT_RMono
end enum
! enumerator for transform variables

View File

@@ -38,7 +38,7 @@ module rcmdefs
REAL(krp), PARAMETER :: tiote_RCM = 4.0
enum, bind(C)
enumerator :: ELOSS_FDG=1,ELOSS_SS,ELOSS_C05,ELOSS_C19,ELOSS_WM !Choice of electron loss model
enumerator :: ELOSS_FDG=1,ELOSS_SS,ELOSS_WM !Choice of electron loss model
end enum
REAL(krp), PARAMETER :: bMin_C_DEF = 1.0 ![nT], default min allowable field strength

View File

@@ -33,7 +33,7 @@ module earthhelper
real(rp), parameter, private :: GLMin = 1.0
real(rp), parameter, private :: GLMax = 8.0
integer, private :: kpDefault = 1
real(rp), private :: kpDefault = 1.0
!Toy code for putting in a quiet time RC
@@ -58,20 +58,20 @@ module earthhelper
!Routine to change kp-default on the fly if necessary
subroutine SetKp0(kp)
integer, intent(in) :: kp
real(rp), intent(in) :: kp
!Can only be 1,5
if ( (kp>=1) .and. (kp<=5) ) then
if ( (kp>=1.0) .and. (kp<=5.0) ) then
kpDefault = kp
else if (kp>5) then
kpDefault = 5
else if (kp>5.0) then
kpDefault = 5.0
endif
end subroutine SetKp0
!Return 2D gallagher density afa r,phi (rad)
function GallagherRP(r,phi,kpO) result(D)
real(rp), intent(in) :: r,phi
integer, intent(in), optional :: kpO
real(rp), intent(in), optional :: kpO
real(rp) :: D
real(rp) :: x,y
x = r*cos(phi)
@@ -87,7 +87,7 @@ module earthhelper
!Return 2D gallagher density
function GallagherXY(x,y,kpO) result(D)
real(rp), intent(in) :: x,y
integer, intent(in), optional :: kpO
real(rp), intent(in), optional :: kpO
real(rp) :: D
real(rp), dimension(8) :: xfn,yfn,xfd,yfd
@@ -97,9 +97,9 @@ module earthhelper
D = 0.0
if (present(kpO)) then
kp = kpO
kp = nint(kpO) ! Gallagher model takes integer Kp
else
kp = kpDefault
kp = nint(kpDefault)
endif
select case(kp)
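The switch to a real-valued kpDefault lets callers pass fractional Kp, while the Gallagher tables remain indexed by integer Kp, hence the nint rounding at the point of use. The clamp-and-round behavior, sketched in plain Python for illustration only:
# Accept a real-valued Kp, clamp to the supported [1, 5] range
# (values below 1 fall back to the default), then round nint-style
# because the Gallagher model is tabulated at integer Kp.
def effective_kp(kp, kp_default=1.0):
    if 1.0 <= kp <= 5.0:
        kp_used = kp
    elif kp > 5.0:
        kp_used = 5.0
    else:
        kp_used = kp_default
    return int(kp_used + 0.5)  # nint for positive Kp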

View File

@@ -1,5 +1,8 @@
!Various routines to read/write HDF5 files
!> Various routines to read/write HDF5 files
!> @notes
!> Useful user routines
!> AddOutVar, AddInVar, WriteVars,ReadVars
!> FindIO: Example, call FindIO(IOVars,"Toy",n), IOVars(n) = Toy data/metadata
module ioH5
use kdefs
use ISO_C_BINDING
@@ -10,12 +13,15 @@ module ioH5
use ioH5Overload
use files
use dates
!Useful user routines
!AddOutVar, AddInVar, WriteVars,ReadVars
!FindIO: Example, call FindIO(IOVars,"Toy",n), IOVars(n) = Toy data/metadata
implicit none
! timeAttributeCache options
integer(HSIZE_T), parameter, private :: CACHE_CHUNK_SIZE = 256
character(len=strLen), parameter :: attrGrpName = "timeAttributeCache"
integer, private :: cacheSize = 0, stepOffset = 0
logical, private :: createdThisFile = .false., isFirstStep = .true.
!Overloader to add data (array or scalar/string) to output chain
interface AddOutVar
module procedure AddOut_5D,AddOut_4D,AddOut_3D,AddOut_2D,AddOut_1D,AddOut_Int,AddOut_DP,AddOut_SP,AddOut_Str
@@ -29,15 +35,16 @@ module ioH5
end interface IOArrayFill
contains
!Routine to stamp output file with various information
!> Routine to stamp output file with various information
subroutine StampIO(fIn)
character(len=*), intent(in) :: fIn
character(len=strLen) :: gStr,dtStr,bStr
type(IOVAR_T), dimension(10) :: IOVars
!Check if this file has already been stamped (has githash)
!NOTE: This creates ambiguity when doing restarts using binaries w/ different hashes
!But what're you gonna do?
!> Check if this file has already been stamped (has githash)
!> @NOTE: This creates ambiguity when doing restarts using binaries w/ different hashes
!> But what're you gonna do?
if ( ioExist(fIn,"GITHASH") ) then
!Already stamped, let's get outta here
return
@@ -59,10 +66,12 @@ contains
#endif
call AddOutVar(IOVars,"DATETIME",dtStr)
call WriteVars(IOVars,.true.,fIn,doStampCheckO=.false.)
end subroutine StampIO
!-------------------------------------------
!Various routines to quickly pull scalars from IOVar_T
! -------------------------------------------
! Various routines to quickly pull scalars from IOVar_T
!
!> Helper function to pull INT from an IOVar data
function GetIOInt(IOVars,vID) result(vOut)
type(IOVAR_T), dimension(:), intent(in) :: IOVars
character(len=*), intent(in) :: vID
@@ -72,6 +81,7 @@ contains
vOut = IOVars(nvar)%data(1)
end function GetIOInt
!> Helper function to pull REAL from an IOVar data
function GetIOReal(IOVars,vID) result(vOut)
type(IOVAR_T), dimension(:), intent(in) :: IOVars
character(len=*), intent(in) :: vID
@@ -81,10 +91,13 @@ contains
nvar = FindIO(IOVars,vID,.true.)
vOut = IOVars(nvar)%data(1)
end function GetIOReal
!-------------------------------------------
!Various routines to get information about step structure of H5 file
!-------------------------------------------
!Various routines to get information about step structure of H5 file
!> Determine grid size from HDF5 file
function GridSizeH5(fIn) result(Nijk)
!> File Name
character(len=*), intent(in) :: fIn
integer, dimension(NDIM) :: Nijk
@@ -124,10 +137,16 @@ contains
endif
end function GridSizeH5
!Get number of groups of form "Step#XXX" and start/end
!> Get number of groups of form "Step#XXX" and start/end
subroutine StepInfo(fStr,s0,sE,Nstp)
!> File Name
character(len=*), intent(in) :: fStr
integer, intent(out) :: s0,sE,Nstp
!> Step Number
integer, intent(out) :: s0
!> Step End
integer, intent(out) :: sE
!> Number of Steps
integer, intent(out) :: Nstp
integer :: herr,i
logical :: gExist,fExist,isEnd,idFirst
integer(HID_T) :: h5fId
@@ -180,7 +199,7 @@ contains
end subroutine StepInfo
!Find step times between s0,sE and store in pre-allocated array
!> Find step times between s0,sE and store in pre-allocated array
subroutine StepTimes(fStr,s0,sE,Ts)
character(len=*), intent(in) :: fStr
integer, intent(in) :: s0,sE
@@ -209,7 +228,7 @@ contains
end subroutine StepTimes
!Same as StepTimes but for MJDs
!> Same as StepTimes but for MJDs
subroutine StepMJDs(fStr,s0,sE,MJDs)
character(len=*), intent(in) :: fStr
integer, intent(in) :: s0,sE
@@ -249,7 +268,7 @@ contains
end subroutine StepMJDs
!Count number of groups of form "Step#XXX"
!> Count number of groups of form "Step#XXX"
function NumSteps(fStr) result(Nstp)
character(len=*), intent(in) :: fStr
integer :: Nstp
@@ -259,14 +278,18 @@ contains
call StepInfo(fStr,s0,sE,Nstp)
end function NumSteps
!Checks if object exists in file fStr
!fStr:/vStr (if no gStrO), otherwise
!fStr:/gStrO/vStr
!> Checks if object exists in file fStr
!> fStr:/vStr (if no gStrO), otherwise
!> fStr:/gStrO/vStr
function ioExist(fStr,vStr,gStrO)
character(len=*), intent(in) :: fStr,vStr
character(len=*), intent(in) :: fStr
!! Filename
character(len=*), intent(in) :: vStr
!! Dataset or attribute name
character(len=*), intent(in), optional :: gStrO
!! Optional group string
logical :: ioExist
logical :: fExist, gExist, dsExist,atExist
integer(HID_T) :: h5fId, gId
integer :: herr
@@ -311,7 +334,7 @@ contains
ioExist = dsExist .or. atExist
end function ioExist
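As documented above, ioExist reports true if vStr exists either as a dataset or as an attribute at fStr:/gStrO/vStr (or at the file root when gStrO is absent). The analogous check from the Python side, sketched with h5py (h5py is not used by this code; the sketch only mirrors the semantics):
import h5py
def io_exist(fname, vstr, gstr=None):
    # True if vstr is a link (dataset/group) OR an attribute under
    # the optional group gstr, else under the file root.
    try:
        with h5py.File(fname, "r") as f:
            loc = f[gstr] if gstr is not None else f
            return (vstr in loc) or (vstr in loc.attrs)
    except (OSError, KeyError):
        # missing file or missing group: the object cannot exist
        return False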
!Read into array of IOVar from file fOut/optional group ID gStr
!> Read into array of IOVar from file fOut/optional group ID gStr
subroutine ReadVars(IOVars,doIOp,baseStr,gStrO)
type(IOVAR_T), dimension(:), intent(inout) :: IOVars
logical, intent(in) :: doIOp
@@ -321,7 +344,7 @@ contains
integer :: n, Nv
! integer(HID_T) :: Nr
logical :: fExist,gExist,dsExist,aExist
logical :: fExist, gExist, dsExist = .false., aExist = .false.
integer :: herr, dsTest
integer(HID_T) :: h5fId, gId, inId
character(len=strLen) :: h5File
@@ -403,159 +426,30 @@ contains
call h5close_f(herr) !Close interface
end subroutine ReadVars
!-------------------------------------------
!Routines to read/write individual variables or attributes
!Write array of IOVar to file fOut (from baseStr), under (optional) group gStrO
!If gStrO unspecified, written to root of HDF5
!doIOp is whether to use IOP (ie output slice) or rp (ie restart)
subroutine WriteVars(IOVars,doIOp,baseStr,gStrO,gStrOO,doStampCheckO)
type(IOVAR_T), dimension(:), intent(inout) :: IOVars
logical, intent(in) :: doIOp
character(len=*), intent(in) :: baseStr
character(len=*), intent(in), optional :: gStrO,gStrOO
logical , intent(in), optional :: doStampCheckO
logical :: fExist,gExist,doStampCheck
integer :: herr
integer(HID_T) :: h5fId, gId, ggId,outId
character(len=strLen) :: h5File
!Set filename to baseStr
!FIXME: Correct to add .h5 to baseStr
h5File = baseStr
if (present(doStampCheckO)) then
doStampCheck = doStampCheckO
else
doStampCheck = .true.
endif
!If we're writing to root of this file, then stamp (will ignore if already stamped)
if (.not. present(gStrO) .and. doStampCheck) then
call StampIO(h5File)
endif
call h5open_f(herr) !Setup Fortran interface
!Start by opening file, create if necessary
inquire(file=h5File,exist=fExist)
if (fExist) then
!Open file
call h5fopen_f(trim(h5File), H5F_ACC_RDWR_F, h5fId, herr)
else
!Create file
call h5fcreate_f(trim(h5File),H5F_ACC_TRUNC_F, h5fId, herr)
endif
!Figure out output location (outId) and create groups as necessary
if (present(gStrO)) then
!Write to group
!Check if group already exists
call h5lexists_f(h5fId,trim(gStrO),gExist,herr)
if (gExist .and. .not. present(gStrOO)) then
!Group exists (and not writing to subgroup)
!Can either skip it, or kill and replace
if (doSkipG) then
!Group exists, close up and skip it
write(*,*) 'Skipping due to pre-existing group ', trim(gStrO)
!Close up
call h5fclose_f(h5fId, herr)
call h5close_f(herr)
return
else
!Kill group
write(*,*) 'Overwriting group ', trim(h5File), '/', trim(gStrO)
call h5ldelete_f(h5fId,trim(gStrO),herr)
!Reset gExist and let next block of code recreate it
gExist = .false.
endif
endif
if (.not. gExist) then
!Create group
call h5gcreate_f(h5fId,trim(gStrO),gId,herr)
else
!Open group
call h5gopen_f(h5fId,trim(gStrO),gId,herr)
endif
if (present(gStrOO)) then
!Create subgroup
call h5gcreate_f(gId,trim(gStrOO),ggId,herr)
outId = ggId
else
outId = gId
endif
else
!Write to root
outId = h5fId
endif !gStrO
!Do writing
call WriteVars2ID(IOVars,outId,doIOp)
!Now done, close up shop
if (present(gStrOO)) call h5gclose_f(ggId,herr)
if (present(gStrO )) call h5gclose_f( gId,herr)
call h5fclose_f(h5fId,herr) !Close file
call h5close_f(herr) !Close interface
end subroutine WriteVars
subroutine WriteVars2ID(IOVars,outId,doIOp)
type(IOVAR_T), dimension(:), intent(inout) :: IOVars
logical, intent(in) :: doIOp
integer(HID_T), intent(in) :: outId
integer :: n, Nv
integer(HID_T) :: Nr
Nv = size(IOVars)
do n=1,Nv
if (IOVars(n)%toWrite) then
!Variable is ready, write it
!Treat as scalar attribute if Nr = 0
Nr = IOVars(n)%Nr
if (Nr == 0) then
!Scalar attribute
if(IOVars(n)%useHyperslab) then
write(*,*) 'Unable to write attribute "',trim(IOVars(n)%idStr),'" as a hyperslab'
stop
else
call WriteHDFAtt(IOVars(n),outId)
endif
else
!N-rank array
!Create data space, use rank/dim info from IOVar
if(IOVars(n)%useHyperslab) then
write(*,*) 'Writing dataset "',trim(IOVars(n)%idStr),'" as a hyperslab not yet supported'
stop
else
call WriteHDFVar(IOVars(n),outId,doIOP)
endif
endif !Nr=0
endif !isSet
enddo
end subroutine WriteVars2ID
!-------------------------------------------
!Routines to read/write individual variables or attributes
!FIXME: Assuming double precision for input binary data
!> Read HDF dataset from group
subroutine ReadHDFVar(IOVar,gId)
type(IOVAR_T), intent(inout) :: IOVar
integer(HID_T), intent(in) :: gId
integer(HSIZE_T), allocatable, dimension(:) :: dims
integer :: Nr,herr, N
integer :: typeClass
integer :: Nr,herr, N, ierror
integer :: typeClass, dExists
integer(SIZE_T) :: typeSize
character(len=strLen) :: inStr
logical :: aExists
dExists = h5ltfind_dataset_f(gId, trim(IOVar%idStr))
if (dExists == 0) then
write(*,"(3A)") "Info: Dataset with name '", trim(IOVar%idStr), "' does not exist, skipping read."
return
elseif(dExists < 0) then
write(*,"(2A)") "Error: Failed to find Dataset with name '", trim(IOVar%idStr)
stop
endif
!Start by getting rank, dimensions and total size
call h5ltget_dataset_ndims_f(gId,trim(IOVar%idStr),Nr,herr)
allocate(dims(Nr))
@@ -567,21 +461,26 @@ contains
IOVar%N = N
IOVar%dims(1:Nr) = dims
if (allocated(IOVar%data)) then
deallocate(IOVar%data)
endif
allocate(IOVar%data(N))
!Read based on data type
select case(IOVar%vType)
case(IONULL,IOREAL)
call h5ltread_dataset_f(gId,trim(IOVar%idStr),H5T_NATIVE_DOUBLE,IOVar%data,dims,herr)
case default
write(*,*) 'Unknown HDF data type, bailing ...'
stop
case(IONULL,IOREAL)
if (allocated(IOVar%data)) then
deallocate(IOVar%data)
endif
allocate(IOVar%data(N))
call h5ltread_dataset_f(gId,trim(IOVar%idStr),H5T_NATIVE_DOUBLE,IOVar%data,dims,herr)
case(IOINT)
if (allocated(IOVar%data_int)) then
deallocate(IOVar%data_int)
endif
allocate(IOVar%data_int(N))
call h5ltread_dataset_f(gId,trim(IOVar%idStr),H5T_NATIVE_INTEGER,IOVar%data_int,dims,herr)
case default
write(*,*) 'Unknown HDF data type, bailing ...'
stop
end select
!Check for attribute strings
!Check for attribute strings
!Unit
call h5aexists_by_name_f(gID,trim(IOVar%idStr),"Units",aExists,herr)
if (aExists) then
@@ -599,11 +498,10 @@ contains
inStr = "NULL"
endif
IOVar%descStr = inStr
IOVar%isDone = .true.
end subroutine ReadHDFVar
! Read a dataset specified as a hyperslab. This assumes a stride of 1 in all dimensions
!> Read a dataset specified as a hyperslab. This assumes a stride of 1 in all dimensions
subroutine ReadHDFVarHyper(IOVar,gId)
type(IOVAR_T), intent(inout) :: IOVar
integer(HID_T), intent(in) :: gId
@@ -648,6 +546,7 @@ contains
end subroutine ReadHDFVarHyper
!FIXME: Add scaling to attributes
!> Read an HDF attribute from a group
subroutine ReadHDFAtt(IOVar,gId)
type(IOVAR_T), intent(inout) :: IOVar
integer(HID_T), intent(in) :: gId
@@ -674,13 +573,151 @@ contains
write(*,*) 'Unknown HDF data type, bailing ...'
stop
end select
!write(*,*) 'Read attribute ', IOVar%data(1)
IOVar%isDone = .true.
end subroutine ReadHDFAtt
!FIXME: Add scaling to attributes
subroutine WriteHDFAtt(IOVar,gId)
!> Helper function to get integer step from
!> string name of Step#XXX format
function GetStepInt(stepStr) result(nStep)
character(len=*), intent(in) :: stepStr
integer :: nStep, status
read(stepStr(6:),*,iostat=status) nStep
if(status /= 0) then
write(*,"(3A)") "Conversion of ", trim(stepStr), " failed. Update to timeAttributeCache dataset size failed."
stop
endif
end function
!> Check for resizing of the timeAttributeCache
!> datasets' size
!>
subroutine CheckAttCacheSize(stepStr, cacheId, cacheExist, cacheCreated)
character(len=*), intent(in) :: stepStr
integer(HID_T), intent(in) :: cacheId
logical, intent(in) :: cacheExist
logical, intent(in) :: cacheCreated
integer :: nStep
type(IOVAR_T) :: stepVar
nStep = GetStepInt(stepStr)
if(isFirstStep .and. nStep > 0) then ! this handles restarts, both in a new directory and in place
stepVar%idStr = "step"
stepVar%vType = IOINT
call ReadHDFVar(stepVar, cacheId)
if(allocated(stepVar%data_int)) then ! step dataset was found in existing timeAttributeCache group, otherwise continue
stepOffset = stepVar%data_int(1) ! Set offset to first step
!write(*,"(A,1x,I,A,I)"), "Step offset is:", stepOffset, ' with step array size ', stepVar%N
endif
isFirstStep = .false.
endif
if(cacheExist .and. .not. cacheCreated) then !Restart of existing run in same directory
! ensure that the cache array begins at current step - first step, by subtracting the offset of the first step
cacheSize = (nStep - stepOffset) + 1
!write(*,"(A,1x,I,1x,A,1x,I)") "timeAttributeCache exists, and not just created, set cacheSize to", cacheSize, "with stepOffset", stepOffset
else ! New run, or restart in a new directory
!write(*,"(A,1x,I)") "timeAttributeCache exists, and just created, set cachesize to ", cacheSize + 1
cacheSize = cacheSize + 1 ! Increase the cacheSize by one for next time step
stepOffset = nStep
endif
end subroutine
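The bookkeeping is easier to follow with numbers: the first write of a run seeds stepOffset, and every later write sizes the cache as (current step - first step) + 1; on an in-place restart, stepOffset is instead recovered from the cache's step dataset, so the cache is resized over any stale tail rather than appended past it. A toy trace, illustrative only:
# Toy trace of the cacheSize bookkeeping.
step_offset = None
def next_cache_size(n_step):
    # First write seeds the offset; later writes size the cache as
    # (current step - first step) + 1, which also holds across an
    # in-place restart because the offset is re-read from the file.
    global step_offset
    if step_offset is None:
        step_offset = n_step
        return 1
    return (n_step - step_offset) + 1
assert [next_cache_size(s) for s in (1, 2, 3)] == [1, 2, 3]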
!> Writes an Attribute that is a float or integer to
!> timeAttributeCache group for each attribute written
!> to a Step# group
subroutine WriteCacheAtt(IOVar,gId)
!> IOVar to write
type(IOVAR_T), intent(inout) :: IOVar
!> Group ID of attribute cache
integer(HID_T), intent(in) :: gId
integer(HID_T) :: sId, dId, pId, memId
integer :: herr
integer :: Nr = 1
integer :: memRank = 1
logical :: dSetExists = .False.
real(rp) :: X
integer(HSIZE_T) :: dSize = 1, rank = 1
integer(HSIZE_T), dimension(1) :: cdims(1), maxdims(1), dataDim(1), memDim(1)
integer(HSIZE_T), dimension(1,1) :: coord
dataDim = (/1/)
memDim = (/1/)
maxdims(1) = H5S_UNLIMITED_F
call h5ltpath_valid_f(gId, trim(IOVar%idStr), .True., dSetExists, herr)
! Create dataspace and dataset initially in group
if (.not. dSetExists) then
!write(*,"(A,1x,A)") "Create var ", trim(IOVar%idStr)
cdims(1) = 1
call h5screate_simple_f(Nr, cdims, sId, herr, maxdims=maxdims)
call h5pcreate_f(H5P_DATASET_CREATE_F, pId, herr)
cdims(1) = CACHE_CHUNK_SIZE
call h5pset_chunk_f(pId, Nr, cdims, herr)
select case(IOVar%vType)
case(IONULL,IOREAL)
call h5dcreate_f(gId, trim(IOVar%idStr), H5T_NATIVE_DOUBLE, sId, &
dId, herr, dcpl_id=pId)
case(IOINT)
call h5dcreate_f(gId, trim(IOVar%idStr), H5T_NATIVE_INTEGER, sId, &
dId, herr, dcpl_id=pId)
end select
coord(1,1) = 1
call h5sselect_elements_f(sId, H5S_SELECT_SET_F, Nr, 1_size_t, coord, herr)
call h5pclose_f(pId,herr)
else
!write(*,"(A,1x,A)") "Found var ", trim(IOVar%idStr)
! Open dataset on subsequent time steps
call h5dopen_f(gId, trim(IOVar%idStr), dId, herr)
! Get the proper dataspace to select the element position
! of the next attribute element to add and write to the dataset
call h5dget_space_f(dId, sId, herr)
! Resize dataset
dSize = cacheSize
call h5dset_extent_f(dId, (/dSize/), herr)
! Create memory space for (extended) dataset addition
cdims(1) = 1
call h5screate_simple_f(Nr, cdims, memId, herr)
! Select the single element at coord = offset in the file space
coord(1,1) = cacheSize
call h5sselect_elements_f(sId, H5S_SELECT_SET_F, Nr, 1_size_t, coord, herr)
end if
select case(IOVar%vType)
case(IONULL,IOREAL)
X = IOVar%data(1)
if (dSetExists) then
call h5dwrite_f(dId, H5T_NATIVE_DOUBLE, X, dataDim, herr, &
mem_space_id=memId, file_space_id=sId)
call h5sclose_f(memId,herr)
else
call h5dwrite_f(dId, H5T_NATIVE_DOUBLE, X, dataDim, herr)
end if
case(IOINT)
X = IOVar%data(1)
if (dSetExists) then
call h5dwrite_f(dId, H5T_NATIVE_INTEGER, int(X), dataDim, herr, &
mem_space_id=memId, file_space_id=sId)
call h5sclose_f(memId,herr)
else
call h5dwrite_f(dId, H5T_NATIVE_INTEGER, int(X), dataDim, herr)
end if
end select
! Cleanup
call h5dclose_f(dId,herr)
call h5sclose_f(sId,herr)
end subroutine WriteCacheAtt
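WriteCacheAtt keeps one chunked 1-D dataset per scalar attribute, created with unlimited max extent and grown by one element per step; the payoff is that post-processing can pull a whole time series in a single read instead of walking every Step# group. The same pattern, and the read-back it enables, sketched with h5py (the file and attribute names are illustrative):
import h5py
CHUNK = 256  # mirrors CACHE_CHUNK_SIZE
with h5py.File("demo.h5", "w") as f:
    g = f.create_group("timeAttributeCache")
    d = g.create_dataset("MJD", shape=(1,), maxshape=(None,),
                         chunks=(CHUNK,), dtype="f8")
    d[0] = 60000.0                      # first step
    for k, mjd in enumerate((60000.01, 60000.02), start=2):
        d.resize((k,))                  # grow by one element per step
        d[k - 1] = mjd
with h5py.File("demo.h5", "r") as f:
    mjds = f["timeAttributeCache/MJD"][:]  # whole series, one read
    print(mjds)                            # [60000.  60000.01 60000.02]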
!FIXME: Add scaling to attributes
!> Write a variable as an attribue for the
!> specified HDF group
subroutine WriteHDFAtt(IOVar,gId)
!> Variable to write to group
type(IOVAR_T), intent(inout) :: IOVar
!> HDF ID for Group
integer(HID_T), intent(in) :: gId
integer :: herr
@@ -688,36 +725,46 @@ contains
!Write based on data type
select case(IOVar%vType)
case(IONULL,IOREAL)
X = IOVar%data(1)
call writeReal2HDF(gId,trim(IOVar%idStr),X)
case(IOINT)
X = IOVar%data(1)
call writeInt2HDF(gId,trim(IOVar%idStr),int(X))
case(IOSTR)
call writeString2HDF(gId,trim(IOVar%idStr),trim(IOVar%dStr))
!call h5ltmake_dataset_string_f(gID,trim(IOVar%idStr),trim(IOVar%dStr),herr)
case default
write(*,*) 'Unknown HDF data type, bailing ...'
stop
case(IONULL,IOREAL)
X = IOVar%data(1)
call writeReal2HDF(gId,trim(IOVar%idStr),X)
case(IOINT)
X = IOVar%data(1)
call writeInt2HDF(gId,trim(IOVar%idStr),int(X))
case(IOSTR)
call writeString2HDF(gId,trim(IOVar%idStr),trim(IOVar%dStr))
!call h5ltmake_dataset_string_f(gID,trim(IOVar%idStr),trim(IOVar%dStr),herr)
case default
write(*,*) 'Unknown HDF data type, bailing ...'
stop
end select
IOVar%isDone = .true.
end subroutine WriteHDFAtt
!FIXME: Assuming IOP is single and double otherwise
!FIXME: Add variable attributes (units, scaling, etc)
!> Write a variable to an HDF dataset and add
!> attributes to the dataset
subroutine WriteHDFVar(IOVar,gId,doIOPO)
type(IOVAR_T), intent(inout) :: IOVar
integer(HID_T), intent(in) :: gId
logical, intent(in), optional :: doIOPO
!> IO Var to write
type(IOVAR_T), intent(inout) :: IOVar
!> Group ID
integer(HID_T), intent(in) :: gId
!> Flag to do IO Precision
logical, intent(in), optional :: doIOPO
logical :: doIOP !Do IO precision for reals
integer :: Nr
integer :: herr
integer(HSIZE_T) :: h5dims(MAXIODIM)
integer(HID_T) :: dId, sId, pId
real(rp) :: vScl
integer(HSIZE_T), dimension(:), allocatable :: cdims
Nr = IOVar%Nr
allocate(cdims(Nr))
!allocate(data(size(IOVar%data)))
!data = 0.0_dp
h5dims(1:Nr) = IOVar%dims(1:Nr)
vScl = IOVar%scale
@@ -732,12 +779,15 @@ contains
case(IONULL,IOREAL)
!Assume real by default
if (doIOP) then
call h5ltmake_dataset_float_f (gId,trim(IOVar%idStr),Nr,h5dims(1:Nr),real(vScl*IOVar%data,sp),herr)
call h5ltmake_dataset_float_f(gId,trim(IOVar%idStr), Nr, h5dims(1:Nr), &
real(vScl*IOVar%data,sp),herr)
else
call h5ltmake_dataset_double_f(gId,trim(IOVar%idStr),Nr,h5dims(1:Nr),real(vScl*IOVar%data,dp),herr)
endif
call h5ltmake_dataset_double_f(gId,trim(IOVar%idStr), Nr, h5dims(1:Nr), &
real(vScl*IOVar%data,dp),herr)
endif
case(IOINT)
call h5ltmake_dataset_int_f(gId,trim(IOVar%idStr),Nr,h5dims(1:Nr),int(vScl*IOVar%data),herr)
call h5ltmake_dataset_int_f(gId,trim(IOVar%idStr), Nr, h5dims(1:Nr), &
int(vScl*IOVar%data),herr)
case default
write(*,*) 'Unknown HDF data type, bailing ...'
stop
@@ -750,8 +800,204 @@ contains
IOVar%isDone = .true.
end subroutine WriteHDFVar
!-------------------------------------------
!These routines add data for input to IO chain
!> Write array of IOVar to file fOut (from baseStr), under (optional) group gStrO
!> @note If gStrO unspecified, written to root of HDF5
!> doIOp is whether to use IOP (ie output slice) or rp (ie restart)
subroutine WriteVars(IOVars,doIOp,baseStr,gStrO,gStrOO,doStampCheckO)
!> Array of IOVars
type(IOVAR_T), dimension(:), intent(inout) :: IOVars
!> Do IO Precision writes
logical, intent(in) :: doIOp
!> Base filename
character(len=*), intent(in) :: baseStr
!> Group Name
character(len=*), intent(in), optional :: gStrO
!> Subgroup Name
character(len=*), intent(in), optional :: gStrOO
!> Check if output file has been stamped
logical , intent(in), optional :: doStampCheckO
logical :: fExist, gExist, doStampCheck
logical :: writeCache, cacheExist, cacheCreate
integer :: herr
integer(HID_T) :: h5fId, gId, ggId, outId, cacheId
character(len=strLen) :: h5File
type(IOVAR_T) :: stepVar
!Set filename to baseStr
!FIXME: Correct to add .h5 to baseStr
h5File = baseStr
writeCache = .false.
cacheCreate = .false.
if (present(doStampCheckO)) then
doStampCheck = doStampCheckO
else
doStampCheck = .true.
endif
!If we're writing to root of this file, then stamp (will ignore if already stamped)
if (.not. present(gStrO) .and. doStampCheck) then
call StampIO(h5File)
endif
call h5open_f(herr) !Setup Fortran interface
!Start by opening file, create if necessary
inquire(file=h5File,exist=fExist)
if (fExist) then
!Open file
call h5fopen_f(trim(h5File), H5F_ACC_RDWR_F, h5fId, herr)
else
!Create file
createdThisFile = .true.
call h5fcreate_f(trim(h5File),H5F_ACC_TRUNC_F, h5fId, herr)
endif
!Figure out output location (outId) and create groups as necessary
if (present(gStrO)) then
!Write to group
!Check if group already exists
call h5lexists_f(h5fId,trim(gStrO),gExist,herr)
if (gExist .and. .not. present(gStrOO)) then
!Group exists (and not writing to subgroup)
!Can either skip it, or kill and replace
if (doSkipG) then
!Group exists, close up and skip it
write(*,*) 'Skipping due to pre-existing group ', trim(gStrO)
!Close up
call h5fclose_f(h5fId, herr)
call h5close_f(herr)
return
else
!Kill group
write(*,*) 'Overwriting group ', trim(h5File), '/', trim(gStrO)
call h5ldelete_f(h5fId,trim(gStrO),herr)
!Reset gExist and let next block of code recreate it
gExist = .false.
endif
endif
if (.not. gExist) then
!Create group
call h5gcreate_f(h5fId,trim(gStrO),gId,herr)
else
!Open group
call h5gopen_f(h5fId,trim(gStrO),gId,herr)
endif
if(trim(toUpper(gStrO(1:5))) == "STEP#") then
writeCache = .true.
!Check if cache group exists
call h5lexists_f(h5fId,trim(attrGrpName),cacheExist,herr)
if (.not. cacheExist) then
if(.not. createdThisFile) then
write(*,*) "Attempt to create the timeAttributeCache in an existing h5 file", &
" that does not have the cache group."
write(*,*) "Perform restart in a different directory, or create the timeAttributeCache", &
" and populate it in the exisitng h5 file."
stop
endif
!Create cache group
call h5gcreate_f(h5fId,trim(attrGrpName),cacheId,herr)
cacheCreate = .true.
endif
! Open attribute cache group
call h5gopen_f(h5fId,trim(attrGrpName),cacheId,herr)
! Check attribute cache size and resize
call CheckAttCacheSize(trim(gStrO), cacheId, cacheExist, cacheCreate)
! Write Step# to cache
stepVar%Nr = 0
stepVar%idStr = "step"
stepVar%vType = IOINT
stepVar%data = [GetStepInt(trim(gStrO))]
call WriteCacheAtt(stepVar,cacheId)
endif
if (present(gStrOO)) then
!Create subgroup
call h5gcreate_f(gId,trim(gStrOO),ggId,herr)
outId = ggId
else
outId = gId
endif
else
!Write to root
outId = h5fId
endif !gStrO
!Do writing
if(present(doStampCheckO)) then
call WriteVars2ID(IOVars,outId,doIOp,isRoot=.true.)
else
! Write step vars and attributes to group outId
if(writeCache) then
call WriteVars2ID(IOVars,outId,doIOp, &
cacheId=cacheId)
else
call WriteVars2ID(IOVars,outId,doIOp)
end if
end if
!Now done, close up shop
if (present(gStrOO)) call h5gclose_f(ggId,herr)
if (present(gStrO )) call h5gclose_f( gId,herr)
if (writeCache) call h5gclose_f(cacheId,herr) !Close cache group
call h5fclose_f(h5fId,herr) !Close file
call h5close_f(herr) !Close interface
end subroutine WriteVars
!> Write out all IOVars to their respective IDs
subroutine WriteVars2ID(IOVars,outId,doIOp,cacheId,isRoot)
type(IOVAR_T), dimension(:), intent(inout) :: IOVars
logical, intent(in) :: doIOp
integer(HID_T), intent(in) :: outId
integer(HID_T), intent(in), optional :: cacheId
logical, intent(in), optional :: isRoot
integer :: n, Nv
integer(HID_T) :: Nr
Nv = size(IOVars)
do n=1,Nv
if (IOVars(n)%toWrite) then
!Variable is ready, write it
!Treat as scalar attribute if Nr = 0
Nr = IOVars(n)%Nr
if (Nr == 0) then
!Scalar attribute
if(IOVars(n)%useHyperslab) then
write(*,*) 'Unable to write attribute "',trim(IOVars(n)%idStr),'" as a hyperslab'
stop
else
call WriteHDFAtt(IOVars(n),outId)
if(present(cacheId)) then
call WriteCacheAtt(IOVars(n),cacheId)
endif
endif
else
!N-rank array
!Create data space, use rank/dim info from IOVar
if(IOVars(n)%useHyperslab) then
write(*,*) 'Writing dataset "',trim(IOVars(n)%idStr),'" as a hyperslab not yet supported'
stop
else
call WriteHDFVar(IOVars(n),outId,doIOP)
endif
endif !Nr=0
endif !isSet
enddo
end subroutine WriteVars2ID
!-------------------------------------------
!These routines add data for input to IO chain
!> Add Input Variable to IOVars pool to read
subroutine AddInVar(IOVars,idStr,vTypeO,vSclO)
type(IOVAR_T), dimension(:), intent(inout) :: IOVars
character(len=*), intent(in) :: idStr
@@ -778,7 +1024,7 @@ contains
end subroutine AddInVar
! Alternate subroutine to read in a hyperslab from a dataset
!> Alternate subroutine to read in a hyperslab from a dataset
subroutine AddInVarHyper(IOVars,idStr,offsets,counts,vTypeO,vSclO)
type(IOVAR_T), dimension(:), intent(inout) :: IOVars
integer, dimension(:), intent(in) :: offsets
@@ -815,7 +1061,7 @@ contains
end subroutine AddInVarHyper
!Clears info/memory from an IO chain
!> Clears info/memory from an IO chain
subroutine ClearIO(IOVars)
type(IOVAR_T), dimension(:), intent(inout) :: IOVars
@@ -846,10 +1092,9 @@ contains
enddo
end subroutine ClearIO
!-----------------------------
!HDF 5 helper routines
!Converts Fortran real kind to HDF5 precision
!-----------------------------
!HDF 5 helper routines
!> Converts Fortran real kind to HDF5 precision
function H5Precision(h5p) result(h5gReal)
integer, intent(in) :: h5p
integer(HID_T):: h5gReal
@@ -863,10 +1108,10 @@ contains
end function H5Precision
!-----------------------------
!Write attributes
!Write integer scalar to attribute
!-----------------------------
!Write attributes
!> Write integer scalar to attribute
subroutine writeInt2HDF(gId,vId,datIn)
integer(HID_T), intent(in) :: gId
character(len=*),intent(in) :: vId
@@ -891,8 +1136,8 @@ contains
end subroutine writeInt2HDF
!Writes a single scalar as attribute to a group/root
!Always uses RP precision
!> Writes a single scalar as attribute to a group/root
!> Always uses RP precision
subroutine writeReal2HDF(gId,vId,datIn)
integer(HID_T), intent(in) :: gId
@@ -921,6 +1166,8 @@ contains
end subroutine writeReal2HDF
!> Write a string to HDF group
!> as an attribute
subroutine writeString2HDF(gId,vId,data)
integer(HID_T), intent(in) :: gId
character(len=*),intent(in) :: vId
@@ -947,10 +1194,10 @@ contains
end subroutine writeString2HDF
!-----------------------------
!Read attributes
!-----------------------------
!Read attributes
!Read integer from HDF5 attribute
!> Read integer from HDF5 attribute
function readIntHDF(gId,vId,vDefOpt) result(vOut)
integer(HID_T), intent(in) :: gId
character(len=*),intent(in) :: vId
@@ -978,7 +1225,7 @@ contains
call h5aclose_f(attrId,herror)
end function readIntHDF
!Read real (rp) from HDF5 attribute
!> Read real (rp) from HDF5 attribute
function readRealHDF(gId,vId,vDefOpt) result(vOut)
integer(HID_T), intent(in) :: gId
character(len=*),intent(in) :: vId

View File

@@ -24,19 +24,26 @@ module ioH5types
!vType: holds info about variable type, only partially implemented
type IOVAR_T
character(len=strLen) :: idStr="NONE",unitStr="CODE",descStr="DESCRIPTION"
integer(HSIZE_T) :: dims (MAXIODIM) = 0 !Dimension information
integer(HSIZE_T) :: offsets(MAXIODIM) = 0 !Offset for reading/writing a hyperslab, optional
character(len=strLen) :: idStr="NONE"
character(len=strLen) :: unitStr="CODE"
character(len=strLen) :: descStr="DESCRIPTION"
!> Dimension information
integer(HSIZE_T) :: dims (MAXIODIM) = 0
!> Offset for reading/writing a hyperslab, optional
integer(HSIZE_T) :: offsets(MAXIODIM) = 0
logical :: useHyperslab=.false.
integer :: Nr = 0 !Number of ranks
integer :: N = 0 !Total number of elements
real(rp), dimension(:), allocatable :: data !1D holder for data
!> Number of ranks of dataset
integer :: Nr = 0
!> Total number of elements
integer :: N = 0
!> 1D holder for float data
real(rp), dimension(:), allocatable :: data
!> 1D holder for integer data
integer, dimension(:), allocatable :: data_int
real(rp) :: scale=1.0, renorm=0.0
logical :: toWrite=.false.,toRead=.false. !Read or write this variable
logical :: isDone =.false. !Whether or not variable has been successfully read/written
integer :: vType=IONULL
character(len=strLen) :: dStr !Optional string data
end type IOVAR_T
end module ioH5types

View File

@@ -37,58 +37,58 @@ module kdefs
real(rp), parameter :: G_cgs = 6.6726D-8 ![cm^3/g/s^2], Gravitational constant (per NRL plasma formulary'21)
!MKS Constants
real(rp), parameter :: vc_mks = vc_cgs*(1.0e-2) ![m/s], Speed of light
real(rp), parameter :: vc_mks = vc_cgs*(1.0D-2) ![m/s], Speed of light
real(rp), parameter :: G_mks = 6.6726D-11 ![m^3/kg/s^2], Gravitational constant (per NRL plasma formulary'21)
!Helper conversions
real(rp), parameter :: G2nT = 1.0E+5 !Gauss->nT
real(rp), parameter :: G2T = 1.0E-4 !Gauss->T
real(rp), parameter :: G2nT = 1.0D+5 !Gauss->nT
real(rp), parameter :: G2T = 1.0D-4 !Gauss->T
real(rp), parameter :: kev2J = 1.602176634D-16 !keV->J
real(rp), parameter :: ev2J = kev2J*(1.0e-3) ! eV->J
real(rp), parameter :: kev2erg = kev2J*1.0e+7
real(rp), parameter :: ev2J = kev2J*(1.0D-3) ! eV->J
real(rp), parameter :: kev2erg = kev2J*1.0D+7
real(rp), parameter :: erg2kev = 1.0/kev2erg
real(rp), parameter :: Re_km = Re_cgs*(1.0e-2)*(1.0e-3) !km
real(rp), parameter :: Re_km = Re_cgs*(1.0D-2)*(1.0D-3) !km
!Misc
real(rp), parameter :: Mu0 = 4*PI*1.0e-7 ! Tm/A
real(rp), parameter :: Mu0 = 4*PI*1.0D-7 ! Tm/A
real(rp), parameter :: Kbltz = 1.380649D-16 ![cm^2 g /s^2/K=erg/K] Boltzmann constant
real(rp), parameter :: mec2 = (Me_cgs*vc_cgs**2.0)*1.0E-3/kev2erg ! [MeV] electron rest mass
real(rp), parameter :: mec2 = (Me_cgs*vc_cgs**2.0)*(1.0D-3)/kev2erg ! [MeV] electron rest mass
real(rp), parameter :: heFrac = 1.16D0 ! Accounts for 4% helium
real(rp), parameter :: eCharge = 1.602D-19 ! Charge of electron
real(rp), parameter :: eCharge = 1.60217663D-19 ! Charge of electron
!NOTE: dalton isn't precisely Mp b/c carbon binding energy business
real(rp), parameter :: dalton = 1.66053906660*1.0E-27 ! Mass unit [kg]
real(rp), parameter :: dalton = 1.6605390666D-27 ! Mass unit [kg]
!Planetary constants
!Earth
!real(rp), parameter :: EarthM0g = 0.31 !Gauss, old LFM value
real(rp), parameter :: EarthM0g = 0.2961737 !Gauss, Olsen++ 2000
real(rp), parameter :: EarthM0g = 0.2961737_rp !Gauss, Olsen++ 2000
real(rp), parameter :: REarth = Re_cgs*1.0e-2 !m
real(rp), parameter :: REarth = Re_cgs*1.0D-2 !m
real(rp), parameter :: RionE = 6.5 ! Earth Ionosphere radius in 1000 km
real(rp), parameter :: RionE = 6.5_rp ! Earth Ionosphere radius in 1000 km
! Earth corotation potential
!real(rp), parameter :: EarthPsi0 = 92.4 ! !!OUTDATED, based on LFM magntic field strength
!real(rp), parameter :: EarthPsi0 = 87.62 ! Calculated using B = 0.2961737 Gauss
real(rp), parameter :: EarthPsi0 = REarth**2 * (2.0*PI/86400.0) * (EarthM0g*G2T) * 1.0e-3 ! [kV]
real(rp), parameter :: EarthPsi0 = REarth**2 * (2.0*PI/86400.0) * (EarthM0g*G2T) * 1.0D-3 ! [kV]
! Everyone should ideally get Psi0 from planet object, but things specific to Earth should still be able to rely on EarthPsi0
!Saturn
real(rp), parameter :: SaturnM0g = 0.21 !Gauss
real(rp), parameter :: RSaturnXE = 9.5 !Rx = X*Re
real(rp), parameter :: SaturnM0g = 0.21_rp !Gauss
real(rp), parameter :: RSaturnXE = 9.5_rp !Rx = X*Re
!Jupiter
real(rp), parameter :: JupiterM0g = 4.8 !Gauss
real(rp), parameter :: RJupiterXE = 11.0 !Rx = X*Re
real(rp), parameter :: JupiterM0g = 4.8_rp !Gauss
real(rp), parameter :: RJupiterXE = 11.0_rp !Rx = X*Re
!Mercury
real(rp), parameter :: MercuryM0g = 0.00345 !Gauss
real(rp), parameter :: RMercuryXE = 0.31397 !Rx = X*Re
real(rp), parameter :: MercuryM0g = 0.00345_rp !Gauss
real(rp), parameter :: RMercuryXE = 0.31397_rp !Rx = X*Re
!Neptune
real(rp), parameter :: NeptuneM0g = 0.142 !Gauss
real(rp), parameter :: RNeptuneXE = 3.860 !Rx = X*Re
real(rp), parameter :: NeptuneM0g = 0.142_rp !Gauss
real(rp), parameter :: RNeptuneXE = 3.860_rp !Rx = X*Re
!Helio constants
real(rp), parameter :: Rsolar = 6.956D5 ! [km] Solar radius
real(rp), parameter :: Msolar = 1.98847D30 ! [kg] Solar mass
real(rp), parameter :: Tsolar_synodic = 27.28 ![days] synodic Tsolar
real(rp), parameter :: Tsolar_synodic = 27.28_rp ![days] synodic Tsolar
!Numbered accessors
!Directions
@@ -108,6 +108,11 @@ module kdefs
enumerator :: VELX=MOMX,VELY,VELZ,PRESSURE
endenum
! Directions, used by ShellGrid, Remix, calcdb, etc
enum, bind(C)
enumerator :: NORTH=1,SOUTH,EAST,WEST
end enum
#ifdef USECOLORTEXT
!Color options for funsies
character, parameter :: ANSIESCAPE = char(27) !Escape character

View File

@@ -589,4 +589,18 @@ module math
endif
end function SmoothOperator33
function isAscending(A) result(isAsc)
real(rp), dimension(:), intent(in) :: A
logical :: isAsc
integer :: N
N = size(A)
isAsc = all( A(2:N)-A(1:N-1) > 0 )
end function isAscending
end module math

View File

@@ -31,6 +31,13 @@ module gamtypes_mpi
type(MPI_Datatype), dimension(:), allocatable :: recvTypesMagFlux
integer(kind=MPI_AN_MYADDR), dimension(:), allocatable :: sendDisplsMagFlux, recvDisplsMagFlux
! Bxyz Data Transfer Variables
integer, dimension(:), allocatable :: sendCountsBxyz
type(MPI_Datatype), dimension(:), allocatable :: sendTypesBxyz
integer, dimension(:), allocatable :: recvCountsBxyz
type(MPI_Datatype), dimension(:), allocatable :: recvTypesBxyz
integer(kind=MPI_AN_MYADDR), dimension(:), allocatable :: sendDisplsBxyz, recvDisplsBxyz
! Debugging flags
logical :: printMagFluxFaceError = .false.
real(rp) :: faceError = 0.0_rp

View File

@@ -25,13 +25,15 @@ module mpidefs
integer, parameter :: MPI_AN_MYADDR = MPI_ADDRESS_KIND ! this is the default
#endif
!MPI Communicator Id
integer, parameter :: voltId = 116
integer, parameter :: gamId = 45
integer, parameter :: traceId = 47
integer, parameter :: rcmId = 34
integer, parameter :: tgcmId = 57
integer, parameter :: hidraId = 40
!MPI Coupling Communicator Id
integer, parameter :: voltId = 116
integer, parameter :: gamId = 45
integer, parameter :: rcmId = 34
integer, parameter :: tgcmId = 57
integer, parameter :: hidraId = 40
integer, parameter :: hidraNId = 54
integer, parameter :: hidraSId = 59
integer, parameter :: mageId = 26
contains

View File

@@ -24,6 +24,12 @@ module volttypes_mpi
logical :: doSquishHelp = .false., masterSquish = .false.
logical :: squishLoadBalance = .true., deepProcessingInProgress=.false.
! coupling comms variables to be done on volt rank
type(MPI_Comm) :: mageCplComm
integer :: voltCplRank,CplSize
integer :: gcmCplRank = -1, hidraNCplRank = -1,hidraSCplRank = -1,hidraCplRank = -1
integer, dimension(:),allocatable :: IAm
! mpi voltron specific options
type(VoltOptionsMpi_T) :: vOptionsMpi

437
src/base/remixReader.F90 Normal file
View File

@@ -0,0 +1,437 @@
!> Reads REMIX data from output files, performs interpolation in time, etc.
module remixReader
use kdefs
use ioH5
use XML_Input
use iotable
use shellGrid
use shellUtils
use hdf5
implicit none
integer, parameter :: MAXIOVARS = 20
!------
! Types
!------
type rmHemi_T
integer :: nStp
real(rp) :: time
type(ShellGrid_T) :: shGr
!! Copy of ShellGrid as defined in rmReader_T
type(ShellGridVar_T) :: Fac,SigP,SigH,Pot
!! Vars defined relative to ShellGrid
end type rmHemi_T
type rmReader_T
! Model stuff
character(len=strLen) :: rmF
!! Remix filename we are reading form
! Grid stuff
real(rp), dimension(:,:,:), allocatable :: XY
!! X/Y coordinates in 2D. Convenient since we need it for lots of 2D arrays
type(ShellGrid_T) :: shGr
! Its kinda like a grid of file steps if you really think about it
type(ioTab_T) :: rmTab
!! Table of steps in a mix.h5 file
! State stuff
real(rp) :: time
!! Current sim time according to whoever's in charge of rmReader
integer :: i1=-1,i2=-1
!! Input file step numbers bracketing time
type(rmHemi_T) :: rmN1,rmN2,rmS1,rmS2
!! File data at step numbers bracketing time
logical :: doStatic
!! Whether or not we are out of valid remix time bounds and should switch to static operation
type(ShellGridVar_T), dimension(NORTH:SOUTH) :: nsFac, nsSigP, nsSigH, nsPot
!! Time-interpolated North/South hemisphere shellVar objects
end type rmReader_T
contains
!------
! Init
!------
subroutine initRM(ftag,inpXML,rmReader)
character(len=*), intent(in) :: ftag
!! Filename tag in this format: <ftag>.mix.h5
type(XML_Input_T), intent(in) :: inpXML
type(rmReader_T), intent(inout) :: rmReader
type(IOVAR_T), dimension(MAXIOVARS) :: IOVars
real(rp), dimension(:), allocatable :: th1D, ph1D
!! 1D arrays of theta and phi coordinates, derived from XY locations
real(rp), dimension(:,:,:), allocatable :: tmpXY
!! Remix data is coming in at (Np,Nt), we use this to read and then convert to (Nt,Np)
real(rp), dimension(:,:), allocatable :: tmpXcn, tmpYcn
!! Temporary X/Y in real mix corner format
integer :: Nt,Np
!! # of non-ghost cell centers
integer :: j, h
!! Loop indices
real(rp) :: Rp_m, Ri_rp
write(rmReader%rmF,'(2a)') trim(adjustl(ftag)),'.mix.h5'
write(*,*) 'Initializing rmReader w/ ', trim(rmReader%rmF)
! Get file data loaded in
call InitIOTab(rmReader%rmTab, inpXML, rmReader%rmF)
rmReader%rmTab%bStr = trim(adjustl(ftag))
rmReader%doStatic = (rmReader%rmTab%N == 1)
! Start with grid
call ClearIO(IOVars)
call AddInVar(IOVars,"X")
call AddInVar(IOVars,"Y")
call AddInVar(IOVars,"Ri_m") ! Attribute
call AddInVar(IOVars,"Rp_m") ! Attribute
call ReadVars(IOVars,.true.,rmReader%rmF)
!! Grid conversion
!! X/Y in file are grid corners around the data
!! To get original X/Y coords, we need to take the average and retrieve the cell-centered coordinates
!! This means that the variable data is now stored at cell corners
!! But, the outputted variable data only goes from phi [0,2Pi-dphi]
!! So in order to comply with shellGrid expectations, we need to copy first phi column to end of array as well
!! So the final size of the arrays we care about are (Nt-1,Np)
!! Also, keep in mind that mix.h5 drops the pole theta points, so we have 1 less theta than run-time mix. Doesn't matter here though
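!! Shape sketch (hypothetical sizes): file corners X,Y of shape (Np,Nt)=(360,181)
!! average down to (Nt-1,Np-1)=(180,359) cell centers; wrapping the first phi
!! column back on at the end gives the (Nt-1,Np)=(180,360) arrays used below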
Np = IOVars(FindIO(IOVars, "X"))%dims(1)
Nt = IOVars(FindIO(IOVars, "X"))%dims(2)
! Get ionosphere radius in units of Rp if possible
if (ioExist(rmReader%rmF, "Rp_m") .and. ioExist(rmReader%rmF, "Ri_m")) then
Rp_m = IOVars(FindIO(IOVars, "Rp_m"))%data(1)
Ri_rp = IOVars(FindIO(IOVars, "Ri_m"))%data(1) / Rp_m
else
! If info not present in file, default to hard-coded Earth values
Ri_rp = RIonE*1e6/REarth
endif
! One day we will get grid in this format directly from the mix.h5 file
! But today is not that day
allocate(rmReader%XY(Nt-1,Np,XDIR:YDIR))
allocate(tmpXY(Np,Nt,XDIR:YDIR))
call IOArray2DFill(IOVars,"X",tmpXY(:,:,XDIR))
call IOArray2DFill(IOVars,"Y",tmpXY(:,:,YDIR))
call genInGrid(tmpXY(:,:,XDIR), tmpXY(:,:,YDIR), tmpXcn, tmpYcn)
rmReader%XY(:,:Np-1,XDIR) = transpose(tmpXcn)
rmReader%XY(:,:Np-1,YDIR) = transpose(tmpYcn)
! Wrap in j
rmReader%XY(:,Np,:) = rmReader%XY(:,1,:)
! Now XY is in the desired format, time for shellGrid
! XY are in units of ionospheric radii (r = 1 Ri)
! So the theta and phi we calculate are also for the ionospheric grid
allocate(th1D(Nt-1))
allocate(ph1D(Np))
th1D = asin(rmReader%XY(:,1,XDIR))
ph1D = atan2(rmReader%XY(Nt-1,:,YDIR), rmReader%XY(Nt-1,:,XDIR))
! Clean up phi for shellGrid generation
do j=Np/2-1,Np
if (ph1D(j) < 0) then
ph1D(j) = ph1D(j) + 2.0_rp*PI
endif
enddo
if (abs(ph1D(1)) < TINY) then
ph1D(1) = 0.0
endif
if (ph1D(Np) < PI) then
ph1D(Np) = ph1D(Np) + 2.0_rp*PI
endif
associate(sh=>rmReader%shGr)
call GenShellGrid(sh, th1D, ph1D, "remixReader", radO=Ri_rp)
! Hooray we have a shellGrid now
! Init our vars
do h=NORTH,SOUTH
call initShellVar(sh, SHCORNER, rmReader%nsFac(h))
rmReader%nsFac(h)%mask(sh%is:sh%ie+1, sh%js:sh%je+1) = .true.
call initShellVar(sh, SHCORNER, rmReader%nsPot(h) , rmReader%nsFac(h)%mask)
call initShellVar(sh, SHCORNER, rmReader%nsSigP(h), rmReader%nsFac(h)%mask)
call initShellVar(sh, SHCORNER, rmReader%nsSigH(h), rmReader%nsFac(h)%mask)
enddo
! Now init hemispheres
call initHemi(rmReader%rmN1, sh, "_N1")
call initHemi(rmReader%rmN2, sh, "_N2")
call initHemi(rmReader%rmS1, sh, "_S1")
call initHemi(rmReader%rmS2, sh, "_S2")
end associate
contains
subroutine initHemi(rmHemi,shGr, nameSuffix)
type(rmHemi_T), intent(inout) :: rmHemi
type(ShellGrid_T), intent(in) :: shGr
!! Reference shellGrid, already defined
character(len=*), intent(in) :: nameSuffix
character(len=strLen) :: fullName
rmHemi%nStp = -1 !Not yet set
rmHemi%time = 0.0
associate(hsg=>rmHemi%shGr)
! Generate our own copy of the parent shellGrid
write(fullName,"(A,A)") trim(shGr%name), nameSuffix
call GenChildShellGrid(shGr, hsg, fullName)
! Init our variables
call initShellVar(hsg, SHCORNER, rmHemi%Fac)
rmHemi%Fac%mask(hsg%is:hsg%ie+1, hsg%js:hsg%je+1) = .true.
call initShellVar(hsg, SHCORNER, rmHemi%Pot , rmHemi%Fac%mask)
call initShellVar(hsg, SHCORNER, rmHemi%SigP, rmHemi%Fac%mask)
call initShellVar(hsg, SHCORNER, rmHemi%SigH, rmHemi%Fac%mask)
end associate
end subroutine initHemi
end subroutine initRM
!------
! Update
!------
subroutine updateRM(rmReader, t)
! Update rmReader to time t
! TODO: This should do fancier stuff, like ebICstd:updateFields
type(rmReader_T), intent(inout) :: rmReader
real(rp), intent(in) :: t
real(rp) :: w1, w2
! Update tabSlices
call GetTabSlc(rmReader%rmTab,t,rmReader%i1,rmReader%i2)
if ( t >= maxval(rmReader%rmTab%times) ) then
rmReader%doStatic = .true.
endif
! Read the 4 hemispheres
call readHemi(rmReader, rmReader%rmN1, rmReader%i1, NORTH)
call readHemi(rmReader, rmReader%rmS1, rmReader%i1, SOUTH)
call readHemi(rmReader, rmReader%rmN2, rmReader%i2, NORTH)
call readHemi(rmReader, rmReader%rmS2, rmReader%i2, SOUTH)
!Now fill in remix main state for this t
call GetTWgts(rmReader%rmN1%time, rmReader%rmN2%time, t, rmReader%doStatic, w1, w2)
call hemi2rm(rmReader, w1, w2)
!write(*,*)"-----"
!write(*,*)t
!write(*,*)rmReader%rmN1%time,",",rmReader%rmN2%time
!write(*,*)w1,",",w2
contains
subroutine readHemi(rmReader, rmHemi, nStp, nsID)
!! Reads hemisphere information from remix file step
type(rmReader_T), intent(in) :: rmReader
type(rmHemi_T), intent(inout) :: rmHemi
integer, intent(in) :: nStp, nsID
character(len=strLen) :: hID,gStr
type(IOVAR_T), dimension(MAXIOVARS) :: IOVars
! Check to see if we need to read
if (rmHemi%nStp == nStp) then
! We've already read this data
return
endif
! Otherwise, get the data
! Which hemisphere?
if (nsID == NORTH) then
hID = "NORTH"
else
hID = "SOUTH"
endif
gStr = trim(rmReader%rmTab%gStrs(nStp))
write(*,'(5a)') '<Reading hemisphere from ', trim(rmReader%rmTab%bStr), '/', trim(gStr), '>'
rmHemi%time = rmReader%rmTab%times(nStp)
rmHemi%nStp = nStp
call ClearIO(IOVars)
call AddInVar(IOVars,"Field-aligned current " // hID)
call AddInVar(IOVars, "Pedersen conductance " // hID)
call AddInVar(IOVars, "Hall conductance " // hID)
call AddInVar(IOVars, "Potential " // hID)
call ReadVars(IOVars,.true.,rmReader%rmF,gStr)
! Abstract away the janky mapping, eventually replace with ShellGrid-friendly variable reading
call readVarJank(IOVars, "Field-aligned current " // hID, rmHemi%shGr, rmHemi%Fac )
call readVarJank(IOVars, "Pedersen conductance " // hID, rmHemi%shGr, rmHemi%SigP)
call readVarJank(IOVars, "Hall conductance " // hID, rmHemi%shGr, rmHemi%SigH)
call readVarJank(IOVars, "Potential " // hID, rmHemi%shGr, rmHemi%Pot )
end subroutine readHemi
end subroutine updateRM
!------
! Helpers
!------
subroutine GetTWgts(t1, t2, t, doStatic, w1, w2)
real(rp), intent(in) :: t1, t2, t
logical, intent(in) :: doStatic
real(rp), intent(out) :: w1,w2
real(rp) :: dt
if (doStatic) then
w1 = 1.0
w2 = 0.0
else
dt = t2-t1
w1 = (t2 - t)/dt
w2 = (t - t1)/dt
endif
end subroutine GetTWgts
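! Weighting sketch: for t1 <= t <= t2, hemi2rm below reconstructs
!   Q(t) = w1*Q(t1) + w2*Q(t2),  w1 = (t2-t)/(t2-t1),  w2 = (t-t1)/(t2-t1)
! e.g. (hypothetical numbers) t1=10s, t2=20s, t=17.5s gives w1=0.25, w2=0.75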
subroutine hemi2rm(rmReader, w1, w2)
type(rmReader_T), intent(inout) :: rmReader
real(rp), intent(in) :: w1, w2
rmReader%nsFac (NORTH)%data = w1*rmReader%rmN1%Fac %data + w2*rmReader%rmN2%Fac %data
rmReader%nsPot (NORTH)%data = w1*rmReader%rmN1%Pot %data + w2*rmReader%rmN2%Pot %data
rmReader%nsSigP(NORTH)%data = w1*rmReader%rmN1%SigP%data + w2*rmReader%rmN2%SigP%data
rmReader%nsSigH(NORTH)%data = w1*rmReader%rmN1%SigH%data + w2*rmReader%rmN2%SigH%data
rmReader%nsFac (SOUTH)%data = w1*rmReader%rmS1%Fac %data + w2*rmReader%rmS2%Fac %data
rmReader%nsPot (SOUTH)%data = w1*rmReader%rmS1%Pot %data + w2*rmReader%rmS2%Pot %data
rmReader%nsSigP(SOUTH)%data = w1*rmReader%rmS1%SigP%data + w2*rmReader%rmS2%SigP%data
rmReader%nsSigH(SOUTH)%data = w1*rmReader%rmS1%SigH%data + w2*rmReader%rmS2%SigH%data
end subroutine hemi2rm
!------
! Temporary Helpers
!------
subroutine genInGrid(xc,yc,x,y)
!! Mix h5 grid to "real" mix grid
!! NOTE: This has been modified from the version in mixio
real(rp), dimension(:,:),intent(in) :: xc,yc ! with corners 1/2-cell shifted from original
real(rp), dimension(:,:),allocatable,intent(out) :: x,y
integer, dimension(2) :: dims
integer :: Np, Nt
dims = shape(xc)
Np = dims(1)-1; Nt = dims(2)
if (.not.allocated(x)) allocate(x(Np,Nt-1))
if (.not.allocated(y)) allocate(y(Np,Nt-1))
!x(:,2:Nt) = 0.25*(xc(2:Np+1,2:Nt)+xc(1:Np,2:Nt)+xc(2:Np+1,1:Nt-1)+xc(1:Np,1:Nt-1))
!y(:,2:Nt) = 0.25*(yc(2:Np+1,2:Nt)+yc(1:Np,2:Nt)+yc(2:Np+1,1:Nt-1)+yc(1:Np,1:Nt-1))
x = 0.25*(xc(2:Np+1,2:Nt)+xc(1:Np,2:Nt)+xc(2:Np+1,1:Nt-1)+xc(1:Np,1:Nt-1))
y = 0.25*(yc(2:Np+1,2:Nt)+yc(1:Np,2:Nt)+yc(2:Np+1,1:Nt-1)+yc(1:Np,1:Nt-1))
! fix pole
!x(:,1) = 0
!y(:,1) = 0
end subroutine genInGrid
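! Stencil sketch: each cell center is the mean of its four surrounding corners,
!   x(i,j) = 0.25*( xc(i,j) + xc(i+1,j) + xc(i,j+1) + xc(i+1,j+1) )
! so (hypothetical sizes) corners xc of shape (361,181) give centers x of shape (360,180)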
subroutine readVarJank(IOV, vName, shGr, v)
!! Placeholder readVar to make the current remix output file work with the ShellGrid format
!! TODO: One day we will remove this once we implement a more standardized ShellGrid-friendly output file format
type(IOVAR_T), dimension(MAXIOVARS), intent(in) :: IOV
character(len=strLen), intent(in) :: vName
type(ShellGrid_T), intent(in) :: shGr
type(ShellGridVar_T), intent(inout) :: v
!real(rp), dimension(shGr%Np, shGr%Nt+1) :: tmpVar
real(rp), dimension(shGr%js:shGr%je, shGr%is:shGr%ie+1) :: tmpVar
integer :: idx
associate (isg=>shGr%isg, ieg=>shGr%ieg, jsg=>shGr%jsg, jeg=>shGr%jeg, &
is =>shGr%is , ie =>shGr%ie , js =>shGr%js , je =>shGr%je )
call IOArray2DFill(IOV,vName,tmpVar)
v%data(is:ie+1,js:je) = transpose(tmpVar)
v%data(is:ie+1,je+1) = v%data(is:ie+1,js) ! Wrap first phi column, with explicit bounds so ghosts are excluded
! Copy last good cell through ghosts just so there's a value there
do idx=isg,is-1
v%data(idx,:) = v%data(is,:)
enddo
do idx=ie+2,ieg
v%data(idx,:) = v%data(ie+1,:)
enddo
call wrapJ_SGV(shGr, v)
! But, we're gonna say that only our real domain is valid
v%mask = .false.
v%mask(is:ie+1, js:je+1) = .true.
end associate
end subroutine readVarJank
subroutine outputRMSG(rmReader, fname, isFirst, gStrO)
!! Write rmReader stuff to file
!! Pretty much just for debugging
type(rmReader_T), intent(in) :: rmReader
character(len=*), intent(in) :: fname
logical, intent(in) :: isFirst
character(len=*), optional, intent(in) :: gStrO
logical :: gExist
type(IOVAR_T), dimension(10) :: IOVars
if (isFirst) then
call CheckAndKill(fname, .true.)
endif
if (.not. ioExist(fname, "sh_th")) then
call ClearIO(IOVars)
call AddOutVar(IOVars,"sh_th",rmReader%shGr%th)
call AddOutVar(IOVars,"sh_ph",rmReader%shGr%ph)
call WriteVars(IOVars,.true.,fname)
endif
! If still here, we are good and need to write more stuff to file
! Per-step data only gets written when a group name (gStrO) is provided
if (present(gStrO)) then
call ClearIO(IOVars)
call AddOutVar(IOVars, "time", rmReader%time)
call AddOutVar(IOVars, "N1_time", rmReader%rmN1%time)
call AddOutVar(IOVars, "nsPot NORTH data", rmReader%nsPot(NORTH)%data)
call AddOutVar(IOVars, "nsPot NORTH mask", merge(1.0_rp, 0.0_rp, rmReader%nsPot(NORTH)%mask))
call WriteVars(IOVars,.true.,fname,gStrO=gStrO)
endif
end subroutine outputRMSG
end module remixReader

View File

@@ -1,266 +0,0 @@
!Various data structures and routines to do interpolation from (and to?) a spherical shell grid
!TODO: Add routine to take list of scattered points and interpolate to ShellGrid_T
module shellinterp
use kdefs
implicit none
integer, parameter, private :: NumTSC = 9
!Data type for holding 2D spherical shell grid
type ShellGrid_T
integer :: NLat,NLon !Number of lat/lon cells
!xxI = interfaces (Nx+1)
!xxC = centers (Nx)
!Assuming lat \in -pi/2,pi/2 and lon \in [0,2pi]
real(rp), dimension(:), allocatable :: LatI,LatC,LonI,LonC !Radians
logical :: doSP = .false., doNP = .false. !Whether grid contains south/north pole
real(rp) :: minLat,maxLat
end type ShellGrid_T
contains
!Create a shell grid data structure
!Takes iLats (latitudinal edges, size NLat+1) and NLon cells (assuming uniform spacing)
subroutine GenShellGrid(shGr,iLats,NLat,NLon)
type(ShellGrid_T), intent(inout) :: shGr
real(rp), intent(in) :: iLats(NLat+1)
integer , intent(in) :: NLat,NLon
real(rp) :: dphi
integer :: n
!Nuke arrays if already allocated
if (allocated(shGr%LatI)) deallocate(shGr%LatI)
if (allocated(shGr%LatC)) deallocate(shGr%LatC)
if (allocated(shGr%LonI)) deallocate(shGr%LonI)
if (allocated(shGr%LonC)) deallocate(shGr%LonC)
!Create new arrays
shGr%NLat = NLat
shGr%NLon = NLon
allocate(shGr%LatI(shGr%NLat+1))
allocate(shGr%LonI(shGr%NLon+1))
allocate(shGr%LatC(shGr%NLat))
allocate(shGr%LonC(shGr%NLon))
!Set edges
shGr%LatI(:) = iLats
dphi = 2*PI/shGr%NLon
do n=1,shGr%NLon+1
shGr%LonI(n) = (n-1)*dphi
enddo
!Set centers
shGr%LatC = ( shGr%LatI(2:shGr%NLat+1) + shGr%LatI(1:shGr%NLat) )/2.0
shGr%LonC = ( shGr%LonI(2:shGr%NLon+1) + shGr%LonI(1:shGr%NLon) )/2.0
!Decide if this has north/south pole
shGr%doNP = .false.
shGr%doSP = .false.
if ( maxval(shGr%LatI) >= (PI/2.0 - TINY) ) then
shGr%doNP = .true.
endif
if ( minval(shGr%LatI) <= (-PI/2.0 + TINY) ) then
shGr%doSP = .true.
endif
if (shGr%doSP .or. shGr%doNP) then
!Die for now
write(*,*) "This routine does not yet support handling north/south pole"
stop
endif
shGr%minLat = minval(shGr%LatI)
shGr%maxLat = maxval(shGr%LatI)
end subroutine GenShellGrid
!Interpolate on grid shGr a cell-centered variable (Q) at point lat,lon
!Result is Qp
!Optional : isGood (NLat,NLon), a mask for good/bad data
!Optional : isGoodP, whether Qp is a good value
subroutine InterpShell(shGr,Q,lat,lonin,Qp,isGoodP,isGood)
type(ShellGrid_T), intent(in) :: shGr
real(rp), intent(in) :: Q(shGr%NLat,shGr%NLon)
real(rp), intent(out) :: Qp
real(rp), intent(in) :: lat,lonin
logical , intent(out), optional :: isGoodP
logical , intent(in) , optional :: isGood(shGr%NLat,shGr%NLon)
integer :: i0,j0,ij0(2),di,dj
integer :: ip,jp,n
real(rp) :: dlat,dphi,eta,zeta,lon
real(rp), dimension(NumTSC) :: Ws,Qs
logical , dimension(NumTSC) :: isGs
real(rp), dimension(-1:+1) :: wE,wZ
Qp = 0.0
if (present(isGoodP)) then
isGoodP = .false.
endif
!Do some short circuiting
if ( (lat>shGr%maxLat) .or. (lat<shGr%minLat) ) then
!Point not on this grid, get outta here
return
endif
if (lonin<0) then
lon = lonin+2*PI
else
lon = lonin
endif
!Now we know this point is in our grid
call GetShellIJ(shGr,lat,lon,ij0) !Find the i,j cell this point is in
i0 = ij0(1)
j0 = ij0(2)
if (present(isGood)) then
!Check cell is good
if (.not. isGood(i0,j0)) return
endif
!Have central cell and know that it's good
isGoodP = .true.
!Trap for near-pole cases
if (shGr%doSP .and. (i0==1)) then
!Handle south pole and return
write(*,*) "Not implemented!"
stop
endif
if (shGr%doNP .and. (i0==shGr%NLat)) then
!Handle north pole and return
write(*,*) "Not implemented!"
stop
endif
!Note: If still here we know i0 isn't on the boundary
!Calculate local mapping
dlat = shGr%LatI(i0+1)-shGr%LatI(i0)
dphi = shGr%LonI(j0+1)-shGr%LonI(j0)
eta = ( lat - shGr%LatC(i0) )/dlat
zeta = ( lon - shGr%LonC(j0) )/dphi
call ClampMapVar(eta)
call ClampMapVar(zeta)
!Calculate weights
call TSCweight1D(eta ,wE)
call TSCweight1D(zeta,wZ)
!Now loop over surrounding cells and get weights/values
n = 1
do dj=-1,+1
do di=-1,+1
ip = i0+di
jp = j0+dj
!Wrap around boundary
if (jp<1) jp = shGr%NLon
if (jp>shGr%NLon) jp = 1
!Do zero-grad for lat
if (ip<1) ip = 1
if (ip>shGr%NLat) ip = shGr%NLat
Qs(n) = Q(ip,jp)
Ws(n) = wE(di)*wZ(dj)
if (present(isGood)) then
isGs(n) = isGood(ip,jp)
else
isGs(n) = .true.
endif
if (.not. isGs(n)) Ws(n) = 0.0
n = n + 1
enddo
enddo !dj
!Renormalize
Ws = Ws/sum(Ws)
!Get final value
Qp = dot_product(Qs,Ws)
!Have some internal functions
contains
!Clamps mapping in [-0.5,0.5]
subroutine ClampMapVar(ez)
REAL(rp), intent(inout) :: ez
if (ez<-0.5) ez = -0.5
if (ez>+0.5) ez = +0.5
end subroutine ClampMapVar
!1D triangular shaped cloud weights
!1D weights for triangular shaped cloud interpolation
!Assuming on -1,1 reference element, dx=1
!Check for degenerate cases ( |eta| > 0.5 )
subroutine TSCweight1D(eta,wE)
real(rp), intent(in) :: eta
real(rp), intent(out) :: wE(-1:1)
wE(-1) = 0.5*(0.5-eta)**2.0
wE( 1) = 0.5*(0.5+eta)**2.0
wE( 0) = 0.75 - eta**2.0
end subroutine TSCweight1D
end subroutine InterpShell
!For a shGr type find the ij cell that the point lat/lon is in
!NOTE: Returns 0,0 if the point isn't in the grid
subroutine GetShellIJ(shGr,lat,lonin,ij0)
type(ShellGrid_T), intent(in) :: shGr
real(rp), intent(in) :: lat,lonin
integer, intent(out) :: ij0(2)
real(rp) :: lon,dp,dJ
integer :: iX,jX
if (lonin<0) then
lon = lonin+2*PI
else
lon = lonin
endif
ij0 = 0
!Do some short circuiting
if ( (lat>shGr%maxLat) .or. (lat<shGr%minLat) ) then
!Point not on this grid, get outta here
return
endif
!If still here then the lat bounds are okay, let's do this
!Get lat part
iX = minloc( abs(shGr%LatC-lat),dim=1 ) !Find closest lat cell center
!Now get lon part, assume uniform phi spacing
dp = shGr%LonC(2)-shGr%LonC(1)
dJ = lon/dp
jX = floor(dJ) + 1
!Impose bounds just in case
iX = max(iX,1)
iX = min(iX,shGr%NLat)
jX = max(jX,1)
jX = min(jX,shGr%NLon)
ij0 = [iX,jX]
end subroutine GetShellIJ
end module shellinterp

View File

@@ -0,0 +1,508 @@
!> Various data structures and routines to define grids on spherical shells
module shellGrid
use kdefs
use math
implicit none
!> Identifiers for location of variable data relative to shell grid
!> Cell center, corners, theta faces, phi faces
enum, bind(C)
enumerator :: SHCC,SHCORNER,SHFTH,SHFPH
end enum
!> Data type for holding 2D spherical shell grid
type ShellGrid_T
character(len=strLen) :: name
!! Name assigned to this ShellGrid instance, determined by the model initializing it
real(rp) :: radius
!! Radius that this ShellGrid lives at, in units of Rp (planetary radii)
integer :: Nt,Np
!! Number of colat/lon cells (theta, phi)
real(rp), dimension(:), allocatable :: th, ph, lat
!! (Nt+1 or Np+1) [radians] grid corners
!! th (theta) is colatitude and runs from north pole toward south
!! Phi is longitude, with zero/2pi at 12 MLT
!! Assuming lat in -pi/2,pi/2 and lon in [0,2pi]
real(rp), dimension(:), allocatable :: thc, phc, latc
!! (Nt or Np) [radians] grid centers
logical :: doSP = .false., doNP = .false.
!! Whether active grid contains south/north pole, no ghosts in this case
logical :: ghostSP = .false., ghostNP = .false.
!! Whether the last ghost corner is the south/north pole
real(rp) :: minTheta, maxTheta, minPhi, maxPhi
!! Theta and phi bounds of grid excluding ghost cells
real(rp) :: minGTheta, maxGTheta, minGPhi, maxGPhi
!! Theta and phi bounds of grid including ghost cells
integer :: is=1,ie,js=1,je
!! Local indices, active region
integer :: isg,ieg,jsg,jeg
!! Local indices, including ghosts
integer :: Ngn=0, Ngs=0, Nge=1, Ngw=1
!! Ghosts for north, south, east, and west boundaries. East=>larger phi. West => smaller phi.
!! default to 0 for N/S, 1 for E/W
logical :: isPeriodic
!! Whether the low/high phi boundary is periodic
logical :: isPhiUniform
!! Define this to speed up search for interpolation
!> Subgrid information
!> ShellGrids that are subgrids of other shellGrids store info about their parent grid
logical :: isChild = .false.
character(len=strLen) :: parentName
!! Name of the parent grid that this one derives from
integer :: bndis,bndie,bndjs,bndje
!! Indices of parent grid that bound this grid
! TODO: add unique identifiers for this SG, and for potential parent SG
! That way, child always knows which grid it came from
! Can be checked against when calling routines like InterpParentToChild, InterpChildToParent
end type ShellGrid_T
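! Index sketch (hypothetical sizes): Nt=4, Np=8 with default ghosts
! (Ngn=Ngs=0, Nge=Ngw=1) yields is=1, ie=4, js=1, je=8, isg=1, ieg=4,
! jsg=0, jeg=9; corner arrays then span th(1:5) and ph(0:10)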
type ShellGridVar_T
integer :: loc
!! Location of data on the shellGrid (e.g. center, corner, theta or phi face)
!! Corresponds to enum above (SHCC, SHCORNER, SHFTH, SHFPH)
integer :: Ni, Nj
!! Number of values in i and j direction
integer :: isv,iev,jsv,jev
!! Start and end indices for this variable
!! ex: if loc=SHCORNER, isv = sh%isg, iev=sh%ieg+1
!! This is helpful for e.g. InterpShellVar_TSC_pnt determining size of dtheta and dPhi arrays
real(rp), dimension(:,:), allocatable :: data
!! The actual variable values
logical, dimension(:,:), allocatable :: mask
!! Mask indicating whether the data at a given index is valid
!! e.g. good for interpolation, etc.
! Commenting out for now, I think we will ultimately not use this
!logical, dimension(4) :: bcsApplied
!! Flag indicating whether BCs were applied (ghosts filled) for [n,s,e,w] boundaries
end type ShellGridVar_T
contains
!> Create a shell grid data structure
!> Takes Theta and Phi 1D arrays (uniform or not)
!> Decides if isPeriodic and isPhiUniform based on the Phi array passed in
subroutine GenShellGrid(shGr,Theta,Phi,name,nGhosts,radO)
type(ShellGrid_T), intent(inout) :: shGr
real(rp), dimension(:), intent(in) :: Theta, Phi
character(len=*) :: name
!! Name identifier used for this grid instance
integer, optional, dimension(4), intent(in) :: nGhosts
!! How many ghosts on each side (n,s,e,w)
real(rp), optional, intent(in) :: radO
!! Radius in planetary radii from planet center this grid lives at
!! WARNING: Will default to 1 if not provided! That should rarely be the case
integer :: i,j
real(rp) :: delta
real(rp), dimension(:), allocatable :: dphi
! Parse optional parameters
if (present(nGhosts)) then
shGr%Ngn = nGhosts(NORTH) ! otherwise, always 0 as set in ShellGrid type
shGr%Ngs = nGhosts(SOUTH) ! otherwise, always 0 as set in ShellGrid type
shGr%Nge = nGhosts(EAST) ! otherwise, always 1 as set in ShellGrid type
shGr%Ngw = nGhosts(WEST) ! otherwise, always 1 as set in ShellGrid type
end if
! do some checks first
if (.not.(isAscending(Theta))) then
write(*,*) "Inside shell grid generator (GenShellGrid)."
write(*,*) "Theta array must be ascending. Quitting..."
stop
end if
if (.not.(isAscending(Phi))) then
write(*,*) "Inside shell grid generator (GenShellGrid)."
write(*,*) "Phi array must be ascending. Quitting..."
stop
end if
if (any(Theta<0.).or.(any(Theta>PI))) then
write(*,*) "Inside shell grid generator (GenShellGrid)."
write(*,*) "Theta array should be in the range [0,PI]. Quitting..."
stop
end if
if (any(Phi<0.).or.(any(Phi>2*PI))) then
write(*,*) "Inside shell grid generator (GenShellGrid)."
write(*,*) "Phi array should be in the range [0,2*PI]. Quitting..."
stop
end if
! decide if the grid is periodic
if ( ( minval(Phi) <= TINY ).and.( maxval(Phi) >= 2*PI - TINY) ) then
shGr%isPeriodic = .True.
else
shGr%isPeriodic = .False.
end if
if ( (shGr%isPeriodic).and.(shGr%Nge/=shGr%Ngw) ) then
write(*,*) "Inside shell grid generator (GenShellGrid)."
write(*,*) "Periodic grid must have the same number of ghosts on low/high phi boundaries. Quitting..."
stop
end if
if ( (.not.(shGr%isPeriodic)).and.(shGr%Nge/=shGr%Ngw) ) then
write(*,*) "Inside shell grid generator (GenShellGrid)."
write(*,*) "Grids with Nge/=Ngw have not been implemented. Quitting..."
stop
end if
! Decide if this has north/south pole
shGr%doNP = .false.
shGr%doSP = .false.
if ( maxval(Theta) >= (PI - TINY) ) then
shGr%doSP = .true.
endif
if ( minval(Theta) <= (TINY) ) then
shGr%doNP = .true.
endif
if ( (shGr%doNP).and.(shGr%Ngn/=0) ) then
write(*,*) "Inside shell grid generator (GenShellGrid)."
write(*,*) "Grid containing the north pole can't have Ngn/=0. Quitting..."
stop
end if
if ( (shGr%doSP).and.(shGr%Ngs/=0) ) then
write(*,*) "Inside shell grid generator (GenShellGrid)."
write(*,*) "Grid containing the south pole can't have Ngs/=0. Quitting..."
stop
end if
! define various array indices
shGr%Np = size(Phi)-1
shGr%Nt = size(Theta)-1
shGr%is = 1; shGr%ie = shGr%Nt
shGr%js = 1; shGr%je = shGr%Np
shGr%isg = shGr%is - shGr%Ngn
shGr%ieg = shGr%ie + shGr%Ngs
shGr%jsg = shGr%js - shGr%Ngw
shGr%jeg = shGr%je + shGr%Nge
! decide if the grid is uniform in Phi
! helps to speed up interpolation search
if (allocated(dphi)) deallocate(dphi)
allocate(dphi(shGr%Np)) ! helper
dphi = Phi(2:shGr%Np+1) - Phi(1:shGr%Np)
if ( all( abs(dphi - dphi(1)) <= TINY ) ) then
shGr%isPhiUniform = .True.
else
shGr%isPhiUniform = .False.
end if
deallocate(dphi)
associate(is=>shGr%is, ie=>shGr%ie, js=>shGr%js, je=>shGr%je, &
isg=>shGr%isg, ieg=>shGr%ieg, jsg=>shGr%jsg, jeg=>shGr%jeg)
! Nuke arrays if already allocated
if (allocated(shGr%th)) deallocate(shGr%th)
if (allocated(shGr%ph)) deallocate(shGr%ph)
if (allocated(shGr%lat)) deallocate(shGr%lat)
if (allocated(shGr%thc)) deallocate(shGr%thc)
if (allocated(shGr%phc)) deallocate(shGr%phc)
if (allocated(shGr%latc)) deallocate(shGr%latc)
! Create new arrays
allocate(shGr%th (isg:ieg+1))
allocate(shGr%thc (isg:ieg ))
allocate(shGr%ph (jsg:jeg+1))
allocate(shGr%phc (jsg:jeg ))
allocate(shGr%lat (isg:ieg+1))
allocate(shGr%latc(isg:ieg ))
! Set grid coordinates
shGr%th(is:ie+1) = Theta ! note the arrays are conformable because of the index definitions above
shGr%ph(js:je+1) = Phi
! Define ghost coordinates
! Do north, unless it's the pole (no ghosts for poles)
if ( (.not.shGr%doNP).and.(shGr%Ngn/=0) ) then
! linearly extrapolate
do i=1,shGr%Ngn
shGr%th(is-i) = 2*shGr%th(is) - shGr%th(is+i)
end do
! check if we got into negative thetas
! then just fill the space with same size cells down to theta=0
if ( shGr%th(is-shGr%Ngn) < 0 ) then
delta = shGr%th(is)/shGr%Ngn
do i=1,shGr%Ngn
shGr%th(is-i) = shGr%th(is) - delta*i
end do
shGr%ghostNP = .true.
end if
end if
! Do south, unless it's the pole (no ghosts for poles)
if ( (.not.shGr%doSP).and.(shGr%Ngs/=0) ) then
! linearly extrapolate
do i=1,shGr%Ngs
shGr%th(ie+1+i) = 2*shGr%th(ie+1) - shGr%th(ie+1-i)
end do
! check if we got into thetas > pi
! then just fill the space with same size cells up to theta=pi
if ( shGr%th(ie+1+shGr%Ngs) > PI ) then
delta = (PI - shGr%th(ie+1))/shGr%Ngs
do i=1,shGr%Ngs
shGr%th(ie+1+i) = shGr%th(ie+1) + delta*i
end do
shGr%ghostSP = .true.
end if
end if
! if non-periodic grid is needed, can implement ghosts on E/W ends similarly to N/S above
! but we assume the grid is always periodic
if ( (.not.shGr%isPeriodic) ) then
write(*,*) "Inside shell grid generator (GenShellGrid)."
write(*,*) "Non-periodic grids are not implemented. Quitting..."
stop
endif
! Note, we already made sure that for a periodic grid Nge=Ngw, phi(js)=0 and phi(je+1)=2*pi
do j=1,shGr%Nge
shGr%ph(js-j) = shGr%ph(je+1-j) - 2*PI
shGr%ph(je+1+j) = shGr%ph(js+j) + 2*PI
end do
! Set centers
shGr%thc = ( shGr%th(isg+1:ieg+1) + shGr%th(isg:ieg) )/2.
shGr%phc = ( shGr%ph(jsg+1:jeg+1) + shGr%ph(jsg:jeg) )/2.
! Set latitude
shGr%lat = PI/2.0_rp - shGr%th
shGr%latc = PI/2.0_rp - shGr%thc
! Note, the bounds below only include the active cells
shGr%minTheta = minval(shGr%th(is:ie+1))
shGr%maxTheta = maxval(shGr%th(is:ie+1))
shGr%minPhi = minval(shGr%ph(js:je+1))
shGr%maxPhi = maxval(shGr%ph(js:je+1))
! this includes ghosts
shGr%minGTheta = minval(shGr%th)
shGr%maxGTheta = maxval(shGr%th)
shGr%minGPhi = minval(shGr%ph)
shGr%maxGPhi = maxval(shGr%ph)
end associate
shGr%name = name
if (present(radO)) then
! Try to trap for people thinking units are meters or km
if (radO > 10) then
write(*,*) "WARNING for ShellGrid with name: ",name
write(*,*) "Radius being set to ",radO," planetary radii. That seems kinda big."
endif
shGr%radius = radO
else
write(*,*) "WARNING for ShellGrid with name: ",name
write(*,*) "No radius provided, assuming Earth's ionosphere according to kdefs:"
shGr%radius = RIonE*1.0e6/REarth
write(*,*) shGr%radius," Rp"
endif
end subroutine GenShellGrid
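! Usage sketch (values hypothetical): a uniform 2-degree global shell at r = 1.02 Rp:
!   nt = 90; np = 180
!   th = [( i*PI/nt  , i=0,nt )]
!   ph = [( j*2*PI/np, j=0,np )]
!   call GenShellGrid(sh, th, ph, "demo", radO=1.02_rp)
! This grid contains both poles, so Ngn=Ngs=0 is required (the default)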
subroutine initShellVar(shGr, loc, shellVar, maskO)
!! Inits a ShellGridVar associated with the provided, already-initialized ShellGrid
type(ShellGrid_T), intent(in) :: shGr
!! ShellGrid that this variable is related to
integer, intent(in) :: loc
!! Location of data (cell center, corner, theta or phi face)
type(ShellGridVar_T), intent(out) :: shellVar
logical, dimension(:,:), optional, intent(in) :: maskO
!! Optional mask to initialize with
integer :: iExtra, jExtra
! If you didn't want your data blown up you shouldn't have called init
if (allocated(shellVar%data)) deallocate(shellVar%data)
if (allocated(shellVar%mask)) deallocate(shellVar%mask)
shellVar%loc = loc
! Determine which dimensions have extra index relative to # cells based on variable's location on grid
select case(loc)
case(SHCC)
iExtra = 0
jExtra = 0
case(SHCORNER)
iExtra = 1
jExtra = 1
case(SHFTH)
iExtra = 1
jExtra = 0
case(SHFPH)
iExtra = 0
jExtra = 1
case default
write(*,*) "initShellGridVar got an invalid data location:",loc
stop
end select
allocate(shellVar%data(shGr%isg:shGr%ieg+iExtra, shGr%jsg:shGr%jeg+jExtra))
allocate(shellVar%mask(shGr%isg:shGr%ieg+iExtra, shGr%jsg:shGr%jeg+jExtra))
shellVar%Ni = shGr%Nt + shGr%Ngn + shGr%Ngs + iExtra
shellVar%Nj = shGr%Np + shGr%Nge + shGr%Ngw + jExtra
shellVar%isv = shGr%isg
shellVar%iev = shGr%ieg + iExtra
shellVar%jsv = shGr%jsg
shellVar%jev = shGr%jeg + jExtra
shellVar%data = 0. ! initialize to 0
! Init mask, either by maskO or defaults
if (present(maskO)) then
if ( all(shape(shellVar%mask) == shape(maskO)) ) then
shellVar%mask = maskO
else
write(*,*)"ERROR in initShellVar: maskO shape doesn't match."
stop
endif
else
shellVar%mask = .false. ! Up to user to determine which points are valid
endif
end subroutine initShellVar
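! Usage sketch (names hypothetical; mirrors initRM in remixReader): a
! corner-centered potential on grid sh, valid over the whole active region:
!   type(ShellGridVar_T) :: pot
!   call initShellVar(sh, SHCORNER, pot)
!   pot%mask(sh%is:sh%ie+1, sh%js:sh%je+1) = .true.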
subroutine GenChildShellGrid(pSG, cSG, name, nGhosts, sub_is, sub_ie, sub_js, sub_je)
!! Given a parent ShellGrid, makes a child ShellGrid as a subset of parent
type(ShellGrid_T), intent(in) :: pSG
!! Parent ShellGrid
type(ShellGrid_T), intent(out) :: cSG
!! Child ShellGrid
character(len=*) :: name
!! Name identifier used for this grid instance
integer, optional, dimension(4), intent(in) :: nGhosts
!! Number of ghosts the child grid will have
integer, optional, intent(in) :: sub_is, sub_ie, sub_js, sub_je
!! Start and end i/j indices of parent grid that bound active domain of new child grid
!! These are optional. If left out, we will essentially make a copy of the parent grid
integer :: is, ie, js, je
!! Actual bounds used
! If a bound is provided then we use that, if not we default to parent grid's bounds
! (note: merge() would evaluate the absent optional argument, so use explicit tests)
is = pSG%is  ; if (present(sub_is)) is = sub_is
ie = pSG%ie+1; if (present(sub_ie)) ie = sub_ie
js = pSG%js  ; if (present(sub_js)) js = sub_js
je = pSG%je+1; if (present(sub_je)) je = sub_je
! Check for valid bounds
if (is < pSG%is) then
write(*,*) "ERROR GenChildShellGrid: Invalid is bound."
write(*,*) "Requested:",is
write(*,*) "Mimumum:",pSG%is
stop
endif
if (ie > pSG%ie+1) then
write(*,*) "ERROR GenChildShellGrid: Invalid ie bound."
write(*,*) "Requested:",ie
write(*,*) "Maximum:",pSG%ie+1
stop
endif
if (js < pSG%js) then
write(*,*) "ERROR GenChildShellGrid: Invalid js bound."
write(*,*) "Requested:",js
write(*,*) "Mimumum:",pSG%js
stop
endif
if (je > pSG%je+1) then
write(*,*) "ERROR GenChildShellGrid: Invalid je bound."
write(*,*) "Requested:",je
write(*,*) "Maximum:",pSG%je+1
stop
endif
if (is > ie) then
write(*,*) "ERROR GenChildShellGrid: is > ie"
stop
endif
if (js > je) then
write(*,*) "ERROR GenChildShellGrid: js > je"
stop
endif
! Make sure name is not a copy of its parent's
if (trim(name) == trim(pSG%name)) then
write(*,*) "ERROR GenChildShellGrid: Child can't have same name as its parent"
write(*,*) " Try '",trim(pSG%name)," ",trim(pSG%name),"sson'"
stop
endif
! Otherwise we are ready to make a child grid
if ( present(nGhosts) ) then
! The ghosts can't overrun parent grid's ghost bounds
if ( is - nGhosts(NORTH) < pSG%isg ) then
write(*,*) "ERROR GenChildShellGrid: Child ghosts overrun parent ghosts in is direction"
stop
elseif (ie + nGhosts(SOUTH) > pSG%ieg+1) then
write(*,*) "ERROR GenChildShellGrid: Child ghosts overrun parent ghosts in ie direction"
stop
elseif (js - nGhosts(WEST ) < pSG%jsg ) then
write(*,*) "ERROR GenChildShellGrid: Child ghosts overrun parent ghosts in js direction"
stop
elseif (je + nGhosts(EAST ) > pSG%jeg+1) then
write(*,*) "ERROR GenChildShellGrid: Child ghosts overrun parent ghosts in je direction"
stop
endif
! Otherwise, this ghost definition is okay
call GenShellGrid(cSG, pSG%th(is:ie), pSG%ph(js:je), name, nGhosts=nGhosts, radO=pSG%radius)
else
! No ghosts defined, go with default
call GenShellGrid(cSG, pSG%th(is:ie), pSG%ph(js:je), name, radO=pSG%radius)
endif
cSG%isChild = .true.
cSG%parentName = pSG%name
cSG%bndis = is
cSG%bndie = ie
cSG%bndjs = js
cSG%bndje = je
end subroutine GenChildShellGrid
subroutine deallocShellGrid(sh)
!! Deallocates any allocated memory
type(ShellGrid_T), intent(inout) :: sh
if (allocated(sh%th  )) deallocate(sh%th)
if (allocated(sh%ph  )) deallocate(sh%ph)
if (allocated(sh%thc )) deallocate(sh%thc)
if (allocated(sh%phc )) deallocate(sh%phc)
if (allocated(sh%lat )) deallocate(sh%lat)
if (allocated(sh%latc)) deallocate(sh%latc)
end subroutine deallocShellGrid
end module shellGrid

View File

@@ -0,0 +1,670 @@
! Various data structures and routines to do interpolation from (and to?) a spherical shell grid
! Routines are fairly broken up to try and allow for a reasonable balance of flexibility and performance
! TODO: Add routine to take list of scattered points and interpolate to ShellGrid_T
module shellInterp
use kdefs
use math
use shellgrid
use shellutils
implicit none
integer, parameter, private :: NumTSC = 9
contains
! InterpShellVar - TODO
! InterpShellVar_ChildToParent - TODO
! InterpShellVar_ParentToChild - TODO
! InterpShellVar_TSC_SG
! InterpShellVar_TSC_pnt
subroutine InterpShellVar(sgSource, sgVar, sgDest, varOut)
!! This is meant to be the highest-abstraction option for interpolation
!! Not expected to be used very frequently, but maybe it makes sense for some simple variables
!! Basically, the source and destination are well-defined within ShellGrid's intended use,
!! so we can make the best decisions here on how to interpolate this variable
type(ShellGrid_T) , intent(in) :: sgSource
!! Source shellGrid that sgVar lives on
type(ShellGridVar_T), intent(in) :: sgVar
!! the variable we are interpolating
type(ShellGrid_T) , intent(in) :: sgDest
!! The destination grid
type(ShellGridVar_T), intent(inout) :: varOut
!! sgVar interpolated onto sgDest
end subroutine InterpShellVar
subroutine InterpShellVar_TSC_SG(sgSource, sgVar, sgDest, varOut, dThetaO, dPhiO)
!! Interpolate a ShellGridVar to another ShellGrid using the Triangle-Shaped Cloud method
type(ShellGrid_T) , intent(in) :: sgSource
!! Source shellGrid that sgVar lives on
type(ShellGridVar_T), intent(in) :: sgVar
!! the variable we are interpolating
type(ShellGrid_T) , intent(in) :: sgDest
!! The destination grid
type(ShellGridVar_T), intent(inout) :: varOut
!! sgVar interpolated onto sgDest
real(rp), dimension(:), optional, intent(in) :: dThetaO
!! Cell width in theta direction
real(rp), dimension(:), optional, intent(in) :: dPhiO
!! Cell width in phi direction
integer :: extraPnt
integer :: i,j
real(rp), dimension(:), allocatable :: dTheta
real(rp), dimension(:), allocatable :: dPhi
logical :: goodInterp
if ( present(dThetaO) ) then
! First, make sure they are the same shape
if (size(dThetaO) .ne. sgVar%Ni) then
write(*,*) "WARNING: InterpShellVar_TSC_SG got a mismatched dThetaO shape. Dying."
stop
endif
! Otherwise, we can safely copy one to the other
allocate(dTheta(sgVar%isv:sgVar%iev))
dTheta = dThetaO
else
! If dTheta not present, we calculate it ourselves
if (sgVar%loc == SHCC .or. sgVar%loc == SHFPH) then
! Note that the location id is the variable's 1D location w.r.t. the theta axis
call calcdx_TSC(sgSource%th, sgSource%isg, sgSource%ieg, SHCC, dTheta)
elseif (sgVar%loc == SHCORNER .or. sgVar%loc == SHFTH) then
call calcdx_TSC(sgSource%th, sgSource%isg, sgSource%ieg, SHCORNER, dTheta)
endif
!! Note: calcdx_TSC sets the dimension of dTheta and dPhi
endif
if ( present(dPhiO) ) then
! First, make sure they are the same shape
if (size(dPhiO) .ne. sgVar%Nj) then
write(*,*) "WARNING: InterpShellVar_TSC_SG got a mismatched dPhiO shape. Dying."
stop
endif
! Otherwise, we can safely copy one to the other
allocate(dPhi(sgVar%jsv:sgVar%jev))
dPhi = dPhiO
else
! If dPhi not present, we calculate it ourselves
if (sgVar%loc == SHCC .or. sgVar%loc == SHFTH) then
call calcdx_TSC(sgSource%ph, sgSource%jsg, sgSource%jeg, SHCC, dPhi)
elseif (sgVar%loc == SHCORNER .or. sgVar%loc == SHFPH) then
call calcdx_TSC(sgSource%ph, sgSource%jsg, sgSource%jeg, SHCORNER, dPhi)
endif
endif
! Now that we have our dTheta and dPhi, we can start interpolating
! Which destination grid locations we loop over depends on the destination variable location
select case(varOut%loc)
case(SHCC)
!do j=varOut%jsv,varOut%jev
! do i=varOut%isv,varOut%iev
!^^^ This indexing works just fine, but I'm not gonna do it cause it's less clear what we're actually looping over
!$OMP PARALLEL DO default(shared) collapse(1) &
!$OMP schedule(dynamic) &
!$OMP private(i,j)
do j=sgDest%jsg,sgDest%jeg
do i=sgDest%isg,sgDest%ieg
if (.not. varOut%mask(i,j)) cycle
! NOTE/TODO: This is where we would do transformations of destination grid's theta and phi to source grid
! in the case where they have different coordinate systems
call InterpShellVar_TSC_pnt( &
sgSource, sgVar, &
sgDest%thc(i), sgDest%phc(j), &
varOut%data(i,j), &
dTheta, dPhi, &
goodInterp)
! TODO: Handle case where goodInterp is false here
! Probably will be model dependent. Maybe we return a 2D goodInterp array if an optional array is provided to us
enddo
enddo
case(SHCORNER)
!$OMP PARALLEL DO default(shared) collapse(1) &
!$OMP schedule(dynamic) &
!$OMP private(i,j)
do j=sgDest%jsg,sgDest%jeg+1
do i=sgDest%isg,sgDest%ieg+1
if (.not. varOut%mask(i,j)) cycle
! NOTE/TODO: This is where we would do transformations of destination grid's theta and phi to source grid
! in the case where they have different coordinate systems
call InterpShellVar_TSC_pnt( &
sgSource, sgVar, &
sgDest%th(i), sgDest%ph(j), &
varOut%data(i,j), &
dTheta, dPhi, &
goodInterp)
enddo
enddo
case(SHFTH)
!$OMP PARALLEL DO default(shared) collapse(1) &
!$OMP schedule(dynamic) &
!$OMP private(i,j)
do j=sgDest%jsg,sgDest%jeg
do i=sgDest%isg,sgDest%ieg+1
if (.not. varOut%mask(i,j)) cycle
! NOTE/TODO: This is where we would do transformations of destination grid's theta and phi to source grid
! in the case where they have different coordinate systems
call InterpShellVar_TSC_pnt( &
sgSource, sgVar, &
sgDest%th(i), sgDest%phc(j), &
varOut%data(i,j), &
dTheta, dPhi, &
goodInterp)
enddo
enddo
case(SHFPH)
!$OMP PARALLEL DO default(shared) collapse(1) &
!$OMP schedule(dynamic) &
!$OMP private(i,j)
do j=sgDest%jsg,sgDest%jeg+1
do i=sgDest%isg,sgDest%ieg
if (.not. varOut%mask(i,j)) cycle
! NOTE/TODO: This is where we would do transformations of destination grid's theta and phi to source grid
! in the case where they have different coordinate systems
call InterpShellVar_TSC_pnt( &
sgSource, sgVar, &
sgDest%thc(i), sgDest%ph(j), &
varOut%data(i,j), &
dTheta, dPhi, &
goodInterp)
enddo
enddo
end select
! Fill j ghosts with active cell data
call wrapJ_SGV(sgDest, varOut)
end subroutine InterpShellVar_TSC_SG
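! Usage sketch (names hypothetical): remap a corner-centered variable potA from
! grid shA onto grid shB, letting the routine derive dTheta/dPhi itself:
!   call initShellVar(shB, SHCORNER, potB)
!   potB%mask = .true.
!   call InterpShellVar_TSC_SG(shA, potA, shB, potB)
! Only destination points with potB%mask set are filled; the rest are skipped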
subroutine InterpShellVar_TSC_pnt(sgsource, sgVar, th, pin, Qinterp, dThetaO, dPhiO, goodInterpO)
!! Given the source information, interpolate sgVar to point (t,p) and return as Qout
type(ShellGrid_T ), intent(in) :: sgSource
!! Source ShellGrid
type(ShellGridVar_T), intent(in) :: sgVar
!! Variable relative to provided ShellGrid
real(rp), intent(in) :: th
!! Theta coordinate with respect to source grid
real(rp), intent(in) :: pin
!! Phi coordinate with respect to source grid
real(rp), intent(out) :: Qinterp
!! Interpolated value we are returning
real(rp), dimension(sgVar%isv:sgVar%iev), optional, intent(in) :: dThetaO
!! Cell width in theta, centered at variable positions on source grid
real(rp), dimension(sgVar%jsv:sgVar%jev), optional, intent(in) :: dPhiO
!! Cell width in phi, centered at variable positions on source grid
logical, optional, intent(inout) :: goodInterpO
!! True if we are returning a meaningful interpolated value
real(rp) :: ph
!! Cleaned-up phi location we actually use
integer :: i0, j0
!! i and j locations of point t,p
!! Whether they are with respect to a corner, center, or face depends on sgVar%loc
real(rp) :: dTh, dPh
!! dTheta and dPhi at location i0,j0 , assuming we are in domain
real(rp) :: t0, p0
!! Theta and Phi values at point i0,j0
real(rp) :: eta, zeta
real(rp), dimension(-1:+1) :: wE,wZ
real(rp), dimension(NumTSC) :: Ws,Qs
logical , dimension(NumTSC) :: isGs
integer :: ipnt,jpnt,n,di,dj
! Default return values
Qinterp = 0.0
if (present(goodInterpO)) goodInterpO = .false.
! Check bounds
ph = modulo(pin,2*PI)
! Also make sure the requested theta is in bounds
if ( (th<0.).or.(th>PI) ) then
write(*,*) "ERROR in InterpShellVar_TSC_pnt: Theta should be in the range [0,PI]. Quitting..."
stop
end if
call getShellILoc(sgSource, sgVar%loc, th, i0, t0) ! Sets i0 and t0 to closest i/theta values to interp point th
call getShellJLoc(sgSource, sgVar%loc, ph, j0, p0) ! Sets j0 and p0 to closest j/phi values to interp point ph
if (i0 > sgVar%iev .or. i0 < sgVar%isv) then
return
endif
if (j0 > sgVar%jev .or. j0 < sgVar%jsv) then
write(*,*) "ERROR in InterpShellVar_TSC_pnt: Phi out of bounds. idx=",j0
write(*,*) "This wasn't supposed to be possible, good job."
return
endif
! Make sure data is good at this point
if (.not. sgVar%mask(i0,j0)) then
return
endif
! If still here we're gonna do something, so we can tell our caller we are returning a valid value
if (present(goodInterpO)) goodInterpO = .true.
! Determine dTheta and dPhi for this ij point
if (present(dThetaO)) then
! First, make sure dTheta and dPhi are defined at sgVar locations
if ( size(dThetaO) .ne. sgVar%Ni ) then
write(*,*)"ERROR in InterpShellVar_TSC_pnt: dTheta != sgVar%Ni"
write(*,*) size(dThetaO), sgVar%Ni
stop
endif
dTh = dThetaO(i0)
else
! dThetaO array not provided, so we calculate it ourselves
if (sgVar%loc == SHCC .or. sgVar%loc == SHFPH) then
dTh = Diff1D_4halfh(sgSource%th, sgSource%isg, sgSource%ieg , i0)
else if (sgVar%loc == SHCORNER .or. sgVar%loc == SHFTH) then
dTh = Diff1D_4h (sgSource%th, sgSource%isg, sgSource%ieg+1, i0)
endif
endif
if (present(dPhiO)) then
if ( size(dPhiO) .ne. sgVar%Nj ) then
write(*,*)"ERROR in InterpShellVar_TSC_pnt: dPhi != sgVar%Nj"
write(*,*) size(dPhiO), sgVar%Nj
stop
endif
dPh = dPhiO(j0)
else
! dPhiO array not provided, so we calculate it ourselves
if (sgVar%loc == SHCC .or. sgVar%loc == SHFTH) then
dPh = Diff1D_4halfh(sgSource%ph, sgSource%jsg, sgSource%jeg , j0)
else if (sgVar%loc == SHCORNER .or. sgVar%loc == SHFPH) then
dPh = Diff1D_4h (sgSource%ph, sgSource%jsg, sgSource%jeg+1, j0)
endif
endif
! First, if active grid has poles
if (sgSource%doNP .and. (i0==sgSource%is)) then
! Handle north pole and return
call interpPole(sgSource,sgVar,th,ph,Qinterp)
write(*,*) "Not implemented!"
stop
endif
if (sgSource%doSP .and. (i0==sgSource%ie)) then
! Handle south pole and return
write(*,*) "Not implemented!"
stop
endif
! Now, if ghost grid has poles
if (sgSource%ghostSP .and. (i0==sgsource%ieg)) then
write(*,*) "Not implemented!"
stop
endif
if (sgSource%ghostNP .and. (i0==sgSource%isg)) then
write(*,*) "Not implemented!"
stop
endif
! Note: If still here we know i0 isn't on the boundary
eta = (th - t0)/dTh
zeta = (ph - p0)/dPh
call ClampMapVar(eta)
call ClampMapVar(zeta)
! Calculate weights
call TSCweight1D(eta ,wE)
call TSCweight1D(zeta,wZ)
! Collect weights/values
n = 1
do dj=-1,+1
do di=-1,+1
ipnt = i0+di
jpnt = j0+dj
! Wrap around boundary
if (jpnt<1) jpnt = sgSource%Np
if (jpnt>sgSource%Np) jpnt = 1
! Do zero-grad for theta
if (ipnt<1) ipnt = 1
if (ipnt>sgSource%Nt) ipnt = sgSource%Nt
Qs(n) = sgVar%data(ipnt,jpnt)
Ws(n) = wE(di)*wZ(dj)
isGs(n) = sgVar%mask(ipnt,jpnt)
if (.not. isGs(n)) Ws(n) = 0.0
n = n + 1
enddo
enddo
! Renormalize
Ws = Ws/sum(Ws)
! Get final value
Qinterp = dot_product(Qs,Ws)
! Have some internal functions
contains
! Clamps mapping in [-0.5,0.5]
subroutine ClampMapVar(ez)
REAL(rp), intent(inout) :: ez
if (ez<-0.5) ez = -0.5
if (ez>+0.5) ez = +0.5
end subroutine ClampMapVar
! 1D triangular shaped cloud weights
! 1D weights for triangular shaped cloud interpolation
! Assuming on -1,1 reference element, dx=1
! Check for degenerate cases ( |eta| > 0.5 )
subroutine TSCweight1D(eta,wE)
real(rp), intent(in) :: eta
real(rp), intent(out) :: wE(-1:1)
wE(-1) = 0.5*(0.5-eta)**2.0
wE( 1) = 0.5*(0.5+eta)**2.0
wE( 0) = 0.75 - eta**2.0
end subroutine TSCweight1D
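! Sanity note: these weights sum to one for any eta in [-0.5,0.5],
!   0.5*(0.5-eta)**2 + (0.75 - eta**2) + 0.5*(0.5+eta)**2 = 1
! so the renormalization above only changes the result when masked-out
! points have had their weights zeroed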
end subroutine InterpShellVar_TSC_pnt
subroutine getShellILoc(shGr, varLoc, t, iLoc, tLocO)
type(ShellGrid_T), intent(in) :: shGr
integer, intent(in) :: varLoc
!! Location id of the source variable
real(rp), intent(in) :: t
integer, intent(out) :: iLoc
real(rp), optional, intent(out) :: tLocO
real(rp) :: tLoc
if (varLoc == SHCC .or. varLoc == SHFPH) then
!! Variable is defined at center w.r.t. theta direction
if ( (t>shGr%maxGTheta) ) then
iLoc = shGr%ieg+ceiling((t-shGr%maxGTheta)/(shGr%th(shGr%ieg+1)-shGr%th(shGr%ieg)))
tLoc = shGr%thc(shGr%ieg) ! Just return the last available theta value
!write(*,*)"theta going out of bounds",t,shGr%maxGTheta
else if ( (t<shGr%minGTheta) ) then
iLoc = shGr%isg-ceiling((shGr%minGTheta-t)/(shGr%th(shGr%isg+1)-shGr%th(shGr%isg)))
tLoc = shGr%thc(shGr%isg)
!write(*,*)"theta going out of bounds",t,shGr%minGTheta
else
! If still here then the theta bounds are okay, find closest theta cell center
iLoc = shGr%isg - 1 + minloc( abs(shGr%thc-t),dim=1 ) ! minloc is 1-based, shift by array lower bound
tLoc = shGr%thc(iLoc)
endif
if (present(tLocO)) then
tLocO = tLoc
endif
elseif (varLoc == SHCORNER .or. varLoc == SHFTH) then
!! Variable is defined at corners w.r.t. theta direction
if ( (t>shGr%maxTheta) ) then
iLoc = shGr%ieg+1 + floor( 0.5 + (t-shGr%maxGTheta)/(shGr%th(shGr%ieg+1)-shGr%th(shGr%ieg)) )
tLoc = shGr%th(shGr%ieg+1) ! Just return the last available theta value
!write(*,*)"theta going out of bounds",t,shGr%maxGTheta
else if ( (t < shGr%minTheta)) then
iLoc = shGr%isg - floor( 0.5 + (shGr%minGTheta-t)/(shGr%th(shGr%isg+1)-shGr%th(shGr%isg)) )
tLoc = shGr%th(shGr%isg)
!write(*,*)"theta going out of bounds",t,shGr%maxGTheta
else
! If still here then the theta bounds are okay, find closest theta cell corner
iLoc = shGr%isg - 1 + minloc( abs(shGr%th-t),dim=1 ) ! minloc is 1-based, shift by array lower bound
tLoc = shGr%th(iLoc)
endif
if (present(tLocO)) then
tLocO = tLoc
endif
endif
end subroutine getShellILoc
subroutine getShellJLoc(shGr, varLoc, pin, jLoc, pLoc)
type(ShellGrid_T), intent(in) :: shGr
integer, intent(in) :: varLoc
!! Location id of the source variable
real(rp), intent(in) :: pin
integer, intent(out) :: jLoc
real(rp), optional, intent(out) :: pLoc
real(rp) :: p, dp, dJ
p = modulo(pin,2*PI)
! note, shellGrid only implements [0,2pi] grids
! but do this check here in case it's needed in the future
if ( (p>shGr%maxPhi) .or. (p<shGr%minPhi) ) then
! Point not on this grid, get outta here
write(*,*) "ERROR in getShellJLoc, phi outside of bounds"
write(*,*) p, shGr%minPhi, shGr%maxPhi
stop
endif
if (varLoc == SHCC .or. varLoc == SHFTH) then
!! Variable is defined at centers w.r.t. phi direction
if (shGr%isPhiUniform) then
! note this is faster, thus preferred
dp = shGr%phc(2)-shGr%phc(1)
dJ = p/dp
jLoc = floor(dJ) + 1
else
jLoc = minloc( abs(shGr%phc-p),dim=1 ) ! Find closest phi cell center
endif
if (present(pLoc)) then
pLoc = shGr%phc(jLoc)
endif
elseif (varLoc == SHCORNER .or. varLoc == SHFPH) then
!! Variable is defined at corners w.r.t. phi direction
if (shGr%isPhiUniform) then
! note this is faster, thus preferred
dp = shGr%ph(2)-shGr%ph(1)
dJ = p/dp + 0.5
jLoc = floor(dJ) + 1
else
jLoc = minloc( abs(shGr%ph-p),dim=1 ) ! Find closest phi cell corner
endif
if (present(pLoc)) then
pLoc = shGr%ph(jLoc)
endif
endif
end subroutine getShellJLoc
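! Worked example of the uniform-phi fast path above (hypothetical numbers):
! with Np=8, dp = 2*pi/8 ~ 0.7854 and centers phc(j) = (j-0.5)*dp;
! for p = 1.0, floor(p/dp) + 1 = floor(1.2732) + 1 = 2, and
! minloc(abs(phc-p)) = 2 as well, since with half-offset centers the
! containing cell is also the nearest-center cell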
!! Big TODO here
subroutine interpPole(shGr,Qin,t,pin,Qinterp)
type(ShellGrid_T), intent(in) :: shGr
type(ShellGridVar_T), intent(in) :: Qin
real(rp), intent(out) :: Qinterp
real(rp), intent(in) :: t,pin
real(rp) :: f0,f1,f2,I1,I2
integer :: j,pole,iind ! the latter is the index of the pole cell (first/last for NORTH/SOUTH)
integer :: jpi2,jpi32,jpi ! which cell do pi/2, 3pi/2 and pi points belong to
Qinterp = 0.0
! first, find out which pole we're at
! note, if we're inside this function, we already know we're at one of the poles
if ( (t.ge.0).and.(t.le.shGr%th(shGr%is+1)) ) then
pole = NORTH
iind = shGr%is
else if ( (t.le.PI).and.(t.ge.shGr%th(shGr%ie)) ) then
pole = SOUTH
iind = shGr%ie
else
write(*,*) "Inside interpPole. Shouldn't be here. Quitting..."
stop
end if
write(*,*) 'which pole ',pole,iind
! represent the function near pole to first order in theta as
! f(t,p) = f0 + f1*cos(p)*t + f2*sin(p)*t
! (Lewis&Bellan, J. Math. Phys. 31, 2592 (1990);
! https://doi.org/10.1063/1.529009
!
! then, 2pi*f0 = \int_0^2pi f(t(i=1),p)dp, where t(i=1) is the cell center of first cell in i-direction
! to calculate f1, note that
! \int_(-pi/2)^(pi/2) f(t,p)dp =  2*f1*t + f0*pi
! \int_(pi/2)^(3pi/2) f(t,p)dp = -2*f1*t + f0*pi
! so compute I1 = 0.5*( \int_(-pi/2)^(pi/2) - \int_(pi/2)^(3pi/2) ) = 2*f1*t
! to take all points around the ring into account
! then f1 = I1/(2*t)
!
! similarly,
! f2 = I2/(2*t), where
! I2 = 0.5*(\int_0^pi f(t,p)dp - \int_pi^(2pi) f(t,p)dp) = 2*f2*t
f0 = 0.
do j=1,shGr%Np
f0 = f0 + (shGr%ph(j+1)-shGr%ph(j))*Qin%data(iind,j)/(2.*PI)
! find which cells do pi/2, 3pi/2 and pi points belong to
! this can be done a priori but it doesn't add to the computation
if ( (shGr%ph(j).le.0.5*pi).and.(shGr%ph(j+1).gt.0.5*pi) ) jpi2 = j
if ( (shGr%ph(j).le. pi).and.(shGr%ph(j+1).gt. pi) ) jpi = j
if ( (shGr%ph(j).le.1.5*pi).and.(shGr%ph(j+1).gt.1.5*pi) ) jpi32 = j
end do
write(*,*) 'pi indices ',jpi2,jpi,jpi32
end subroutine interpPole
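! Worked example of the moment recovery (hypothetical numbers, not repo code):
! take f(p) = 2 + 0.05*cos(p) - 0.1*sin(p), i.e. f0=2, f1=0.5, f2=-1 at t=0.1
!   \int_0^2pi f dp = 4*pi -> f0 = 2
!   I1 = 0.5*( (f0*pi + 2*f1*t) - (f0*pi - 2*f1*t) ) = 2*f1*t =  0.1 -> f1 = I1/(2*t) =  0.5
!   I2 = 0.5*( (f0*pi + 2*f2*t) - (f0*pi - 2*f2*t) ) = 2*f2*t = -0.2 -> f2 = I2/(2*t) = -1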
!------
! Low-level interp helpers
!------
subroutine calcdx_TSC(x, isg, ieg, loc, dx)
!! Calculates dx for positional array x using a 4-point stencil
!! This is desirable for TSC so that we can have better accuracy
!! in the case of a non-uniformly spaced grid
! Note, even though we are not doing much in this function, we are
! breaking it out here in case we want to do higher-order for TSC or something later
real(rp), dimension(isg:ieg+1), intent(in) :: x
!! 1D spatial grid, should always be cell corners
integer, intent(in) :: isg, ieg
!! Start/end indices of cell centers
integer, intent(in) :: loc
!! Location identifier, MUST BE SHCC OR SHCORNER
!! This is the location of the points we are calculating dx at relative to x
real(rp), dimension(:), allocatable, intent(out) :: dx
!! 'cell width' we return
integer :: i
if (allocated(dx)) deallocate(dx)
if (loc == SHCORNER) then
allocate(dx(isg:ieg+1))
do i=isg,ieg+1
dx(i) = Diff1D_4h(x, isg, ieg+1, i)
enddo
else if (loc == SHCC) then
allocate(dx(isg:ieg))
do i=isg,ieg
dx(i) = Diff1D_4halfh(x, isg, ieg, i)
enddo
else
write(*,*) "ERROR: Invalid location id in calcdx_TSC. Must be SHCC or SHCORNER"
stop
endif
end subroutine calcdx_TSC
function Diff1D_4halfh(Q,is,ie,i0) result(Qp)
!! Use 4-point stencil to calculate first derivative of coordinates
!! This is for the case where we are using corners to calculate the difference at cell center
!! adapted from chimp/ebinit.F90
real(rp), intent(in) :: Q(is:ie+1)
!! Cell corners
integer, intent(in) :: is,ie
!! Start and end indices of bounding grid
integer, intent(in) :: i0
!! Index of the point we are evaluating, offset half a cell from its lower bound Q(i0)
real(rp) :: Qp
real(rp) :: Qblk(4),c(4)
! Note that we have fewer cases than Diff1D_4h
! That's because even when i0 is the last cell-centered coordinate,
! we still have 1 usable corner value before we reach the void
if (i0 == is) then
! Q coordinates at -0.5,0.5,1.5,2.5 relative to our point
Qblk = [Q(is), Q(is+1), Q(is+2), Q(is+3)]
c = [-23.0, 21.0, 3.0, -1.0]/24.0 ! normalized by 24 so the weights reproduce d/dx exactly for linear x
else if (i0 == ie) then
! Q coordinates at -2.5,-1.5,-0.5,0.5 relative to our point
Qblk = [Q(ie-2), Q(ie-1), Q(ie), Q(ie+1)]
c = [1.0, -3.0, -21.0, 23.0]/24.0
else
! Q coordinates at -1.5,-0.5,0.5,1.5 relative to our point
Qblk = [Q(i0-1), Q(i0), Q(i0+1), Q(i0+2)]
c = [1.0, -27.0, 27.0, -1.0]/24.0
endif
Qp = dot_product(Qblk,c)
end function Diff1D_4halfh
function Diff1D_4h(Q,is,ie,i0) result(Qp)
!! Use 4-point stencil to calculate first derivative of coordinates
!! This is for the case where position we are calculating the difference for is at Q(i0)
!! (In contrast to e.g. using cell corner values to calculate the difference located at the cell center)
!! adapted from chimp/ebinit.F90
real(rp), intent(in) :: Q(is:ie)
!! Cell corners
integer, intent(in) :: is,ie,i0
real(rp) :: Qp
real(rp) :: Qblk(4),c(4)
if (i0 == is) then
!Forward
Qblk = [Q(is),Q(is+1),Q(is+2),Q(is+3)]
c = [-11.0,18.0,-9.0,2.0]/6.0
else if (i0 == is+1) then
!1 back
Qblk = [Q(is),Q(is+1),Q(is+2),Q(is+3)]
c = [-2.0,-3.0,6.0,-1.0]/6.0
else if (i0 == ie) then
Qblk = [Q(ie-3),Q(ie-2),Q(ie-1),Q(ie)]
c = [-2.0,9.0,-18.0,11.0]/6.0
else if (i0 == ie-1) then
Qblk = [Q(ie-3),Q(ie-2),Q(ie-1),Q(ie)]
c = [1.0,-6.0,3.0,2.0]/6.0
else
!Centered
Qblk = [Q(i0-2),Q(i0-1),Q(i0+1),Q(i0+2)]
c = [1.0,-8.0,8.0,-1.0]/12.0
endif
Qp = dot_product(Qblk,c)
end function Diff1D_4h
end module shellInterp
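As a quick cross-check of the stencil coefficients above, here is a minimal standalone sketch (hypothetical, not part of the repo; plain double precision in place of rp). Both stencils are exact for cubics on a unit grid, so sampling f(x) = (x+1)**3 must return f'(0) = 3 to round-off:

program check_stencils
  implicit none
  double precision :: Q(4), c(4)
  ! Centered Diff1D_4h weights: nodes at x = -2,-1,+1,+2, derivative at x = 0
  Q = [(-1.0d0)**3, (0.0d0)**3, (2.0d0)**3, (3.0d0)**3]   ! f(x) = (x+1)**3
  c = [1.0d0, -8.0d0, 8.0d0, -1.0d0]/12.0d0
  write(*,*) "centered  f'(0) =", dot_product(Q,c), " (expect 3.0)"
  ! One-sided Diff1D_4halfh weights: nodes at x = -0.5,0.5,1.5,2.5
  Q = [(0.5d0)**3, (1.5d0)**3, (2.5d0)**3, (3.5d0)**3]
  c = [-23.0d0, 21.0d0, 3.0d0, -1.0d0]/24.0d0
  write(*,*) "one-sided f'(0) =", dot_product(Q,c), " (expect 3.0)"
end program check_stencils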

View File

@@ -0,0 +1,38 @@
! Heap of misc. helpful functions for ShellGrids and ShellGridVars
module shellUtils
use shellGrid
implicit none
contains
subroutine wrapJ_SGV(sh, sgVar)
!! Wrap a ShellGridVar around the periodic j/phi boundary
!! Assumes all vars within active domain are valid to wrap
!! Note: anything in the i-direction ghost cells gets wrapped too
type(ShellGrid_T), intent(in) :: sh
type(ShellGridVar_T), intent(inout) :: sgVar
associate(Q => sgVar%data)
! Theta faces are cell-centered w.r.t. j direction
if (sgVar%loc == SHCC .or. sgVar%loc == SHFTH) then
! Starting ghost cells
Q(:, sh%jsg:sh%js-1) = Q(:, sh%je-sh%Ngw+1:sh%je)
! Ending ghosts cells
Q(:, sh%je+1:sh%jeg) = Q(:, sh%js:sh%js+sh%Nge-1)
elseif (sgVar%loc==SHCORNER .or. sgVar%loc == SHFPH) then
! Starting ghost cells
Q(:, sh%jsg:sh%js) = Q(:, sh%je-sh%Ngw+1:sh%je+1)
! Ending ghost cells
Q(:, sh%je+1:sh%jeg+1) = Q(:, sh%js:sh%js+sh%Nge)
endif
end associate
end subroutine wrapJ_SGV
end module shellUtils
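To make the index bookkeeping above concrete, here is a minimal standalone sketch (hypothetical driver, not part of the repo) of the cell-centered wrap for one row, with Np=4 active cells and one ghost on each side (Ngw=Nge=1):

program wrap_demo
  implicit none
  integer :: Q(0:5)          ! jsg=0, js=1, je=4, jeg=5
  Q = 0
  Q(1:4) = [10, 20, 30, 40]  ! active cells
  Q(0) = Q(4)                ! Q(:, jsg:js-1) = Q(:, je-Ngw+1:je)
  Q(5) = Q(1)                ! Q(:, je+1:jeg) = Q(:, js:js+Nge-1)
  write(*,*) Q               ! prints 40 10 20 30 40 10
end program wrap_demo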

View File

@@ -2,6 +2,7 @@ module strings
use kdefs
use, intrinsic :: iso_fortran_env
use dates, ONLY : DateTimeStr
use git_info, only: gitB, gitH
implicit none
contains
@@ -80,39 +81,16 @@ module strings
subroutine GitHash(gStr)
character(len=*), intent(inout) :: gStr
gStr = gitH
end subroutine GitHash
!Create string with git branch if possible
subroutine GitBranch(gStr)
character(len=*), intent(inout) :: gStr
gStr = gitB
end subroutine GitBranch

View File

@@ -31,7 +31,6 @@ module calcdbtypes
logical :: doPed,doHall
end type rmState_T
INTEGER, parameter :: NORTH=1,SOUTH=2
!--------

View File

@@ -116,6 +116,9 @@ module gamtypes
logical :: isMagsphere = .false.
real(rp) :: MagM0 = 0.0
!LFM vs. new-style rec/lim
logical :: doLFMLim = .true. !Use state n to limit state n+1/2
!Timestep stuff
real(rp) :: limDT0 = 1.0e-3 !Ratio of dt0 to die
logical :: doCPR = .false. !Whether to try and fix low timesteps

View File

@@ -29,6 +29,7 @@ module gcmtypes
real(rp), dimension(:), allocatable :: time,lev,lon,clat,lat
real(rp),dimension(:,:),allocatable :: gx,gy
real(rp),dimension(:,:,:),allocatable :: glon,gclat
real(rp), dimension(:,:,:), allocatable :: invar2d, outvar2d
integer :: cplStep = 1
!character(len=strlen) :: mix2gcmH5,gcm2mixH5,mix2gcmLock,gcm2mixLock
character(len=strlen) :: mix2gcmH5 = "mix4gcm.h5"

View File

@@ -29,6 +29,7 @@ module mixtypes
logical :: doAuroralSmooth
logical :: apply_cap
logical :: doSWF107
logical :: doEMA
! solver
integer :: maxitr
@@ -101,7 +102,7 @@ module mixtypes
type mixConductance_T
integer :: euv_model_type, et_model_type, aurora_model_type
real(rp) :: alpha, beta, R, F107,pedmin,hallmin,sigma_ratio,ped0, alphaZ, betaZ
logical :: const_sigma, doRamp, doChill, doStarlight, apply_cap, doMR, doAuroralSmooth, doEMA
! arrays on the grid
real(rp), dimension(:,:), allocatable :: zenith, coszen
@@ -121,7 +122,8 @@ module mixtypes
type(mixParams_T) :: P
type(Solver_T) :: S
type(mixConductance_T) :: conductance
real(rp) :: rad_iono_m ! Ionosphere radius in meters
real(rp) :: rad_planet_m ! Planet radius in meters
end type mixIon_T
! used to store all instances of mixIon type, i.e., all hemispheres

View File

@@ -42,7 +42,7 @@ module volttypes
!latc/lonc are the mappings to the southern hemisphere
real(rp), dimension(:,:), allocatable :: latc,lonc
real(rp), dimension(:,:), allocatable :: fac
real(rp), dimension(:,:), allocatable :: eden,epre,npsp ! add electron density, pressure, plasmasphere density channels to REMIX.
integer , dimension(:,:), allocatable :: gtype ! RCM grid info: active, buffer, or outside
logical, dimension(:,:), allocatable :: inIMag

View File

@@ -15,7 +15,7 @@ module calcdbio
implicit none
character(len=strLen), private :: dbOutF
integer, parameter, private :: MAXDBVS = 80
integer, parameter, private :: RDIR=1,TDIR=2,PDIR=3
real(rp), private :: dzGG = 10.0 !Default height spacing [km]
real(rp), private :: z0 = 0.0 !Starting height [km]

View File

@@ -153,21 +153,29 @@ module chmpfields
!Convert to typical SI A/m2
jScl = (1.0e-9)
case("CODE")
if ( trim(toUpper(Model%uID))=="EARTH") then
! note, using an ugly way to access B and L scaling by using in2G and L0 (globals from chmpunits)
! FIXME: make chimp unit treatment more elegant, like gamera
jScl = in2G*1.e-5/(L0*Mu0)*1.0e-9 ! 1.e-5 to convert from G to nT and 1.e-9 to convert the result from nA to A
write(*,*) "WARNING: Only code units of current density specified (probably an old run), trying sensible values. Fingers crossed :)"
else
!Not (yet) supported units
write(*,*) "------------------------"
write(*,*) "Error, units of current density are not [nA/m2] !"
write(*,*) "Units: ", trim(toUpper(ebIOs(nioJx)%unitStr))
write(*,*) "This is likely because the GAMERA simulation is too old."
write(*,*) "Either regenerate the MHD data or add the proper unit scaling."
write(*,*) "Womp womp womp ..."
write(*,*) "------------------------"
stop
end if
case default
write(*,*) "------------------------"
write(*,*) "Error, no units of current density specified. Bailing ..."
write(*,*) "Consult https://wompwompwomp.com"
write(*,*) "------------------------"
stop
end select
endif

View File

@@ -269,7 +269,6 @@ module ebinterp
!Do star fields if necessary
if (doJacob) then
!Do Jacobians and time derivatives
if (isAxis) then
!If on axis then do some trickery
@@ -305,10 +304,10 @@ module ebinterp
!Interpolate dJac across the axis
!Add JacB0 at true point
gcFields%JacE = wAx*gcFieldsAxP%JacE + (1-wAx)*gcFieldsAxM%JacE
gcFields%JacB = wAx *( gcFieldsAxP%JacB - Model%JacB0(Xp) ) + &
(1-wAx)*( gcFieldsAxM%JacB - Model%JacB0(Xm) ) + &
Model%JacB0(xyz)
else
!Otherwise do standard thing
@@ -341,25 +340,25 @@ module ebinterp
!Do main calculation
do m=1,NDIM !Derivative direction (x,y,z)
do n=1,NDIM !Vector component
gcFields%JacB(n,m) = wT1*( Tix(IDIR,m)*sum(eW*dB1(:,:,:,n)) &
+Tix(JDIR,m)*sum(zW*dB1(:,:,:,n)) &
+Tix(KDIR,m)*sum(pW*dB1(:,:,:,n)) ) &
+ wT2*( Tix(IDIR,m)*sum(eW*dB2(:,:,:,n)) &
+Tix(JDIR,m)*sum(zW*dB2(:,:,:,n)) &
+Tix(KDIR,m)*sum(pW*dB2(:,:,:,n)) )
gcFields%JacE(n,m) = wT1*( Tix(IDIR,m)*sum(eW* E1(:,:,:,n)) &
+Tix(JDIR,m)*sum(zW* E1(:,:,:,n)) &
+Tix(KDIR,m)*sum(pW* E1(:,:,:,n)) ) &
+ wT2*( Tix(IDIR,m)*sum(eW* E2(:,:,:,n)) &
+Tix(JDIR,m)*sum(zW* E2(:,:,:,n)) &
+Tix(KDIR,m)*sum(pW* E2(:,:,:,n)) )
enddo
enddo
!Add background to dJacB
gcFields%JacB = gcFields%JacB + Model%JacB0(xyz)
endif !isAxis
!Time derivatives
@@ -377,11 +376,10 @@ module ebinterp
enddo
!Replace with CurlE if option says to
if (doCurldbdt) then
gcFields%DotB = -Jac2Curl(gcFields%JacE)
endif
endif
endif !doJacob
end associate !Main associate
@@ -472,4 +470,4 @@ module ebinterp
end subroutine GetTWgts
end module ebinterp

View File

@@ -8,6 +8,7 @@ module sliceio
use xml_input
use files
use plasmaputils
use parintime
implicit none
@@ -42,7 +43,9 @@ module sliceio
character(len=strLen) :: idStr
logical :: doLog
!write(ebOutF,'(2a)') trim(adjustl(Model%RunID)),'.eb.h5'
!Check for time parallelism
call InitParInTime(Model,inpXML,"eb",ebOutF)
associate( ebGr=>ebState%ebGr )
@@ -240,10 +243,10 @@ module sliceio
real(rp), dimension(:,:), allocatable :: Vr,Lb,LbXY,dLpp,rCurv
integer :: i,j
real(rp), dimension(NDIM) :: xp,xm,dB,Ep,Em,Bp,Bm,Eeq,Beq,B
real(rp) :: MagB,MagJ,oVGScl
real(rp), dimension(NVARMHD) :: Qij
type(gcFields_T) :: gcFieldsP,gcFieldsM,gcFieldsEq
real(rp), dimension(NDIM,NDIM) :: jB
!Data for tracing
@@ -283,7 +286,7 @@ module sliceio
endif
!$OMP PARALLEL DO default(shared) collapse(2) &
!$OMP schedule(dynamic) &
!$OMP private(i,j,xp,xm,Bp,Bm,Ep,Em,dB,Qij,gcFieldsP,gcFieldsM,jB,MagB,MagJ,B,Eeq,Beq,gcFieldsEq)
do j=1,Nx2
do i=1,Nx1
!Straddle slice plane
@@ -309,9 +312,6 @@ module sliceio
MagB = norm2(B)
MagJ = oBScl*sqrt(sum(jB**2.0))
!Get MHD vars if requested
if (Model%doMHD) then
!Qij = mhdInterp(xp,Model%t,Model,ebState)
@@ -338,6 +338,9 @@ module sliceio
!Get field line topology stuff
call SliceFL(Model,ebState,0.5*(xp+xm),Model%t,ebTrcIJ(i,j))
call ebFields(ebTrcIJ(i,j)%MagEQ,Model%t,Model,ebState,Eeq,Beq,gcFields=gcFieldsEq)
! radius of curvature
rCurv(i,j) = getRCurv(Beq,gcFieldsEq%JacB)
endif
if (Model%doPP) then
@@ -372,7 +375,7 @@ module sliceio
call AddOutVar(IOVars,"bD" ,ebTrcIJ(:,:)%bD )
call AddOutVar(IOVars,"bP" ,ebTrcIJ(:,:)%bP )
call AddOutVar(IOVars,"bS" ,ebTrcIJ(:,:)%bS )
call AddOutVar(IOVars,"bMin",ebTrcIJ(:,:)%bMin)
call AddOutVar(IOVars,"bMin",ebTrcIJ(:,:)%bMin*oBscl)
!Equator and end-points
call AddOutVar(IOVars,"xBEQ",ebTrcIJ(:,:)%MagEQ(XDIR))

View File

@@ -255,4 +255,17 @@ module starter
end select
end subroutine setInterpolation
!Set streamline knobs
subroutine setStreamline(Model,inpXML)
USE streamline , ONLY : setShue
USE streamutils, ONLY : setStreamlineKnobs
type(chmpModel_T), intent(inout) :: Model
type(XML_Input_T), intent(inout) :: inpXML
!Set knobs for streamline tracing
call setStreamlineKnobs(Model,inpXML)
!Set things in Shue
call setShue(Model,inpXML)
end subroutine setStreamline
end module starter

View File

@@ -9,12 +9,28 @@ module streamline
implicit none
real(rp), private :: ShueScl = 2.0 !Safety factor for Shue MP
real(rp), private :: rShue = 6.0 !Radius to start checking Shue
integer , private :: NpChk = 10 !Cadence for Shue checking
contains
subroutine setShue(Model,inpXML)
type(chmpModel_T), intent(inout) :: Model
type(XML_Input_T), intent(inout) :: inpXML
if (Model%isMAGE .and. (trim(toUpper(Model%uID)) == "EARTHCODE")) then
!This is for Earth and we're running in tandem w/ mage
!Setup shue for short-circuiting
write(*,*) "Initializing SHUE-MP checking ..."
call inpXML%Set_Val(ShueScl,'streamshue/ShueScl' ,ShueScl)
call inpXML%Set_Val(rShue ,'streamshue/rShue' ,rShue )
call inpXML%Set_Val(NpChk ,'streamshue/NpChk' ,NpChk )
else
!Otherwise don't care about Shue
rShue = HUGE
endif
end subroutine setShue
!doNHO = T, assume doing RCM coupling
subroutine genStream(Model,ebState,x0,t,fL,MaxStepsO,doShueO,doNHO)
real(rp), intent(in) :: x0(NDIM),t
@@ -706,7 +722,9 @@ module streamline
if (inDom) Np = Np + 1
enddo
! check if exceeded tube bounds
if(Np > MaxFL) then
Np = MaxFL
!$OMP CRITICAL
write(*,*) ANSIRED
write(*,*) "<WARNING! genTrace hit max tube size!>"

View File

@@ -37,8 +37,38 @@ module streamutils
procedure(OneStep_T), pointer :: StreamStep=>Step_MAGE
!Some knobs for tracing cut-off
real(rp), private :: bMinC !Min allowable field strength (in chimp units)
contains
subroutine setStreamlineKnobs(Model,inpXML)
type(chmpModel_T), intent(inout) :: Model
type(XML_Input_T), intent(inout) :: inpXML
character(len=strLen) :: sStr
real(rp) :: bMin_nT
if (Model%isMAGE) then
call inpXML%Set_Val(sStr,'streamline/steptype',"MAGE")
else
call inpXML%Set_Val(sStr,'streamline/steptype',"RK4L")
endif
select case(trim(toUpper(sStr)))
case("MAGE")
StreamStep=>Step_MAGE
case("RK4L")
StreamStep=>Step_RK4L
end select
if (Model%isMAGE) then
call inpXML%Set_Val(bMin_nT,"/Kaiju/voltron/imag/bMin_C",TINY)
else
bMin_nT = 0.0 !Don't do this for non-mage case
endif
!Convert bMin from nT to chimp eb units
bMinC = bMin_nT/oBScl
end subroutine setStreamlineKnobs
!Combines speedy method away from the axis w/ more careful stepping nearby
subroutine Step_MAGE(gpt,Model,ebState,eps,h,dx,iB,oB)
type(GridPoint_T), intent(inout) :: gpt
@@ -54,13 +84,20 @@ module streamutils
if ( (.not. present(iB)) .or. (.not. present(oB)) ) then
write(*,*) "Non-optional error in step_mage"
stop
endif
!Test field magnitude
if (norm2(iB) <= bMinC) then
!Get outta here
dx = 0.0
h = 0.0
if (present(oB)) oB = 0.0
return
endif
j0 = gpt%ijkG(JDIR)
! !Get approx number of rings, 4/8/12/16 (DQOH)
! Nr = nint( 4*( log(1.0*ebState%ebGr%Nkp/64.0)/log(2.0) + 1) )
Nr = 2 !Number of rings to treat carefully
isAxS = (j0 < ebState%ebGr%js+Nr)
isAxE = (j0 > ebState%ebGr%je-Nr)

144
src/drivers/testNewRMS.F90 Normal file
View File

@@ -0,0 +1,144 @@
! Simple driver to test remix reader into ShellGrid
program testNewRMS
use kdefs
use XML_Input
use remixReader
use shellInterp
use ioh5
implicit none
character(len=strLen) :: xmlStr = 'testRM.xml'
character(len=strLen) :: ftag = 'msphere'
character(len=100) :: fOutname = "rms.h5"
type(XML_Input_T) :: inpXML
type(rmReader_T) :: rmReader
integer :: i
real(rp) :: dt, t
inpXML = New_XML_Input(trim(xmlStr),"Kaiju/REMIX",.true.)
call initRM(ftag, inpXML, rmReader)
call dump(fOutname, rmReader%shGr)
dt = 0.5 * (rmReader%rmTab%times(rmReader%rmTab%N) - rmReader%rmTab%times(rmReader%rmTab%N-1))
write(*,*)"dt=",dt
t = rmReader%rmTab%times(2)
write(*,*)t
!do while (t < dt*20)
do while (t < rmReader%rmTab%times(rmReader%rmTab%N) + 4*dt)
call updateRM(rmReader, t)
call testShellInterp(rmReader)
t = t + dt
enddo
contains
subroutine testShellInterp(rmS)
type(rmReader_T), intent(in) :: rmS
type(ShellGridVar_T) :: tmpVarSame, tmpVarCC
integer :: i,j
integer, dimension(:), allocatable :: i0_arr, j0_arr
call initShellVar(rmS%shGr, rmS%nsFac(NORTH)%loc, tmpVarSame)
tmpVarSame%mask = rmS%nsFac(NORTH)%mask
call initShellVar(rmS%shGr, SHCC, tmpVarCC)
tmpVarCC%mask = .true.
call InterpShellVar_TSC_SG(rmS%shGr, rmS%nsFac(NORTH), rmS%shGr, tmpVarSame) ! RM fac to same location
call InterpShellVar_TSC_SG(rmS%shGr, rmS%nsFac(NORTH), rmS%shGr, tmpVarCC) ! RM fac to cell centers
call dump(fOutname, rmS%shGr, rmS%nsFac(NORTH), "nsFac")
call dump(fOutname, rmS%shGr, tmpVarSame, "tmpVarSame")
call dump(fOutname, rmS%shGr, tmpVarCC, "tmpVarCC")
call InterpShellVar_TSC_SG(rmS%shGr, tmpVarCC, rmS%shGr, tmpVarSame) ! CCs back to corners
call dump(fOutname, rmS%shGr, tmpVarSame, "tmpVarC2CC2C")
! Try out letting InterpShellVar_TSC_pnt calculate its own dx
tmpVarSame%data = 0.0
!$OMP PARALLEL DO default(shared) collapse(1) &
!$OMP schedule(dynamic) &
!$OMP private(i,j)
do j=rmS%shGr%jsg,rmS%shGr%jeg+1
do i=rmS%shGr%isg,rmS%shGr%ieg+1
if (.not. tmpVarSame%mask(i,j)) cycle
call InterpShellVar_TSC_pnt( &
rmS%shGr, rmS%nsFac(NORTH), &
rmS%shGr%th(i), rmS%shGr%ph(j), &
tmpVarSame%data(i,j) )
enddo
enddo
call wrapJ_SGV(rmS%shGr, tmpVarSame)
call dump(fOutname, rmS%shGr, tmpVarSame, "tmpVar_noDCell") ! Should be equal to "tmpVarSame"
stop
end subroutine testShellInterp
subroutine dump(fname, shGr, varO, vNameO)
character(len=100), intent(in) :: fname
type(ShellGrid_T), intent(in) :: shGr
type(ShellGridVar_T), optional, intent(in) :: varO
character(len=*), optional, intent(in) :: vNameO
character(len=100) :: tmp
type(IOVAR_T), dimension(5) :: IOVars
! If varO not present, we assume we are starting fresh
! Wipe anything there and write shellGrid info
! If varO present, assume we are writing it out
if (.not. present(varO)) then
call CheckAndKill(fname, .true.)
call ClearIO(IOVars)
call AddOutVar(IOVars,"sh_th",shGr%th)
call AddOutVar(IOVars,"sh_ph",shGr%ph)
call WriteVars(IOVars,.true.,fname)
return
endif
if (.not. present(vNameO)) then
write(*,*)"ERROR in dump: vNameO must be given when varO is present"
stop
endif
! If still here, varO and vNameO present
call ClearIO(IOVars)
write(tmp,'(A,A)')trim(vNameO), "_data"
call AddOutVar(IOVars,tmp,varO%data)
write(tmp,'(A,A)')trim(vNameO), "_mask"
call AddOutVar(IOVars, tmp, merge(1.0_rp,0.0_rp,varO%mask))
call WriteVars(IOVars,.true.,fname)
end subroutine dump
subroutine dump1Dint(fname, var, vName)
character(len=100), intent(in) :: fname
integer, dimension(:), intent(in) :: var
character(len=*), intent(in) :: vName
character(len=100) :: tmp
type(IOVAR_T), dimension(5) :: IOVars
! Cast the integer array to reals for output
call ClearIO(IOVars)
call AddOutVar(IOVars,vName, var*1.0_rp)
call WriteVars(IOVars,.true.,fname)
end subroutine dump1Dint
end program testNewRMS

View File

@@ -28,6 +28,7 @@ program voltron_mpix
logical :: tagSet
type(XML_Input_T) :: xmlInp
real(rp) :: nextDT
integer :: divideSize,i
! initialize MPI
!Set up MPI with or without thread support
@@ -71,6 +72,8 @@ program voltron_mpix
call ReadXmlImmediate(trim(inpXML),'/Kaiju/Voltron/Helpers/numHelpers',helpersBuf,'0',.false.)
read(helpersBuf,*) numHelpers
if(.not. useHelpers) numHelpers = 0
call ReadXmlImmediate(trim(inpXML),'/Kaiju/Voltron/coupling/doGCM',helpersBuf,'F',.false.)
read(helpersBuf,*) vApp%doGCM
! create a new MPI communicator for just Gamera
! for now this is always all ranks excep the last one (which is reserved for voltron)
@@ -96,6 +99,76 @@ program voltron_mpix
call mpi_Abort(MPI_COMM_WORLD, 1, ierror)
end if
! GCM init
if (worldRank .eq. worldSize-1-numHelpers) then
call MPI_Comm_Split(MPI_COMM_WORLD, mageId, 0, vApp%mageCplComm, ierror)
call MPI_Comm_Rank(vApp%mageCplComm, vApp%voltCplRank, ierror)
call MPI_Comm_Size(vApp%mageCplComm, vApp%CplSize, ierror)
if(vApp%mageCplComm /= MPI_COMM_NULL) then
print *,'VOLTRON has created an mageCplComm with ', vApp%CplSize-1, ' other app(s)'
print *,'VOLTRON using mageCplComm tag ', mageId
write(*,*) "VOLTRON CPLCOMM: ",vApp%mageCplComm,vApp%voltCplRank
! Tell everyone who I am
if (.not.allocated(vApp%IAm)) allocate(vApp%IAm(vApp%CplSize))
vApp%IAm(vApp%voltCplRank+1) = voltId
do i=1,vApp%CplSize
call MPI_Bcast(vApp%IAm(i),1,MPI_INTEGER,i-1,vApp%mageCplComm,ierror)
enddo
! IAm array starts at 1, MPI Ranks start at 0
do i=1,vApp%CplSize
! Assign rank if match
select case (vApp%IAm(i))
case (voltId)
if (i-1 .ne. vApp%voltCplRank) then
write(*,*) "I AM NOT MYSELF:",i-1,vApp%voltCplRank
endif
case (gamId)
write(*,*) "Gam not involved in MAGE2MAGE yet"
case (rcmId)
write(*,*) "RCM not involved in MAGE2MAGE yet"
case (hidraNId)
vApp%hidraNCplRank = i-1
write(*,*) "Volt coupling to hidraN"
case (hidraSId)
vApp%hidraSCplRank = i-1
write(*,*) "Volt coupling to hidraS"
case (hidraId)
vApp%hidraCplRank = i-1
write(*,*) "Volt coupling to hidra"
case (tgcmId)
vApp%gcmCplRank = i-1
write(*,*) "Volt coupling to TIEGCM"
case default
write(*,*) "Volt does not know about this Coupling ID: ", vApp%IAm(i)
end select
enddo
endif
if(vApp%CplSize == 1 .and. (.not. vApp%doGCM)) then
write(*,*) "VOLTRON: We're not coupling to a GCM"
call mpi_comm_free(vApp%mageCplComm, ierror)
if(ierror /= MPI_Success) then
call MPI_Error_string( ierror, message, length, ierror)
print *,message(1:length)
call mpi_Abort(MPI_COMM_WORLD, 1, ierror)
end if
vApp%mageCplComm = MPI_COMM_NULL
endif
if (vApp%CplSize == 1 .and. vApp%doGCM) then
write(*,*) "VOLTRON: Coupling to GCM Failed."
call mpi_Abort(MPI_COMM_WORLD, 1, ierror)
endif
else
call MPI_Comm_Split(MPI_COMM_WORLD, MPI_UNDEFINED, worldRank, vApp%mageCplComm, ierror)
endif
if(ierror /= MPI_Success) then
call MPI_Error_string( ierror, message, length, ierror)
print *,message(1:length)
call mpi_Abort(MPI_COMM_WORLD, 1, ierror)
endif
! voltron
vApp%vOptions%gamUserInitFunc => initUser
vApp%vOptionsMpi%allComm = MPI_COMM_WORLD
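For reference, a minimal standalone sketch (hypothetical, not part of the repo) of the IAm identity exchange used above: each rank fills its own slot and then every slot is broadcast from the rank that owns it, so all ranks converge on the same table without an allgather:

program iam_handshake
  use mpi
  implicit none
  integer :: rank, nProc, ierror, i
  integer, allocatable :: IAm(:)
  call MPI_Init(ierror)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierror)
  call MPI_Comm_size(MPI_COMM_WORLD, nProc, ierror)
  allocate(IAm(nProc))
  IAm = -1
  IAm(rank+1) = 100 + rank      ! stand-in for voltId/gamId/hidraId/...
  ! slot i is owned by rank i-1, which broadcasts its identity to all
  do i = 1, nProc
     call MPI_Bcast(IAm(i), 1, MPI_INTEGER, i-1, MPI_COMM_WORLD, ierror)
  enddo
  if (rank == 0) write(*,*) 'IAm table:', IAm
  call MPI_Finalize(ierror)
end program iam_handshake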

Some files were not shown because too many files have changed in this diff