Merged development into external_contrib

Nikhil Rao
2025-11-17 14:36:10 +00:00
111 changed files with 3476 additions and 2336 deletions

.gitignore vendored

@@ -8,11 +8,15 @@ external/FARGPARSE-*/
external/GFTL-*/
external/GFTL_SHARED-*/
external/PFUNIT-*/
# skip F90 files in the tests folder, except in specific subfolders
tests/*/*.F90
!tests/helperCode/*.F90
!tests/helperCode_mpi/*.F90
# any local automated tests that users have run
test_runs/
# Pre-compile generated files
src/base/git_info.F90

CITATION.cff Normal file

@@ -0,0 +1,91 @@
cff-version: 1.2.0
message: "If you use this software, please cite it using the metadata below."
title: "MAGE: Multiscale Atmosphere Geospace Environment Model"
version: "1.25.1"
date-released: 2025-07-03
doi: 10.5281/zenodo.16818682
repository-code: "https://github.com/JHUAPL/kaiju"
license: "BSD 3-Clause"
authors:
- family-names: Merkin
given-names: Slava
orcid: https://orcid.org/0000-0003-4344-5424
affiliation: "Johns Hopkins University Applied Physics Laboratory"
- family-names: Arnold
given-names: Harry
orcid: https://orcid.org/0000-0002-0449-1498
affiliation: "Johns Hopkins University Applied Physics Laboratory"
- family-names: Bao
given-names: Shanshan
orcid: https://orcid.org/0000-0002-5209-3988
affiliation: "Rice University"
- family-names: Garretson
given-names: Jeffery
orcid: https://orcid.org/0000-0003-3805-9860
affiliation: "Johns Hopkins University Applied Physics Laboratory"
- family-names: Lin
given-names: Dong
affiliation: "NSF National Center for Atmospheric Research"
orcid: https://orcid.org/0000-0003-2894-6677
- family-names: Lyon
given-names: John
affiliation: "Gamera Consulting"
orcid: https://orcid.org/0000-0002-5759-9849
- family-names: McCubbin
given-names: Andrew
orcid: https://orcid.org/0000-0002-6222-3627
affiliation: "Johns Hopkins University Applied Physics Laboratory"
- family-names: Michael
given-names: Adam
orcid: https://orcid.org/0000-0003-2227-1242
affiliation: "Johns Hopkins University Applied Physics Laboratory"
- family-names: Pham
given-names: Kevin
orcid: https://orcid.org/0000-0001-5031-5519
affiliation: "NSF National Center for Atmospheric Research"
- family-names: Provornikova
given-names: Elena
orcid: https://orcid.org/0000-0001-8875-7478
affiliation: "Johns Hopkins University Applied Physics Laboratory"
- family-names: Rao
given-names: Nikhil
affiliation: "NSF National Center for Atmospheric Research"
orcid: https://orcid.org/0000-0003-2639-9892
- family-names: Sciola
given-names: Anthony
orcid: https://orcid.org/0000-0002-9752-9618
affiliation: "Johns Hopkins University Applied Physics Laboratory"
- family-names: Sorathia
given-names: Kareem
orcid: https://orcid.org/0000-0002-6011-5470
affiliation: "Johns Hopkins University Applied Physics Laboratory"
- family-names: Toffoletto
given-names: Frank
orcid: https://orcid.org/0000-0001-7789-2615
affiliation: "Rice University"
- family-names: Ukhorskiy
given-names: Aleksandr
orcid: https://orcid.org/0000-0002-3326-4024
affiliation: "Johns Hopkins University Applied Physics Laboratory"
- family-names: Wang
given-names: Wenbin
orcid: https://orcid.org/0000-0002-6287-4542
affiliation: "NSF National Center for Atmospheric Research"
- family-names: Wiltberger
given-names: Michael
orcid: https://orcid.org/0000-0002-4844-3148
affiliation: "NSF National Center for Atmospheric Research"
- family-names: Winter
given-names: Eric
orcid: https://orcid.org/0000-0001-5226-2107
affiliation: "Johns Hopkins University Applied Physics Laboratory"
- family-names: Wu
given-names: Haonan
orcid: https://orcid.org/0000-0002-3272-8106
affiliation: "NSF National Center for Atmospheric Research"
keywords:
- "space weather"
- "MAGE"
- "geospace modeling"


@@ -1,9 +1,6 @@
cmake_minimum_required(VERSION 3.20.2)
project(Kaiju Fortran)
# use the new version of CMP0074, this tells cmake to use <PACKAGE>_ROOT environment
# variables when looking for packages
cmake_policy(SET CMP0074 NEW)
project(Kaiju Fortran C)
#K: Adding C to project to deal w/ issues w/ most recent HDF (10/13/25)
# add and search for pfunit (fingers crossed)
list(APPEND CMAKE_PREFIX_PATH "./external")
@@ -161,22 +158,6 @@ add_executable(gamera.x src/drivers/gamerax.F90 ${GAMIC})
target_link_libraries(gamera.x gamlib baselib)
add_dependencies(gamera gamera.x)
#-------------
#Kaiju: Gamera helio
message("Adding Gamera Helio module ...")
#Add source
#add_subdirectory(src/gamera)
#Print gamera helio info
#message("\tBricksize is ${bricksize}")
message("\tIC file is ${GAMHELIC}")
add_custom_target(gamhelio ALL)
message("\tAdding executable gamhelio.x")
add_executable(gamhelio.x src/drivers/gamerax.F90 ${GAMHELIC})
target_link_libraries(gamhelio.x gamlib baselib)
add_dependencies(gamera gamhelio.x)
#-------------
#Kaiju: Dragon King
message("Adding dragonking module ...")
@@ -238,6 +219,18 @@ add_executable(voltron.x src/drivers/voltronx.F90)
target_link_libraries(voltron.x baselib voltlib gamlib dragonkinglib remixlib chimplib raijulib)
add_dependencies(voltron voltron.x)
#-------------
#Kaiju: Gamera helio
message("Adding Gamera Helio module ...")
#Print gamera helio info
message("\tBricksize is ${bricksize}")
message("\tIC file is ${GAMHELIC}")
add_custom_target(gamhelio ALL)
message("\tAdding executable gamhelio.x")
add_executable(gamhelio.x src/drivers/gamerax.F90 ${GAMHELIC})
target_link_libraries(gamhelio.x gamlib baselib)
add_dependencies(gamera gamhelio.x)
if(ENABLE_MPI)
#-------------
#Kaiju: Base MPI
@@ -272,6 +265,7 @@ if(ENABLE_MPI)
#-------------
#Kaiju: Gamera Helio MPI
message("Adding Gamera Helio MPI module ...")
message("\tIC file is ${GAMHELIC}")
add_custom_target(gamhelio_mpi ALL)
message("\tAdding executable gamhelio_mpi.x")
add_executable(gamhelio_mpi.x src/drivers/gamera_mpix.F90 ${GAMHELIC})


@@ -83,7 +83,12 @@ if(CMAKE_Fortran_COMPILER_ID MATCHES Intel)
#Base
string(APPEND CMAKE_Fortran_FLAGS " -fPIC -fpconstant")
#Production
set(PROD "-align array64byte -align rec32byte -no-prec-div -fast-transcendentals")
set(PROD "-align array64byte -align rec32byte -no-prec-div")
if (CMAKE_Fortran_COMPILER_VERSION VERSION_LESS 23.1)
#Fast transcendentals removed in ifx
string(APPEND PROD " -fast-transcendentals")
endif()
#Production with Debug Info
set(PRODWITHDEBUGINFO "-traceback -debug all -align array64byte -align rec32byte -no-prec-div -fast-transcendentals")
#Debug
@@ -123,11 +128,24 @@ if(CMAKE_Fortran_COMPILER_ID MATCHES Intel)
endif()
string(APPEND PROD " -march=core-avx2")
string(APPEND PRODWITHDEBUGINFO " -march=core-avx2")
elseif (HOST MATCHES stampede3)
message("You're on Stampede3!")
if (ENABLE_MKL)
string(APPEND CMAKE_Fortran_FLAGS " -qmkl")
endif()
string(APPEND PROD " -xCORE-AVX512")
string(APPEND PRODWITHDEBUGINFO " -xCORE-AVX512")
endif()
#Check Intel Fortran version
if(NOT ALLOW_INVALID_COMPILERS AND CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER "2021.9")
message(FATAL_ERROR "Intel Fortran compilers newer than 2023 (version 2021.8) are not supported. Set the ALLOW_INVALID_COMPILERS variable to ON to force compilation at your own risk.")
if(NOT ALLOW_INVALID_COMPILERS AND CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER "2021.9" AND CMAKE_Fortran_COMPILER_VERSION VERSION_LESS "2025.1")
message(FATAL_ERROR "Intel OneAPI compilers between 2022-2024 have compiler bugs which cause weird numerical errors in our code. You can set the ALLOW_INVALID_COMPILERS variable to ON to force compilation at your own risk. You'll probably get what you deserve.")
endif()
#Check for MKL + intel25
if (ENABLE_MKL AND (CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER "2024"))
message(WARNING "Intel OneAPI MKL has been found to fail in weird ways and should probably be avoided. But hey, do what you want. I'm a warning message, not a cop.")
endif()
elseif(CMAKE_Fortran_COMPILER_ID MATCHES GNU)


@@ -9,18 +9,18 @@
},
"wsa_file": {
"LEVEL": "BASIC",
"prompt": "Path to WSA solar wind file to use",
"prompt": "Path to WSA boundary condition file to use",
"default": "wsa.fits"
},
"start_date": {
"LEVEL": "BASIC",
"prompt": "Start date for simulation (yyyy-mm-ddThh:mm:ss)",
"default": "2001-06-01T23:00:00"
"default": "2017-08-02T19:44:23"
},
"stop_date": {
"LEVEL": "BASIC",
"prompt": "Stop date for simulation (yyyy-mm-ddThh:mm:ss)",
"default": "2001-06-02T01:00:00"
"default": "2017-08-02T21:44:23"
},
"use_segments": {
"LEVEL": "BASIC",
@@ -383,11 +383,6 @@
"prompt": "Cadence for status updates on screen in simulated hours",
"default": "5.0"
},
"dtCon": {
"LEVEL": "EXPERT",
"prompt": "Cadence for status updates on screen in simulated hours",
"default": "5.0"
},
"doTimer": {
"LEVEL": "EXPERT",
"prompt": "Code timing output enabled",


@@ -28,6 +28,7 @@ import json
import os
import sys
import subprocess
import math
# Import 3rd-party modules.
import netCDF4
@@ -356,7 +357,7 @@ def prompt_user_for_run_options(args):
for on in ["use_segments"]:
od[on]["default"] = "Y"
o[on] = makeitso.get_run_option(on, od[on], mode)
if o["use_segments"] == "Y":
if o["use_segments"].upper() == "Y":
for on in ["segment_duration"]:
o[on] = makeitso.get_run_option(on, od[on], mode)
else:
@@ -364,7 +365,7 @@ def prompt_user_for_run_options(args):
# Compute the number of segments based on the simulation duration and
# segment duration, add 1 if there is a remainder.
if o["use_segments"] == "Y":
if o["use_segments"].upper() == "Y":
num_segments = simulation_duration/float(o["segment_duration"])
if num_segments > int(num_segments):
num_segments += 1
@@ -564,7 +565,8 @@ def main():
segment_duration = float(engage_options["simulation"]["segment_duration"])
makeitso_options["voltron"]["time"]["tFin"] = int((t1-t0).total_seconds())
makeitso_options["pbs"]["num_segments"] = str(int((t1-t0).total_seconds()/segment_duration))
num_segments = math.ceil((t1-t0).total_seconds()/segment_duration)
makeitso_options["pbs"]["num_segments"] = str(num_segments)
select2 = 1 + int(makeitso_options["pbs"]["num_helpers"])
makeitso_options["pbs"]["select2"] = str(select2)
@@ -652,7 +654,9 @@ def main():
# Create the PBS job scripts.
pbs_scripts, submit_all_jobs_script = create_pbs_scripts(engage_options,makeitso_options, makeitso_pbs_scripts, tiegcm_options, tiegcm_inp_scripts, tiegcm_pbs_scripts)
print(f"pbs_scripts = {pbs_scripts}")
print(f"GR_pbs_scripts = {makeitso_pbs_scripts}")
print(f"Tiegcm_pbs_scripts = {tiegcm_pbs_scripts}")
print(f"GTR_pbs_scripts = {pbs_scripts}")
print(f"submit_all_jobs_script = {submit_all_jobs_script}")

scripts/makeitso/makeitso.py Normal file → Executable file

@@ -33,6 +33,7 @@ import datetime
import json
import os
import subprocess
import math
# Import 3rd-party modules.
import h5py
@@ -419,7 +420,7 @@ def prompt_user_for_run_options(option_descriptions: dict, args: dict):
# condition file can be generated.
for on in ["bcwind_available"]:
o[on] = get_run_option(on, od[on], mode)
if o["bcwind_available"] == "Y":
if o["bcwind_available"].upper() == "Y":
for on in ["bcwind_file"]:
o[on] = get_run_option(on, od[on], mode)
# Fetch the start and stop date from the bcwind file.
@@ -846,7 +847,7 @@ def run_preprocessing_steps(options: dict, args: dict):
# If needed, create the solar wind file by fetching data from CDAWeb.
# NOTE: Assumes kaipy is installed.
if options["simulation"]["bcwind_available"] == "N":
if options["simulation"]["bcwind_available"].upper() == "N":
cmd = "cda2wind"
args = [cmd, "-t0", options["simulation"]["start_date"], "-t1",
options["simulation"]["stop_date"], "-interp", "-bx",
@@ -889,26 +890,28 @@ def create_ini_files(options: dict, args: dict):
# Set default value for padding to tFin for coupling.
tfin_padding = 0.0
# Engage modifications to parameters.
# If TIEGCM coupling is specified, warmup segments are calculated
# based on gr_warm_up_time and segment duration. If the segment
# duration is not evenly divisible by gr_warm_up_time, the
# warmup segment duration is set to gr_warm_up_time/4.
# The number of warmup segments is set to gr_warm_up_time/
# warmup_segment_duration.
# If TIEGCM coupling is specified, calculate warmup segments based
# on gr_warm_up_time and segment_duration.
# If gr_warm_up_time is an exact multiple of segment_duration,
# use segment_duration for each warmup segment.
# If gr_warm_up_time is less than segment_duration, use
# gr_warm_up_time as the duration for a single warmup segment.
# If gr_warm_up_time is greater than segment_duration but not an
# exact multiple, use segment_duration and round up the number of segments.
if "coupling" in args:
coupling = args["coupling"]
gr_warm_up_time = float(coupling["gr_warm_up_time"])
segment_duration = float(options["simulation"]["segment_duration"])
i_last_warmup_ini = (gr_warm_up_time/segment_duration)
if i_last_warmup_ini == int(i_last_warmup_ini):
simulation_duration = float(options["voltron"]["time"]["tFin"])
if gr_warm_up_time % segment_duration == 0:
warmup_segment_duration = segment_duration
else:
warmup_segment_duration = gr_warm_up_time/4
if warmup_segment_duration != int(warmup_segment_duration):
print("Error: gr_warm_up_time is not evenly divisible by 4.")
raise ValueError("Invalid gr_warm_up_time value.")
i_last_warmup_ini = (gr_warm_up_time/warmup_segment_duration)
i_last_warmup_ini = int(i_last_warmup_ini)
i_last_warmup_ini = int(gr_warm_up_time/warmup_segment_duration)
elif gr_warm_up_time < segment_duration:
warmup_segment_duration = gr_warm_up_time
i_last_warmup_ini = int(gr_warm_up_time/warmup_segment_duration)
elif gr_warm_up_time > segment_duration:
warmup_segment_duration = segment_duration
i_last_warmup_ini = int(math.ceil(gr_warm_up_time/segment_duration))
# Add padding to tFin for coupling.
if coupling["tfin_delta"] == "T":
tfin_coupling_padding = float(options["voltron"]["coupling"]["dtCouple"]) - 1
@@ -984,9 +987,11 @@ def create_ini_files(options: dict, args: dict):
num_warmup_segments = i_last_warmup_ini
# Create an .ini file for each simulation segment. Files for each
# segment will be numbered starting with 1.
print(f"Creating {options['pbs']['num_segments']} segments, "
f"with {num_warmup_segments} warmup segments.")
for job in range(1, int(options["pbs"]["num_segments"]) + 1 - num_warmup_segments):
if "coupling" in args:
num_segments = math.ceil((simulation_duration - num_warmup_segments*warmup_segment_duration)/segment_duration)
else:
num_segments = int(options["pbs"]["num_segments"])
for job in range(1, num_segments + 1):
opt = copy.deepcopy(options) # Need a copy of options
runid = opt["simulation"]["job_name"]
# NOTE: This naming scheme supports a maximum of 99 segments.
@@ -1009,7 +1014,7 @@ def create_ini_files(options: dict, args: dict):
if "coupling" in args:
opt["voltron"]["coupling"]["doGCM"] = doGCM
# tFin padding different for last segment.
if job == int(options["pbs"]["num_segments"]) - num_warmup_segments:
if job == num_segments:
tfin_padding = -1.0
else:
# Subtract 1 from tFin padding for coupling because it offsets the +1.0 for the restart file done above.
@@ -1210,9 +1215,20 @@ def create_pbs_scripts(xml_files: list, options: dict, args: dict):
coupling = args["coupling"]
gr_warm_up_time = float(coupling["gr_warm_up_time"])
segment_duration = float(options["simulation"]["segment_duration"])
i_last_warmup_pbs_script = int(gr_warm_up_time/segment_duration)
simulation_duration = float(options["voltron"]["time"]["tFin"])
if gr_warm_up_time % segment_duration == 0:
warmup_segment_duration = segment_duration
i_last_warmup_ini = int(gr_warm_up_time/warmup_segment_duration)
elif gr_warm_up_time < segment_duration:
warmup_segment_duration = gr_warm_up_time
i_last_warmup_ini = int(gr_warm_up_time/warmup_segment_duration)
elif gr_warm_up_time > segment_duration:
warmup_segment_duration = segment_duration
i_last_warmup_ini = int(math.ceil(gr_warm_up_time/segment_duration))
num_warmup_segments = i_last_warmup_ini
#i_last_warmup_pbs_script = int(gr_warm_up_time/segment_duration)
spinup_pbs_scripts.append(pbs_scripts[0]) # Spinup script is first
warmup_pbs_scripts = pbs_scripts[1:i_last_warmup_pbs_script + 1] # Warmup scripts
warmup_pbs_scripts = pbs_scripts[1:num_warmup_segments + 1] # Warmup scripts
# Return the paths to the PBS scripts.
return pbs_scripts, submit_all_jobs_script,spinup_pbs_scripts, warmup_pbs_scripts
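The same warmup rule now appears in both create_ini_files and create_pbs_scripts; a condensed sketch (hypothetical helper name, times in the same units as segment_duration) of the three cases described in the comment above:

import math

def warmup_plan(gr_warm_up_time, segment_duration):
    """Return (warmup_segment_duration, num_warmup_segments)."""
    if gr_warm_up_time % segment_duration == 0:
        # Exact multiple: warmup segments reuse the regular duration.
        return segment_duration, int(gr_warm_up_time / segment_duration)
    if gr_warm_up_time < segment_duration:
        # Shorter than one segment: a single warmup segment covers it all.
        return gr_warm_up_time, 1
    # Longer but not an exact multiple: keep the regular duration and
    # round the segment count up.
    return segment_duration, math.ceil(gr_warm_up_time / segment_duration)

Factoring the duplicated branches into one helper like this would keep the two call sites from drifting apart.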


@@ -15,7 +15,7 @@ module arrayutil
subroutine fillArray1D_R(array, val)
real(rp), dimension(:), allocatable, intent(inout) :: array
real(rp), dimension(:), intent(inout) :: array
real(rp), intent(in) :: val
integer :: lbds(1),ubds(1),i
@@ -37,7 +37,7 @@ module arrayutil
end subroutine fillArray1D_R
subroutine fillArray1D_I(array, val)
integer, dimension(:), allocatable, intent(inout) :: array
integer, dimension(:), intent(inout) :: array
integer, intent(in) :: val
integer :: lbds(1),ubds(1),i
@@ -59,7 +59,7 @@ module arrayutil
end subroutine fillArray1D_I
subroutine fillArray2D_R(array, val)
real(rp), dimension(:,:), allocatable, intent(inout) :: array
real(rp), dimension(:,:), intent(inout) :: array
real(rp), intent(in) :: val
integer :: lbds(2),ubds(2),i,j
@@ -83,7 +83,7 @@ module arrayutil
end subroutine fillArray2D_R
subroutine fillArray2D_I(array, val)
integer, dimension(:,:), allocatable, intent(inout) :: array
integer, dimension(:,:), intent(inout) :: array
integer, intent(in) :: val
integer :: lbds(2),ubds(2),i,j
@@ -107,7 +107,7 @@ module arrayutil
end subroutine fillArray2D_I
subroutine fillArray3D_R(array, val)
real(rp), dimension(:,:,:), allocatable, intent(inout) :: array
real(rp), dimension(:,:,:), intent(inout) :: array
real(rp), intent(in) :: val
integer :: lbds(3),ubds(3),i,j,k
@@ -133,7 +133,7 @@ module arrayutil
end subroutine fillArray3D_R
subroutine fillArray3D_I(array, val)
integer, dimension(:,:,:), allocatable, intent(inout) :: array
integer, dimension(:,:,:), intent(inout) :: array
integer, intent(in) :: val
integer :: lbds(3),ubds(3),i,j,k
@@ -159,7 +159,7 @@ module arrayutil
end subroutine fillArray3D_I
subroutine fillArray4D_R(array, val)
real(rp), dimension(:,:,:,:), allocatable, intent(inout) :: array
real(rp), dimension(:,:,:,:), intent(inout) :: array
real(rp), intent(in) :: val
integer :: lbds(4),ubds(4),i,j,k
@@ -185,7 +185,7 @@ module arrayutil
end subroutine fillArray4D_R
subroutine fillArray4D_I(array, val)
integer, dimension(:,:,:,:), allocatable, intent(inout) :: array
integer, dimension(:,:,:,:), intent(inout) :: array
integer, intent(in) :: val
integer :: lbds(4),ubds(4),i,j,k
@@ -211,7 +211,7 @@ module arrayutil
end subroutine fillArray4D_I
subroutine fillArray5D_R(array, val)
real(rp), dimension(:,:,:,:,:), allocatable, intent(inout) :: array
real(rp), dimension(:,:,:,:,:), intent(inout) :: array
real(rp), intent(in) :: val
integer :: lbds(5),ubds(5),i,j,k
@@ -237,7 +237,7 @@ module arrayutil
end subroutine fillArray5D_R
subroutine fillArray5D_I(array, val)
integer, dimension(:,:,:,:,:), allocatable, intent(inout) :: array
integer, dimension(:,:,:,:,:), intent(inout) :: array
integer, intent(in) :: val
integer :: lbds(5),ubds(5),i,j,k


@@ -38,6 +38,7 @@ module clocks
!Depth/parent, ie array entry of parent of this clock
integer :: level=-1,parent=-1
integer :: iTic=0,iToc=0 !Integer ticks
integer :: nCalls=0 !Number of tocs, or finished timer loops since cleaning
real(rp) :: tElap=0.0 !Elapsed time
end type Clock_T
@@ -50,6 +51,11 @@ module clocks
interface readClock
module procedure readClock_str, readClock_int
end interface
!interface for reading number of calls to a clock
interface readNCalls
module procedure readNCalls_str, readNCalls_int
end interface
contains
@@ -171,6 +177,8 @@ module clocks
wclk = real(kClocks(iblk)%iToc-kClocks(iblk)%iTic)/real(clockRate)
kClocks(iblk)%tElap = kClocks(iblk)%tElap + wclk
kClocks(iblk)%nCalls = kClocks(iblk)%nCalls + 1
end subroutine Toc
!Reset clocks
@@ -179,6 +187,7 @@ module clocks
do n=1,nclk
kClocks(n)%tElap = 0
kClocks(n)%nCalls = 0
! if the clock is active, reset the tic to right now
if(kClocks(n)%isOn) call Tic(kClocks(n)%cID, .true.)
enddo
@@ -223,6 +232,39 @@ module clocks
endif
end function readClock_int
function readNCalls_str(cID) result(nc)
character(len=*), intent(in) :: cID
integer :: n,iblk
integer :: nc
iblk = 0
!Find timer
do n=1,nclk
if (toUpper(kClocks(n)%cID) == toUpper(cID)) then
!Found it, save ID
iblk = n
endif
enddo
nc = readNCalls_int(iblk)
end function readNCalls_str
function readNCalls_int(iblk) result(nc)
integer, intent(in) :: iblk
integer :: tmpToc
integer :: nc
if (iblk == 0) then
nc = 0
else
nc = kClocks(iblk)%nCalls
endif
end function readNCalls_int
!Output clock data
subroutine printClocks()
integer :: n,l
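For context, a small Python analogue (illustrative names, not the module's API) of what the readNCalls addition provides: each named clock accumulates elapsed time and a count of completed tic/toc pairs, so callers can form per-call averages later.

import time

class Clock:
    def __init__(self):
        self.t_elap = 0.0   # accumulated elapsed time
        self.n_calls = 0    # completed tic/toc pairs since the last reset
        self._tic = 0.0

clocks = {}

def tic(cid):
    clocks.setdefault(cid, Clock())._tic = time.perf_counter()

def toc(cid):
    c = clocks[cid]
    c.t_elap += time.perf_counter() - c._tic
    c.n_calls += 1

def read_ncalls(cid):
    # Like readNCalls_str: an unknown clock name reports zero calls.
    return clocks[cid].n_calls if cid in clocks else 0

# Per-call average, as the _perf_* outputs later use it:
# avg = clocks["Advance"].t_elap / max(read_ncalls("Advance"), 1)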


@@ -70,12 +70,17 @@ module raijudefs
real(rp), parameter :: def_cfl = 0.3
real(rp), parameter :: cflMax = 0.3
logical, parameter :: def_doUseVelLRs = .true.
logical, parameter :: def_doSmoothGrads = .true.
! Domain limits
! Buffer not allowed beyond min of maxTail and maxSun
real(rp), parameter :: def_maxTail_buffer = 15.0 ! [Rp]
real(rp), parameter :: def_maxSun_buffer = 10.0 ! [Rp]
! Active not allowed beyond min of maxTail and maxSun
real(rp), parameter :: def_maxTail_active = 10.0 ! [Rp]
real(rp), parameter :: def_maxSun_active = 10.0 ! [Rp]
! Active is forced below activeDomRad
real(rp), parameter :: def_activeDomRad = 3.0 ! [Rp]
! Settings
integer, parameter :: raiRecLen = 8
@@ -97,14 +102,19 @@ module raijudefs
!! [deg] Max allowable angle between any two normals surrounding a cell corner
real(rp), parameter :: def_bminThresh = 1.0
!! [nT] default allowable bmin strength
real(rp), parameter :: def_nBounce = 1.0
real(rp), parameter :: def_nBounce = 3.0
!! Number of Alfven bounces (Tb) required to be considered a "good" flux tube for coupling
real(rp), parameter :: def_lim_vaFrac_soft = 0.6_rp
real(rp), parameter :: def_lim_vaFrac_hard = 0.4_rp
real(rp), parameter :: def_lim_bmin_soft = 5.0_rp
! [nT]
!! [nT]
real(rp), parameter :: def_lim_bmin_hard = 2.0_rp
!! [nT]
! Plasmasphere stuff
real(rp), parameter :: def_psphEvolRad = 2.25
!! [Rp] radius under which plasmasphere is not evolved
end module raijudefs


@@ -61,13 +61,24 @@ module earthhelper
real(rp), intent(in) :: kp
!Can only be 1,5
if ( (kp>=1.0) .and. (kp<=5.0) ) then
kpDefault = kp
else if (kp>5.0) then
kpDefault = 5.0
endif
kpDefault = GallagherKp(kp)
end subroutine SetKp0
! Routine to enforce kp to Gallagher's valid range [1,5]
pure function GallagherKp(kp) result(validKp)
real(rp), intent(in) :: kp
real(rp) :: validKp
if (kp < 1.0_rp) then
validKp = 1.0_rp
else if (kp > 5.0_rp) then
validKp = 5.0_rp
else
validKp = kp
endif
end function GallagherKp
!Return 2D gallagher density afa r,phi (rad)
function GallagherRP(r,phi,kpO) result(D)
real(rp), intent(in) :: r,phi
@@ -97,7 +108,8 @@ module earthhelper
D = 0.0
if (present(kpO)) then
kp = nint(kpO) ! Gallagher model takes integer Kp
!Can only be 1,5
kp = nint(GallagherKp(kpO))
else
kp = nint(kpDefault)
endif


@@ -917,10 +917,10 @@ contains
!Create cache group
call h5gcreate_f(h5fId,trim(attrGrpName),cacheId,herr)
cacheCreate = .true.
else
! Open attribute cache group
call h5gopen_f(h5fId,trim(attrGrpName),cacheId,herr)
endif
! Open attribute cache group
call h5gopen_f(h5fId,trim(attrGrpName),cacheId,herr)
! Check attribute cache size and resize
call CheckAttCacheSize(trim(gStrO), cacheId, cacheExist, cacheCreate)
! Write Step# to cache


@@ -106,7 +106,7 @@ module shellInterp
case(SHGR_CC)
!$OMP PARALLEL DO default(shared) &
!$OMP schedule(dynamic) &
!$OMP private(i,j)
!$OMP private(i,j,goodInterp)
do j=sgDest%jsg,sgDest%jeg
do i=sgDest%isg,sgDest%ieg
if (.not. varOut%mask(i,j)) cycle
@@ -125,7 +125,7 @@ module shellInterp
case(SHGR_CORNER)
!$OMP PARALLEL DO default(shared) &
!$OMP schedule(dynamic) &
!$OMP private(i,j)
!$OMP private(i,j,goodinterp)
do j=sgDest%jsg,sgDest%jeg+1
do i=sgDest%isg,sgDest%ieg+1
if (.not. varOut%mask(i,j)) cycle
@@ -142,7 +142,7 @@ module shellInterp
case(SHGR_FACE_THETA)
!$OMP PARALLEL DO default(shared) &
!$OMP schedule(dynamic) &
!$OMP private(i,j)
!$OMP private(i,j,goodInterp)
do j=sgDest%jsg,sgDest%jeg
do i=sgDest%isg,sgDest%ieg+1
if (.not. varOut%mask(i,j)) cycle
@@ -159,7 +159,7 @@ module shellInterp
case(SHGR_FACE_PHI)
!$OMP PARALLEL DO default(shared) &
!$OMP schedule(dynamic) &
!$OMP private(i,j)
!$OMP private(i,j,goodInterp)
do j=sgDest%jsg,sgDest%jeg+1
do i=sgDest%isg,sgDest%ieg
if (.not. varOut%mask(i,j)) cycle


@@ -147,6 +147,11 @@ module raijutypes
!--- State ---!
logical :: doCS_next_preAdv = .false.
!! Signal to run coldstart next time raiju preAdvances
real(rp) :: modelDst_next_preAdv = 0.0_rp
!! Target Dst [nT] when we run coldstart next
logical :: doneFirstCS = .false.
!! Have we executed once already?
real(rp) :: lastEval = -1*HUGE
@@ -221,6 +226,8 @@ module raijutypes
!! For debug
logical :: writeGhosts
!! For debug
logical :: doSmoothGrads
!! Whether or not we smooth variables (bvol and electric potential) before taking gradients
logical :: doClockConsoleOut
!! If we are driving, output clock info
logical :: doOutput_potGrads
@@ -245,6 +252,8 @@ module raijutypes
real(rp) :: psphInitKp
logical :: doPsphEvol
!! Whether or not to actually evolve the plasmasphere
real(rp) :: psphEvolRad
!! [Rp] Radius below which plasmasphere is not evolved
! TODO: Extra params for refilling rate, determining initial profile, etc.
! Some constants
@@ -261,6 +270,8 @@ module raijutypes
!! Maximum tailward extent of the active region
real(rp) :: maxSun_active
!! Maximum sunward extent of the active region
real(rp) :: activeDomRad
!! [Rp] Cells are forced to be active below this radius
! Active shell settings
logical :: doActiveShell
@@ -390,6 +401,10 @@ module raijutypes
type(IOClock_T) :: IO
!! Timers for IO operations
! I feel like philosophically this should be in Grid but that feels weird so it's here
type(TimeSeries_T) :: KpTS
!! Kp timeseries from solar wind file
! -- Solver values -- !
real(rp), dimension(:,:,:), allocatable :: eta
!! (Ngi, Ngj, Nk) [#/cc * Rp/T] etas


@@ -175,6 +175,10 @@ module volttypes
!! [s] Time scale over which we ramp up to full IM_TSCL for MHD ingestion
logical :: doColdstartCX = .true.
!! Whether or not we apply charge exchange to initial coldstart ion profile
real(rp) :: tsclSm_dL = 1.0_rp
!! [Rp] theta/L-direction smoothing scale for mhd ingestion timescale
real(rp) :: tsclSm_dMLT = 1.0_rp
!! [hr] phi/MLT-direction smoothing scale for mhd ingestion timescale
! --- Grid --- !
type(ShellGrid_T) :: shGr
@@ -219,13 +223,18 @@ module volttypes
!! Used to limit active region to tubes that can reasonably be treated as averaged and slowly-evolving
type(ShellGridVar_T) :: Tb
!! (Ngi, Ngj) Bounce timescale
type(ShellGridVar_T) :: avgBeta
!! (Ngi, Ngj) Cross-sectional flux-tube area - averaged beta (woof)
type(ShellGridVar_T) :: pot_total
!! Total electrostatic potential from (ionosphere + corot) [kV]
type(ShellGridVar_T) :: pot_corot
!! Just corotation potential, just for output purposes [kV]
! Post-advance stuff leaving raiju
type(ShellGridVar_T) :: tscl_mhdIngest
!! [s] Timescale at which we think MHD should ingest our info
contains
procedure :: toIMAG => volt2RAIJU


@@ -65,22 +65,19 @@ module chmpfields
allocate(By(Nip,Njp,Nkp))
allocate(Bz(Nip,Njp,Nkp))
allocate(Vx(Nip,Njp,Nkp,0:Model%nSpc))
allocate(Vy(Nip,Njp,Nkp,0:Model%nSpc))
allocate(Vz(Nip,Njp,Nkp,0:Model%nSpc))
if (Model%doMHD) then
allocate(D (Nip,Njp,Nkp,0:Model%nSpc))
allocate(P (Nip,Njp,Nkp,0:Model%nSpc))
allocate(Vx(Nip,Njp,Nkp,0:Model%nSpc))
allocate(Vy(Nip,Njp,Nkp,0:Model%nSpc))
allocate(Vz(Nip,Njp,Nkp,0:Model%nSpc))
else
allocate(Vx(Nip,Njp,Nkp,0))
allocate(Vy(Nip,Njp,Nkp,0))
allocate(Vz(Nip,Njp,Nkp,0))
endif
if (Model%doJ) then
allocate(Jx(Nip,Njp,Nkp))
allocate(Jy(Nip,Njp,Nkp))
allocate(Jz(Nip,Njp,Nkp))
endif
!------------


@@ -12,6 +12,7 @@ module streamline
real(rp), private :: ShueScl = 2.0 !Safety factor for Shue MP
real(rp), private :: rShue = 6.0 !Radius to start checking Shue
integer , private :: NpChk = 10 !Cadence for Shue checking
logical , private :: doShueG = .false. !Global doShue, can be overridden by optional arguments to routines
contains
@@ -21,15 +22,16 @@ module streamline
if (Model%isMAGE .and. (trim(toUpper(Model%uID)) == "EARTHCODE")) then
!This is for Earth and we're running in tandem w/ mage
!Setup shue for short-circuiting
write(*,*) "Initializing SHUE-MP checking ..."
call inpXML%Set_Val(ShueScl,'streamshue/ShueScl' ,ShueScl)
call inpXML%Set_Val(rShue ,'streamshue/rShue' ,rShue )
call inpXML%Set_Val(NpChk ,'streamshue/NpChk' ,NpChk )
call inpXML%Set_Val(doShueG,'streamshue/doShue' ,.false.)
if (doShueG) write(*,*) "Initializing SHUE-MP checking ..."
else
!Otherwise don't care about Shue
rShue = HUGE
doShueG = .false.
endif
end subroutine setShue
!Trace field line w/ seed point x0 (both directions)
@@ -84,15 +86,11 @@ module streamline
doSH = .false.
endif
!If optional argument, do whatever it says. Otherwise default to global value
if (present(doShueO)) then
doShue = doShueO
else
doShue = .false.
endif
if ( (.not. doSH) .and. (.not. doNH) ) then
!What are you even asking for?
return
doShue = doShueG
endif
!Allocate temp arrays to hold information along each direction
@@ -754,7 +752,7 @@ module streamline
if (present(doShueO)) then
doShue = doShueO
else
doShue = .false.
doShue = doShueG
endif
!Initialize
@@ -832,7 +830,7 @@ module streamline
!Slimmed down projection to northern hemisphere for MAGE
!RinO is optional cut-off inner radius when in northern hemisphere
!epsO is optional epsilon (otherwise use Model default)
subroutine mageproject(Model,ebState,x0,t,xyz,Np,isG,epsO,MaxStepsO)
subroutine mageproject(Model,ebState,x0,t,xyz,Np,isG,epsO,MaxStepsO,doShueO)
type(chmpModel_T), intent(in) :: Model
type(ebState_T), intent(in) :: ebState
real(rp), intent(in) :: x0(NDIM),t
@@ -841,12 +839,13 @@ module streamline
logical, intent(out) :: isG
real(rp), intent(in), optional :: epsO
integer , intent(in), optional :: MaxStepsO
logical , intent(in), optional :: doShueO
type(GridPoint_T) :: gPt
integer :: sgn,MaxSteps
real(rp) :: eps,h
real(rp), dimension(NDIM) :: dx,B,oB
logical :: inDom,isSC,isDone
logical :: inDom,isSC,isDone,doShue
if (present(epsO)) then
eps = epsO
@@ -860,6 +859,12 @@ module streamline
MaxSteps = MaxFL
endif
if (present(doShueO)) then
doShue = doShueO
else
doShue = doShueG
endif
sgn = +1 !Step towards NH
isG = .false.
!Initialize
@@ -940,7 +945,7 @@ module streamline
logical :: inMP
inDom = inDomain(xyz,Model,ebState%ebGr)
if ( (modulo(Np,NpChk) == 0) .and. (norm2(gPt%xyz)>=rShue) ) then
if ( doShue .and. (modulo(Np,NpChk) == 0) .and. (norm2(gPt%xyz)>=rShue) ) then
inMP = inShueMP_SM(xyz,ShueScl)
else
inMP = .true.
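The doShueG change follows a default-plus-override pattern; a minimal sketch (illustrative names) assuming a module-level default, read once from the input file, that an optional per-call argument can override:

DO_SHUE_DEFAULT = False  # set once at startup from streamshue/doShue

def trace_line(x0, do_shue=None):
    # The optional argument wins; otherwise fall back to the global
    # default instead of the old hard-coded False.
    do_shue = DO_SHUE_DEFAULT if do_shue is None else do_shue
    return do_shue  # tracing itself elided; only the override rule matters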


@@ -30,6 +30,7 @@ program voltron_mpix
type(XML_Input_T) :: xmlInp
real(rp) :: nextDT
integer :: divideSize,i
logical :: doResetClocks = .false.
! initialize MPI
!Set up MPI with or without thread support
@@ -206,12 +207,12 @@ program voltron_mpix
if (vApp%IO%doTimerOut) then
call printClocks()
endif
call cleanClocks()
doResetClocks = .true.
elseif (vApp%IO%doTimer(vApp%time)) then
if (vApp%IO%doTimerOut) then
call printClocks()
endif
call cleanClocks()
doResetClocks = .true.
endif
!Data output
@@ -223,6 +224,12 @@ program voltron_mpix
if (vApp%IO%doRestart(vApp%time)) then
call resOutputV(vApp,vApp%gApp)
endif
!Reset clocks last so data is available for all output
if (doResetClocks) then
call cleanClocks()
doResetClocks = .false.
endif
call Toc("IO", .true.)
call Toc("Omega", .true.)
@@ -257,11 +264,11 @@ program voltron_mpix
!Timing info
if (gApp%Model%IO%doTimerOut) call printClocks()
call cleanClocks()
doResetClocks = .true.
elseif (gApp%Model%IO%doTimer(gApp%Model%t)) then
if (gApp%Model%IO%doTimerOut) call printClocks()
call cleanClocks()
doResetClocks = .true.
endif
if (gApp%Model%IO%doOutput(gApp%Model%t)) then
@@ -274,6 +281,11 @@ program voltron_mpix
call gApp%WriteRestart(gApp%Model%IO%nRes)
endif
if (doResetClocks) then
call cleanClocks()
doResetClocks = .false.
endif
call Toc("IO")
call Toc("Omega", .true.)
end do


@@ -11,6 +11,7 @@ program voltronx
type(voltApp_T) :: vApp
real(rp) :: nextDT
logical :: doResetClocks = .false.
call initClocks()
@@ -37,10 +38,10 @@ program voltronx
call consoleOutputV(vApp,vApp%gApp)
!Timing info
if (vApp%IO%doTimerOut) call printClocks()
call cleanClocks()
doResetClocks = .true.
elseif (vApp%IO%doTimer(vApp%time)) then
if (vApp%IO%doTimerOut) call printClocks()
call cleanClocks()
doResetClocks = .true.
endif
!Data output
@@ -51,6 +52,11 @@ program voltronx
if (vApp%IO%doRestart(vApp%time)) then
call resOutputV(vApp,vApp%gApp)
endif
!Reset clocks last
if (doResetClocks) then
call cleanClocks()
doResetClocks = .false.
endif
call Toc("IO", .true.)


@@ -73,6 +73,7 @@ module gamapp
subroutine stepGamera(gameraApp)
class(gamApp_T), intent(inout) :: gameraApp
call Tic("Advance", .true.)
!update the state variables to the next timestep
call UpdateStateData(gameraApp)
@@ -82,11 +83,13 @@ module gamapp
call Toc("DT")
!Enforce BCs
call Tic("BCs")
call Tic("BCs", .true.)
call EnforceBCs(gameraApp%Model,gameraApp%Grid,gameraApp%State)
!Update Bxyz's
call bFlux2Fld (gameraApp%Model,gameraApp%Grid,gameraApp%State%magFlux,gameraApp%State%Bxyz)
call Toc("BCs")
call Toc("BCs", .true.)
call Toc("Advance", .true.)
end subroutine stepGamera


@@ -258,7 +258,7 @@ module gioH5
type(State_T), intent(in) :: State
character(len=*), intent(in) :: gStr
integer :: i,j,k,s
integer :: i,j,k,s,nClkSteps
character(len=strLen) :: dID,VxID,VyID,VzID,PID
integer iMin,iMax,jMin,jMax,kMin,kMax
@@ -413,7 +413,8 @@ module gioH5
!Write current
if (Model%isMagsphere) then
!Subtract dipole before calculating current
!$OMP PARALLEL DO default(shared) collapse(2)
!$OMP PARALLEL DO default(shared) collapse(2) &
!$OMP private(i,j,k)
do k=Gr%ksg,Gr%keg
do j=Gr%jsg,Gr%jeg
do i=Gr%isg,Gr%ieg
@@ -440,7 +441,8 @@ module gioH5
if (doFat) then
!Divide by edge-length to go from potential to field
!$OMP PARALLEL DO default(shared) collapse(2)
!$OMP PARALLEL DO default(shared) collapse(2) &
!$OMP private(i,j,k)
do k=Gr%ksg,Gr%keg
do j=Gr%jsg,Gr%jeg
do i=Gr%isg,Gr%ieg
@@ -465,19 +467,19 @@ module gioH5
if (Model%doSource .and. Model%isMagsphere) then
!Volt variables
call GameraOut("SrcX1" ,"deg",rad2deg,Gr%Gas0(Gr%is:Gr%ie,Gr%js:Gr%je,Gr%ks:Gr%ke,PROJLAT))
call GameraOut("SrcX2" ,"deg",rad2deg,Gr%Gas0(Gr%is:Gr%ie,Gr%js:Gr%je,Gr%ks:Gr%ke,PROJLON))
call GameraOut("SrcX1" ,"deg",rad2deg,Gr%Gas0(iMin:iMax,jMin:jMax,kMin:kMax,PROJLAT))
call GameraOut("SrcX2" ,"deg",rad2deg,Gr%Gas0(iMin:iMax,jMin:jMax,kMin:kMax,PROJLON))
call GameraOut("SrcIONEx" ,gamOut%eID,gamOut%eScl,Gr%Gas0(Gr%is:Gr%ie,Gr%js:Gr%je,Gr%ks:Gr%ke,IONEX))
call GameraOut("SrcIONEy" ,gamOut%eID,gamOut%eScl,Gr%Gas0(Gr%is:Gr%ie,Gr%js:Gr%je,Gr%ks:Gr%ke,IONEY))
call GameraOut("SrcIONEz" ,gamOut%eID,gamOut%eScl,Gr%Gas0(Gr%is:Gr%ie,Gr%js:Gr%je,Gr%ks:Gr%ke,IONEZ))
call GameraOut("SrcIONEx" ,gamOut%eID,gamOut%eScl,Gr%Gas0(iMin:iMax,jMin:jMax,kMin:kMax,IONEX))
call GameraOut("SrcIONEy" ,gamOut%eID,gamOut%eScl,Gr%Gas0(iMin:iMax,jMin:jMax,kMin:kMax,IONEY))
call GameraOut("SrcIONEz" ,gamOut%eID,gamOut%eScl,Gr%Gas0(iMin:iMax,jMin:jMax,kMin:kMax,IONEZ))
!IMAG variables
call GameraOut("SrcD_RING" ,gamOut%dID,gamOut%dScl,Gr%Gas0(Gr%is:Gr%ie,Gr%js:Gr%je,Gr%ks:Gr%ke,IM_D_RING))
call GameraOut("SrcP_RING" ,gamOut%pID,gamOut%pScl,Gr%Gas0(Gr%is:Gr%ie,Gr%js:Gr%je,Gr%ks:Gr%ke,IM_P_RING))
call GameraOut("SrcD_COLD" ,gamOut%dID,gamOut%dScl,Gr%Gas0(Gr%is:Gr%ie,Gr%js:Gr%je,Gr%ks:Gr%ke,IM_D_COLD))
call GameraOut("SrcP_COLD" ,gamOut%pID,gamOut%pScl,Gr%Gas0(Gr%is:Gr%ie,Gr%js:Gr%je,Gr%ks:Gr%ke,IM_P_COLD))
call GameraOut("SrcDT" ,"s" ,gamOut%tScl,Gr%Gas0(Gr%is:Gr%ie,Gr%js:Gr%je,Gr%ks:Gr%ke,IM_TSCL ))
call GameraOut("SrcD_RING" ,gamOut%dID,gamOut%dScl,Gr%Gas0(iMin:iMax,jMin:jMax,kMin:kMax,IM_D_RING))
call GameraOut("SrcP_RING" ,gamOut%pID,gamOut%pScl,Gr%Gas0(iMin:iMax,jMin:jMax,kMin:kMax,IM_P_RING))
call GameraOut("SrcD_COLD" ,gamOut%dID,gamOut%dScl,Gr%Gas0(iMin:iMax,jMin:jMax,kMin:kMax,IM_D_COLD))
call GameraOut("SrcP_COLD" ,gamOut%pID,gamOut%pScl,Gr%Gas0(iMin:iMax,jMin:jMax,kMin:kMax,IM_P_COLD))
call GameraOut("SrcDT" ,"s" ,gamOut%tScl,Gr%Gas0(iMin:iMax,jMin:jMax,kMin:kMax,IM_TSCL ))
endif
@@ -537,6 +539,18 @@ module gioH5
call AddOutVar(IOVars,"kzcsTOT",Model%kzcsTOT,uStr="kZCs",dStr="Total kZCs" )
!---------------------
!Performance metrics
nClkSteps = readNCalls('Advance')
call AddOutVar(IOVars,"_perf_stepTime",readClock(1)/nClkSteps)
call AddOutVar(IOVars,"_perf_mathTime", readClock('Gamera')/nClkSteps)
call AddOutVar(IOVars,"_perf_bcTime", readClock('BCs')/nClkSteps)
call AddOutVar(IOVars,"_perf_haloTime", readClock('Halos')/nClkSteps)
call AddOutVar(IOVars,"_perf_voltTime", readClock('VoltSync')/nClkSteps)
call AddOutVar(IOVars,"_perf_ioTime", readClock('IO')/nClkSteps)
call AddOutVar(IOVars,"_perf_advanceTime", readClock('Advance')/nClkSteps)
!----------------------
!Call user routine if defined
if (associated(Model%HackIO)) then


@@ -112,7 +112,7 @@ module mhdgroup
call Tic("Reynolds")
!$OMP PARALLEL DO default(shared) collapse(2) &
!$OMP private(U,dPg)
!$OMP private(i,j,k,U,dPg)
do k=Grid%ks,Grid%ke
do j=Grid%js,Grid%je
do i=Grid%is,Grid%ie
@@ -169,7 +169,7 @@ module mhdgroup
call Tic("Maxwell")
!$OMP PARALLEL DO default(shared) collapse(2) &
!$OMP private(U,oU,B,oB,dPg,dPm)
!$OMP private(i,j,k,U,oU,B,oB,dPg,dPm)
do k=Grid%ks,Grid%ke
do j=Grid%js,Grid%je
do i=Grid%is,Grid%ie
@@ -225,7 +225,8 @@ module mhdgroup
!Let's goddamn do this thing w/ interleaved memory copies
!Open one big-ass // block
!$OMP PARALLEL default(shared)
!$OMP PARALLEL default(shared) &
!$OMP private(i,j,k)
!$OMP DO collapse(2)
do k=Grid%ksg,Grid%keg


@@ -360,6 +360,7 @@ module gamapp_mpi
! print *, 'Over-writing min I BC to be an MPI BC'
deallocate(Grid%externalBCs(INI)%p)
allocate(Grid%externalBCs(INI)%p,source=mpiNullBc_T(INI))
Grid%isDT = Grid%is
endif
END SELECT
@@ -377,6 +378,7 @@ module gamapp_mpi
! print *, 'Over-writing max I BC to be an MPI BC'
deallocate(Grid%externalBCs(OUTI)%p)
allocate(Grid%externalBCs(OUTI)%p,source=mpiNullBc_T(OUTI))
Grid%ieDT = Grid%ie
endif
END SELECT
@@ -399,6 +401,7 @@ module gamapp_mpi
! print *, 'Over-writing min J BC to be an MPI BC'
deallocate(Grid%externalBCs(INJ)%p)
allocate(Grid%externalBCs(INJ)%p,source=mpiNullBc_T(INJ))
Grid%jsDT = Grid%js
endif
END SELECT
@@ -416,6 +419,7 @@ module gamapp_mpi
! print *, 'Over-writing max J BC to be an MPI BC'
deallocate(Grid%externalBCs(OUTJ)%p)
allocate(Grid%externalBCs(OUTJ)%p,source=mpiNullBc_T(OUTJ))
Grid%jeDT = Grid%je
endif
END SELECT
@@ -438,6 +442,7 @@ module gamapp_mpi
! print *, 'Over-writing min K BC to be an MPI BC'
deallocate(Grid%externalBCs(INK)%p)
allocate(Grid%externalBCs(INK)%p,source=mpiNullBc_T(INK))
Grid%ksDT = Grid%ks
endif
END SELECT
@@ -455,6 +460,7 @@ module gamapp_mpi
! print *, 'Over-writing max K BC to be an MPI BC'
deallocate(Grid%externalBCs(OUTK)%p)
allocate(Grid%externalBCs(OUTK)%p,source=mpiNullBc_T(OUTK))
Grid%keDT = Grid%ke
endif
END SELECT
@@ -578,9 +584,9 @@ module gamapp_mpi
character(len=strLen) :: BCID
!Enforce BCs
call Tic("BCs")
call Tic("BCs", .true.)
call EnforceBCs(gamAppMpi%Model,gamAppMpi%Grid,State)
call Toc("BCs")
call Toc("BCs", .true.)
!Track timing for all gamera ranks to finish physical BCs
! Only synchronize when timing
@@ -591,10 +597,10 @@ module gamapp_mpi
endif
!Update ghost cells
call Tic("Halos")
call Tic("Halos", .true.)
call HaloUpdate(gamAppMpi, State)
call bFlux2Fld(gamAppMpi%Model, gamappMpi%Grid, State%magFlux, State%Bxyz) !Update Bxyz's
call Toc("Halos")
call Toc("Halos", .true.)
!Track timing for all gamera ranks to finish halo comms
! Only synchronize when timing
@@ -605,6 +611,7 @@ module gamapp_mpi
endif
! Re-apply periodic BCs last
call Tic("BCs", .true.)
do i=1,gamAppMpi%Grid%NumBC
if(allocated(gamAppMpi%Grid%externalBCs(i)%p)) then
SELECT type(bc=>gamAppMpi%Grid%externalBCs(i)%p)
@@ -643,6 +650,7 @@ module gamapp_mpi
endselect
endif
enddo
call Toc("BCs", .true.)
!Track timing for all gamera ranks to finish periodic BCs
! Only synchronize when timing
@@ -660,6 +668,7 @@ module gamapp_mpi
integer :: ierr,i
real(rp) :: tmp
call Tic("Advance", .true.)
!update the state variables to the next timestep
call UpdateStateData(gamAppMpi)
@@ -671,11 +680,14 @@ module gamapp_mpi
call Toc("Sync Math")
endif
!Update BCs MPI style
call updateMpiBCs(gamAppMpi, gamAppmpi%State)
!Calculate new timestep
call CalcDT_mpi(gamAppMpi)
! *** THIS MUST BE BEFORE updateMpiBCs
! *** CalcDT can adjust cell values via CPR and other emergency correction schemes
!Update BCs MPI style
call updateMpiBCs(gamAppMpi, gamAppmpi%State)
call Toc("Advance", .true.)
end subroutine stepGamera_mpi


@@ -491,8 +491,9 @@ module ringutils
!Map i to itself
ip = i
!Next do k, map via periodicity
!NOTE: This is assuming you have all
!Next do k, map via periodicity. Have to do k first otherwise have to deal w/ right hand becoming left
!NOTE: This is assuming you have all k cells (ie, no mpi decomp in KDIR)
if (k < Grid%ks) then
kp = k + Np
elseif (k > Grid%ke) then
@@ -501,18 +502,19 @@ module ringutils
kp = k
endif
!Finally do j
!Now handle ip,j,kp => ip,jp,kp
jp = j ! default value
if ( Model%Ring%doS .and. (j<Grid%js) ) then
!js-1 => js
jp = Grid%js + (Grid%js-j) - 1
kp = WrapK(k,Np)
kp = WrapK(kp,Np)
endif
if ( Model%Ring%doE .and. (j>Grid%je) ) then
!je+1 => je
jp = Grid%je - (j-Grid%je) + 1
kp = WrapK(k,Np)
kp = WrapK(kp,Np)
endif
end subroutine lfmIJKcc


@@ -34,21 +34,21 @@ module raijuAdvancer
State%dt = dtCpl
call Tic("Pre-Advance")
call Tic("Pre-Advance",.true.)
call raijuPreAdvance(Model, Grid, State)
call Toc("Pre-Advance")
call Toc("Pre-Advance",.true.)
State%isFirstCpl = .false.
! Step
call Tic("AdvanceState")
call Tic("AdvanceState",.true.)
call AdvanceState(Model, Grid, State)
call Toc("AdvanceState")
call Toc("AdvanceState",.true.)
! etas back to moments
call Tic("Moments Eval")
call Tic("Moments Eval",.true.)
call EvalMoments(Grid, State)
call EvalMoments(Grid, State, doAvgO=.true.)
call Toc("Moments Eval")
call Toc("Moments Eval",.true.)
end subroutine raijuAdvance
@@ -111,11 +111,16 @@ module raijuAdvancer
odt = State%dtk(k)
! NOTE: When we actually implement activeShells these will need to be updated within time loop
where (State%active .eq. RAIJUACTIVE)
isGoodEvol = .true.
elsewhere
isGoodEvol = .false.
end where
if (Grid%spc(Grid%k2spc(k))%flav==F_PSPH) then
call calcEvolDom_psph(Model, Grid, State, isGoodEvol)
else
where (State%active .eq. RAIJUACTIVE)
isGoodEvol = .true.
elsewhere
isGoodEvol = .false.
end where
endif
where (State%active .ne. RAIJUINACTIVE)
isGoodRecon = .true.
elsewhere
@@ -284,4 +289,33 @@ module raijuAdvancer
end subroutine calcEtaHalf
subroutine calcEvolDom_psph(Model, Grid, State, isGoodEvol)
type(raijuModel_T), intent(in) :: Model
type(raijuGrid_T ), intent(in) :: Grid
type(raijuState_T), intent(in) :: State
logical, dimension(Grid%shGrid%isg:Grid%shGrid%ieg, &
Grid%shGrid%jsg:Grid%shGrid%jeg), intent(inout) :: isGoodEvol
integer :: i,j
real(rp) :: Req
isGoodEvol = .false. ! Can't use fillArray yet, no version for logicals
do j=Grid%shGrid%jsg,Grid%shGrid%jeg
do i=Grid%shGrid%isg,Grid%shGrid%ieg
Req = sqrt(State%xyzMincc(i,j,XDIR)**2 + State%xyzMincc(i,j,YDIR)**2)
if (Req < Model%psphEvolRad) then
continue ! Keep points within this radius set to no evol
else
if (State%active(i,j) == RAIJUACTIVE) then
isGoodEvol(i,j) = .true.
endif
endif
enddo
enddo
end subroutine
end module raijuAdvancer
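A sketch (numpy assumed, names illustrative) of what calcEvolDom_psph computes: plasmasphere cells inside psphEvolRad stay frozen, and everything outside follows the usual active mask.

import numpy as np

def psph_evol_mask(xyz_min_cc, active, psph_evol_rad, RAIJUACTIVE=1):
    # Equatorial distance from the cell-center minimum-B mapping (x, y).
    r_eq = np.hypot(xyz_min_cc[..., 0], xyz_min_cc[..., 1])
    # Evolve only active cells at or beyond the frozen inner radius.
    return (active == RAIJUACTIVE) & (r_eq >= psph_evol_rad)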


@@ -5,6 +5,7 @@ module raijuBCs
use raijutypes
use raijuetautils
use raijudomain
use raijuColdStartHelper
implicit none
@@ -29,12 +30,15 @@ module raijuBCs
doWholeDomain = .false.
endif
! Now that topo is set, we can calculate active domain
call setActiveDomain(Model, Grid, State)
call calcMomentIngestionLocs(Model, Grid, State, doWholeDomain, doMomentIngest)
call applyMomentIngestion(Model, Grid, State, doMomentIngest)
if (State%coldStarter%doCS_next_preAdv) then
call raijuGeoColdStart(Model, Grid, State, State%t, State%coldStarter%modelDst_next_preAdv, doAccumulateO=.true.)
State%coldStarter%doCS_next_preAdv = .false.
endif
if (Model%doActiveShell ) then
! Do first round of determining active shells for each k
@@ -64,9 +68,18 @@ module raijuBCs
doMomentIngest = .false.
! Determine where to do BCs
if(doWholeDomain) then
where (State%active .ne. RAIJUINACTIVE)
doMomentIngest = .true.
end where
!where (State%active .ne. RAIJUINACTIVE)
! doMomentIngest = .true.
!end where
associate(sh=>Grid%shGrid)
do j=sh%jsg,sh%jeg
do i=sh%isg,sh%ieg
if (State%active(i,j) .ne. RAIJUINACTIVE) then
doMomentIngest(i,j) = .true.
endif
enddo
enddo
end associate
else
associate(sh=>Grid%shGrid)
@@ -118,8 +131,7 @@ module raijuBCs
psphIdx = spcIdx(Grid, F_PSPH)
eleIdx = spcIdx(Grid, F_HOTE)
!$OMP PARALLEL DO default(shared) &
!$OMP schedule(dynamic) &
!$OMP private(i,j,s,fIdx,fm,vm,kT,etaBelow,tmp_kti,tmp_kte,tmp_D,tmp_P)
!$OMP private(i,j,s,fIdx,fm,vm,kT,etaBelow,tmp_kti,tmp_kte,eMin,tmp_D,tmp_P)
do j=Grid%shGrid%jsg,Grid%shGrid%jeg
do i=Grid%shGrid%isg,Grid%shGrid%ieg
if (State%active(i,j) .eq. RAIJUINACTIVE) then
@@ -215,8 +227,8 @@ module raijuBCs
press2D = State%Pavg (i-2:i+2, j-2:j+2, fIdx)
wgt2D = State%domWeights(i-2:i+2, j-2:j+2)
isG2D = State%active (i-2:i+2, j-2:j+2) .ne. RAIJUINACTIVE
D = sum(den2D * wgt2D, mask=isG2D)/sum(wgt2D, mask=isG2D)
P = sum(press2D* wgt2D, mask=isG2D)/sum(wgt2D, mask=isG2D)
D = sum(den2D * wgt2D, mask=isG2D)/max( sum(wgt2D, mask=isG2D), TINY)
P = sum(press2D* wgt2D, mask=isG2D)/max( sum(wgt2D, mask=isG2D), TINY)
endif
end subroutine getDomWeightedMoments
@@ -309,4 +321,4 @@ module raijuBCs
end subroutine setActiveShellsByContribution
end module raijuBCs


@@ -4,6 +4,7 @@ module raijuColdStartHelper
use raijutypes
use imaghelper
use earthhelper
use arrayutil
use raijuetautils
use raijuloss_CX
@@ -32,6 +33,8 @@ module raijuColdStartHelper
call iXML%Set_Val(coldStarter%tEnd,'coldStarter/tEnd',coldStarter%evalCadence-TINY) ! Don't do any updates as default
endif
coldStarter%doneFirstCS = .false.
end subroutine initRaijuColdStarter
@@ -41,7 +44,7 @@ module raijuColdStartHelper
! Worker routines
!------
subroutine raijuGeoColdStart(Model, Grid, State, t0, dstModel)
subroutine raijuGeoColdStart(Model, Grid, State, t0, dstModel, doAccumulateO)
!! Cold start RAIJU assuming we are at Earth sometime around 21st century
type(raijuModel_T), intent(in) :: Model
type(raijuGrid_T), intent(in) :: Grid
@@ -50,26 +53,31 @@ module raijuColdStartHelper
!! Target time to pull SW values from
real(rp), intent(in) :: dstModel
!! Current dst of global model
logical, optional, intent(in) :: doAccumulateO
!! If true, keep State%eta and merge the coldstart into it (per-cell maximum).
!! If false, replace State%eta with coldStart info
!! Default: false
logical :: isFirstCS
logical :: doInitRC, doAccumulate
integer :: i,j,k
integer :: s, sIdx_p, sIdx_e
real(rp) :: dstReal, dstTarget
real(rp) :: dps_current, dps_preCX, dps_postCX, dps_rescale, dps_ele
real(rp) :: etaScale
logical, dimension(Grid%shGrid%isg:Grid%shGrid%ieg, Grid%shGrid%jsg:Grid%shGrid%jeg) :: isGood
logical , dimension(Grid%shGrid%isg:Grid%shGrid%ieg, Grid%shGrid%jsg:Grid%shGrid%jeg) :: isGood
real(rp), dimension(Grid%shGrid%isg:Grid%shGrid%ieg, Grid%shGrid%jsg:Grid%shGrid%jeg, Grid%Nk) :: etaCS
associate(cs=>State%coldStarter)
!write(*,*)"Coldstart running..."
isFirstCS = .not. State%coldStarter%doneFirstCS
if (.not. isFirstCS .and. .not. cs%doUpdate) then
return
if (present(doAccumulateO)) then
doAccumulate = doAccumulateO
else
doAccumulate = .false.
endif
call fillArray(etaCS, 0.0_rp)
where (State%active .eq. RAIJUACTIVE)
associate(cs=>State%coldStarter)
where (State%active .ne. RAIJUINACTIVE)
isGood = .true.
elsewhere
isGood = .false.
@@ -77,102 +85,154 @@ module raijuColdStartHelper
sIdx_p = spcIdx(Grid, F_HOTP)
sIdx_e = spcIdx(Grid, F_HOTE)
if (isFirstCS) then
! Start by nuking all etas we will set up ourselves
do s=1,Grid%nSpc
!! Skip plasmashere, let that be handled on its own
!if ( Grid%spc(s)%flav == F_PSPH) then
! continue
!endif
State%eta(:,:,Grid%spc(s)%kStart:Grid%spc(s)%kEnd) = 0.0
enddo
endif
!! Init psphere
!if (isFirstCS .or. cs%doPsphUpdate) then
! call setRaijuInitPsphere(Model, Grid, State, Model%psphInitKp)
!endif
! Update Dst target
dstReal = GetSWVal('symh', Model%tsF, t0)
if (isFirstCS) then
if (cs%doneFirstCS) then
write(*,*)"Already coldstarted once, you shouldn't be here "
return
else
! On first try, we assume there is no existing ring current, and it's our job to make up the entire difference
dstTarget = dstReal - dstModel
else if (t0 > (cs%lastEval + cs%evalCadence)) then
! If we are updating, there should already be some ring current
! If dstReal - dstModel is still < 0, we need to add ADDITIONAL pressure to get them to match
dps_current = spcEta2DPS(Model, Grid, State, Grid%spc(sIdx_p), isGood) + spcEta2DPS(Model, Grid, State, Grid%spc(sIdx_e), isGood)
dstTarget = dstReal - (dstModel - dps_current)
else
! Otherwise we have nothing to do, just chill til next update time
return
endif
cs%lastEval = t0
cs%lastTarget = dstTarget
cs%doneFirstCS = .true. ! Whether we do anything or not, we were at least called once
if (dstTarget > 0) then ! We got nothing to contribute
! Now decide if we need to add a starter ring current
if (dstTarget >= 0) then ! We've got nothing to contribute
write(*,*)"RAIJU coldstart not adding starter ring current"
return
endif
if (isFirstCS) then
! Init psphere
call setRaijuInitPsphere(Model, Grid, State, Model%psphInitKp)
! Init hot protons
call raiColdStart_initHOTP(Model, Grid, State, t0, dstTarget)
dps_preCX = spcEta2DPS(Model, Grid, State, Grid%spc(sIdx_p), isGood)
! Hit it with some charge exchange
if (cs%doCX) then
call raiColdStart_applyCX(Model, Grid, State, Grid%spc(sIdx_p))
endif
dps_postCX = spcEta2DPS(Model, Grid, State, Grid%spc(sIdx_p), isGood)
! Calc moments to update pressure and density
call EvalMoments(Grid, State)
! Use HOTP moments to set electrons
call raiColdStart_initHOTE(Model, Grid, State)
dps_ele = spcEta2DPS(Model, Grid, State, Grid%spc(sIdx_e), isGood)
dps_current = dps_postCX ! Note: if using fudge we're gonna lose electrons immediately, don't include them in current dst for now
! Init hot protons
call raiColdStart_initHOTP(Model, Grid, State, t0, dstTarget, etaCS)
!call raiColdStart_initHOTP_RCOnly(Model, Grid, State, t0, dstTarget, etaCS)
dps_preCX = spcEta2DPS(Model, Grid, State%bvol_cc, etaCS, Grid%spc(sIdx_p), isGood)
! Hit it with some charge exchange
if (cs%doCX) then
call raiColdStart_applyCX(Model, Grid, State, Grid%spc(sIdx_p), etaCS)
endif
dps_postCX = spcEta2DPS(Model, Grid, State%bvol_cc, etaCS, Grid%spc(sIdx_p), isGood)
! Use HOTP moments to set electrons
call raiColdStart_initHOTE(Model, Grid, State, etaCS)
dps_ele = spcEta2DPS(Model, Grid, State%bvol_cc, etaCS, Grid%spc(sIdx_e), isGood)
dps_current = dps_postCX ! Note: if using fudge we're gonna lose electrons immediately, don't include them in current dst for now
etaScale = abs(dstTarget / dps_current)
State%eta(:,:,Grid%spc(sIdx_p)%kStart:Grid%spc(sIdx_p)%kEnd) = etaScale*State%eta(:,:,Grid%spc(sIdx_p)%kStart:Grid%spc(sIdx_p)%kEnd)
dps_rescale = spcEta2DPS(Model, Grid, State, Grid%spc(sIdx_p), isGood)
if (isfirstCS) then
write(*,*) "RAIJU Cold starting..."
write(*,'(a,f7.2)') " Real Dst : ",dstReal
write(*,'(a,f7.2)') " Model Dst : ",dstModel
write(*,'(a,f7.2)') " Target DPS-Dst : ",dstTarget
write(*,'(a,f7.2)') " Hot proton pre-loss : ",dps_preCX
write(*,'(a,f7.2)') " post-loss : ",dps_postCX
write(*,'(a,f7.2)') " post-rescale : ",dps_rescale
write(*,'(a,f7.2)') " Hot electron DPS-Dst : ",dps_ele
if (dstTarget < 0) then
etaScale = abs(dstTarget / dps_current)
etaCS(:,:,Grid%spc(sIdx_p)%kStart:Grid%spc(sIdx_p)%kEnd) = etaScale*etaCS(:,:,Grid%spc(sIdx_p)%kStart:Grid%spc(sIdx_p)%kEnd)
dps_rescale = spcEta2DPS(Model, Grid, State%bvol_cc, etaCS, Grid%spc(sIdx_p), isGood)
else
write(*,'(a,f7.2)') " Real Dst : ",dstReal
write(*,'(a,f7.2)') " Model Dst : ",dstModel
write(*,'(a,f7.2)') " Current DPS-Dst : ",dps_current
write(*,'(a,f7.2)') " Target DPS-Dst : ",dstTarget
write(*,'(a,f7.2)') " post-rescale : ",dps_rescale
write(*,'(a,f7.2)') " Hot electron DPS-Dst : ",dps_ele
dps_rescale = dps_current
endif
write(*,*) "RAIJU Cold starting..."
write(*,'(a,f7.2)') " Real Dst : ",dstReal
write(*,'(a,f7.2)') " Model Dst : ",dstModel
write(*,'(a,f7.2)') " Target DPS-Dst : ",dstTarget
write(*,'(a,f7.2)') " Hot proton pre-loss : ",dps_preCX
write(*,'(a,f7.2)') " post-loss : ",dps_postCX
write(*,'(a,f7.2)') " post-rescale : ",dps_rescale
write(*,'(a,f7.2)') " Hot electron DPS-Dst : ",dps_ele
end associate
State%coldStarter%doneFirstCS = .true.
! finally, put it into raiju state
if(doAccumulate) then
!$OMP PARALLEL DO default(shared) &
!$OMP private(i,j,k)
do j=Grid%shGrid%jsg,Grid%shGrid%jeg
do i=Grid%shGrid%isg,Grid%shGrid%ieg
do k=1,Grid%Nk
if(etaCS(i,j,k) > State%eta(i,j,k)) then
State%eta(i,j,k) = etaCS(i,j,k)
endif
enddo
enddo
enddo
else
State%eta = etaCS
endif
end subroutine raijuGeoColdStart
subroutine raiColdStart_initHOTP(Model, Grid, State, t0, dstTarget)
subroutine raiColdStart_initHOTP_RCOnly(Model, Grid, State, t0, dstTarget, etaCS)
type(raijuModel_T), intent(in) :: Model
type(raijuGrid_T), intent(in) :: Grid
type(raijuState_T), intent(inout) :: State
type(raijuState_T), intent(in) :: State
real(rp), intent(in) :: t0
!! Target time to pull SW values from
real(rp), intent(in) :: dstTarget
real(rp), dimension(Grid%shGrid%isg:Grid%shGrid%ieg, Grid%shGrid%jsg:Grid%shGrid%jeg, Grid%Nk), intent(inout) :: etaCS
real(rp) :: dstTarget_p
logical :: isInTM03
integer :: i,j,sIdx
integer, dimension(2) :: ij_TM
real(rp) :: vSW, dSW, dPS_emp, pPS_emp, ktPS_emp
real(rp) :: x0_TM, y0_TM, T0_TM, Bvol0_TM, P0_ps, N0_ps
real(rp) :: L, vm, P_rc, D_rc, kt_rc
sIdx = spcIdx(Grid, F_HOTP)
associate(sh=>Grid%shGrid, spc=>Grid%spc(sIdx))
! Set everything to zero to start
etaCS(:,:,spc%kStart:spc%kEnd) = 0.0_rp
! Scale target Dst down to account for electrons contributing stuff later
dstTarget_p = dstTarget / (1.0 + 1.0/Model%tiote)
call SetQTRC(dstTarget_p,doVerbO=.false.) ! This sets a global QTRC_P0 inside earthhelper.F90
! Get reference TM value at -10 Re
x0_TM = -10.0-TINY
y0_TM = 0.0
! Empirical temperature
call EvalTM03([x0_TM,y0_TM,0.0_rp],N0_ps,P0_ps,isInTM03)
T0_TM = DP2kT(N0_ps, P0_ps)
! Model FTV
ij_TM = minloc( sqrt( (State%xyzMincc(:,:,XDIR)-x0_TM)**2 + (State%xyzMincc(:,:,YDIR)**2) ) )
Bvol0_TM = State%bvol_cc(ij_TM(IDIR), ij_TM(JDIR))
! Now set our initial density and pressure profile
do j=sh%jsg,sh%jeg
do i=sh%isg,sh%ie ! Note: Not setting low lat ghosts, we want them to be zero
if (State%active(i,j) .eq. RAIJUINACTIVE) cycle
L = norm2(State%xyzMincc(i,j,XDIR:YDIR))
vm = State%bvol_cc(i,j)**(-2./3.)
kt_rc = T0_TM*(Bvol0_TM/State%bvol_cc(i,j))**(2./3.)
kt_rc = min(kt_rc, 4.0*T0_TM) ! Limit cap. Not a big fan, but without cap we get stuff that's too energetic and won't go away (until FLC maybe)
P_rc = P_QTRC(L) ! From earthhelper.F90
D_rc = PkT2Den(P_rc, kt_rc)
! Finally map it to HOTP etas
call DkT2SpcEta(Model, spc, etaCS(i,j,spc%kStart:spc%kEnd), D_rc, kt_rc, vm)
enddo
enddo
end associate
end subroutine raiColdStart_initHOTP_RCOnly
subroutine raiColdStart_initHOTP(Model, Grid, State, t0, dstTarget, etaCS)
type(raijuModel_T), intent(in) :: Model
type(raijuGrid_T), intent(in) :: Grid
type(raijuState_T), intent(in) :: State
real(rp), intent(in) :: t0
!! Target time to pull SW values from
real(rp), intent(in) :: dstTarget
real(rp), dimension(Grid%shGrid%isg:Grid%shGrid%ieg, Grid%shGrid%jsg:Grid%shGrid%jeg, Grid%Nk), intent(inout) :: etaCS
real(rp) :: dstTarget_p
logical :: isInTM03
@@ -192,8 +252,10 @@ module raijuColdStartHelper
call InitTM03(Model%tsF,t0)
! Scale target Dst down to account for electrons contributing stuff later
dstTarget_p = dstTarget / (1.0 + 1.0/Model%tiote)
call SetQTRC(dstTarget_p,doVerbO=.false.) ! This sets a global QTRC_P0 inside earthhelper.F90
if (dstTarget < 0) then
dstTarget_p = dstTarget / (1.0 + 1.0/Model%tiote)
call SetQTRC(dstTarget_p,doVerbO=.false.) ! This sets a global QTRC_P0 inside earthhelper.F90
endif
! Get Borovsky statistical values
vSW = abs(GetSWVal("Vx",Model%tsF,t0))
@@ -218,7 +280,7 @@ module raijuColdStartHelper
endif
! Set everything to zero to start
State%eta(:,:,spc%kStart:spc%kEnd) = 0.0_rp
etaCS(:,:,spc%kStart:spc%kEnd) = 0.0_rp
! Now set our initial density and pressure profile
do j=sh%jsg,sh%jeg
@@ -234,7 +296,11 @@ module raijuColdStartHelper
call EvalTM03_SM(State%xyzMincc(i,j,:),N0_ps,P0_ps,isInTM03)
P_rc = P_QTRC(L) ! From earthhelper.F90
if (dstTarget < 0) then
P_rc = P_QTRC(L) ! From earthhelper.F90
else
P_rc = 0.0
endif
if (.not. isInTM03) then
N0_ps = dPS_emp
@@ -250,7 +316,7 @@ module raijuColdStartHelper
endif
! Finally map it to HOTP etas
call DkT2SpcEta(Model, spc, State%eta(i,j,spc%kStart:spc%kEnd), D_final, kt_rc, vm)
call DkT2SpcEta(Model, spc, etaCS(i,j,spc%kStart:spc%kEnd), D_final, kt_rc, vm)
enddo
enddo
@@ -260,11 +326,12 @@ module raijuColdStartHelper
end subroutine raiColdStart_initHOTP
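Note on the Dst partitioning used in both initializers: with electron pressure P_i/tiote, ions carry 1/(1 + 1/tiote) of the total, which is what dstTarget_p encodes. A quick standalone numeric check under an assumed tiote = 4 (values illustrative only):
program dst_split_check
  implicit none
  integer, parameter :: rp = kind(1.0d0)
  real(rp) :: dstTarget, tiote, dstTarget_p
  dstTarget = -100.0_rp ! example storm target [nT]
  tiote     = 4.0_rp    ! assumed Ti/Te ratio
  dstTarget_p = dstTarget/(1.0_rp + 1.0_rp/tiote)
  print *, dstTarget_p  ! -80 nT: ions carry 4/5 of the target
end program dst_split_check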
subroutine raiColdStart_applyCX(Model, Grid, State, spc)
subroutine raiColdStart_applyCX(Model, Grid, State, spc, etaCS)
type(raijuModel_T), intent(in) :: Model
type(raijuGrid_T), intent(in) :: Grid
type(raijuState_T), intent(inout) :: State
type(raijuState_T), intent(in) :: State
type(raijuSpecies_T), intent(in) :: spc
real(rp), dimension(Grid%shGrid%isg:Grid%shGrid%ieg, Grid%shGrid%jsg:Grid%shGrid%jeg, Grid%Nk), intent(inout) :: etaCS
integer :: i,j,k
type(raiLoss_CX_T) :: lossCX
@@ -275,13 +342,12 @@ module raijuColdStartHelper
call lossCX%doInit(Model, Grid, nullXML)
!$OMP PARALLEL DO default(shared) &
!$OMP schedule(dynamic) &
!$OMP private(i,j,tau)
!$OMP private(i,j,k,tau)
do j=Grid%shGrid%jsg,Grid%shGrid%jeg
do i=Grid%shGrid%isg,Grid%shGrid%ieg
do k = spc%kStart,spc%kEnd
tau = lossCX%calcTau(Model, Grid, State, i, j, k)
State%eta(i,j,k) = State%eta(i,j,k)*exp(-tCX/tau)
etaCS(i,j,k) = etaCS(i,j,k)*exp(-tCX/tau)
enddo
enddo
enddo
@@ -289,38 +355,49 @@ module raijuColdStartHelper
end subroutine raiColdStart_applyCX
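Note: the private-clause fix above matters because with default(shared) an inner-loop index omitted from the private list is shared across threads and the loop nest races. A compact, self-contained sketch of the corrected decay loop (array sizes and lifetimes are stand-ins, not values from this diff):
program cx_decay_sketch
  implicit none
  integer, parameter :: rp = kind(1.0d0)
  integer :: i, j, k
  real(rp) :: tau, tCX
  real(rp) :: eta(4,4,8)
  eta = 1.0_rp
  tCX = 3600.0_rp ! seconds of charge-exchange decay to apply
  !$OMP PARALLEL DO default(shared) private(i,j,k,tau)
  do j = 1, 4
     do i = 1, 4
        do k = 1, 8
           tau = 7200.0_rp ! stand-in CX lifetime [s]
           eta(i,j,k) = eta(i,j,k)*exp(-tCX/tau)
        enddo
     enddo
  enddo
  print *, eta(1,1,1) ! exp(-0.5) ~ 0.607
end program cx_decay_sketch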
subroutine raiColdStart_initHOTE(Model, Grid, State)
subroutine raiColdStart_initHOTE(Model, Grid, State, etaCS)
type(raijuModel_T), intent(in) :: Model
type(raijuGrid_T), intent(in) :: Grid
type(raijuState_T), intent(inout) :: State
type(raijuState_T), intent(in) :: State
real(rp), dimension(Grid%shGrid%isg:Grid%shGrid%ieg, Grid%shGrid%jsg:Grid%shGrid%jeg, Grid%Nk), intent(inout) :: etaCS
integer :: sIdx_e, sIdx_p
integer :: i,j
real(rp) :: press_p, den_p
real(rp) :: kt_p, kt_e, den, vm
sIdx_p = spcIdx(Grid, F_HOTP)
sIdx_e = spcIdx(Grid, F_HOTE)
associate(spc_p=>Grid%spc(sIdx_p), spc_e=>Grid%spc(sIdx_e))
! Set everything to zero to start
State%eta(:,:,Grid%spc(sIdx_e)%kStart:Grid%spc(sIdx_e)%kEnd) = 0.0_rp
etaCS(:,:,spc_e%kStart:spc_e%kEnd) = 0.0_rp
!$OMP PARALLEL DO default(shared) &
!$OMP schedule(dynamic) &
!$OMP private(i,j,vm,den,kt_p,kt_e)
!$OMP private(i,j,vm,den_p, press_p,kt_p,kt_e)
do j=Grid%shGrid%jsg,Grid%shGrid%jeg
do i=Grid%shGrid%isg,Grid%shGrid%ie ! Note: Not setting low lat ghosts, we want them to be zero
if (State%active(i,j) .eq. RAIJUINACTIVE) cycle
vm = State%bvol_cc(i,j)**(-2./3.)
den = State%Den(sIdx_p)%data(i,j)
kt_p = DP2kT(den, State%Press(sIdx_p)%data(i,j))
!den = State%Den(sIdx_p)%data(i,j)
!kt_p = DP2kT(den, State%Press(sIdx_p)%data(i,j))
den_p = SpcEta2Den (spc_p, etaCS(i,j,spc_p%kStart:spc_p%kEnd), State%bvol_cc(i,j))
press_p = SpcEta2Press(spc_p, etaCS(i,j,spc_p%kStart:spc_p%kEnd), State%bvol_cc(i,j))
kt_p = DP2kT(den_p, press_p)
kt_e = kt_p / Model%tiote
call DkT2SpcEta(Model, Grid%spc(sIdx_e), &
State%eta(i,j,Grid%spc(sIdx_e)%kStart:Grid%spc(sIdx_e)%kEnd), &
den, kt_e, vm)
etaCS(i,j,Grid%spc(sIdx_e)%kStart:Grid%spc(sIdx_e)%kEnd), &
den_p, kt_e, vm)
enddo
enddo
end associate
end subroutine raiColdStart_initHOTE
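Note: initHOTE now rebuilds the proton moments from the cold-start etas instead of the possibly stale State%Den/State%Press, then scales the temperature by Ti/Te. A sketch of that temperature step, assuming DP2kT is the usual pressure/density conversion (the unit factor below is a placeholder, not the model's constant):
module hote_kt_sketch
  implicit none
  integer, parameter :: rp = kind(1.0d0)
contains
  pure function electronKT(den_p, press_p, tiote) result(kt_e)
    real(rp), intent(in) :: den_p   ! proton density recovered from etaCS
    real(rp), intent(in) :: press_p ! proton pressure recovered from etaCS
    real(rp), intent(in) :: tiote   ! Ti/Te ratio
    real(rp) :: kt_p, kt_e
    real(rp), parameter :: pScl = 1.0_rp ! placeholder unit factor (DP2kT analogue)
    kt_p = pScl*press_p/den_p ! ideal-gas kT from density and pressure
    kt_e = kt_p/tiote         ! electrons are colder by Ti/Te
  end function electronKT
end module hote_kt_sketch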

View File

@@ -34,6 +34,10 @@ module raijuDomain
State%active = RAIJUACTIVE
end where
! HOWEVER...
! Force all cells within Model%activeDomRad to be active, with enough buffer cells, and suffer the consequences if that includes bad flux tubes
call forceActiveWithinRadius(Model, Grid, State, State%active)
end subroutine setActiveDomain
@@ -572,4 +576,33 @@ module raijuDomain
end subroutine calcCornerNormAngles
subroutine forceActiveWithinRadius(Model, Grid, State, active)
type(raijuModel_T), intent(in) :: Model
type(raijuGrid_T ), intent(in) :: Grid
type(raijuState_T), intent(in) :: State
integer, dimension(Grid%shGrid%isg:Grid%shGrid%ieg,Grid%shGrid%jsg:Grid%shGrid%jeg), intent(inout) :: active
integer :: i,j
real(rp) :: rad_next
do j=Grid%shGrid%jsg,Grid%shGrid%jeg
i = Grid%shGrid%ie
rad_next = sqrt(State%xyzMincc(i-1,j,XDIR)**2 + State%xyzMincc(i-1,j,YDIR)**2)
do while(rad_next < Model%activeDomRad)
i = i - 1
rad_next = sqrt(State%xyzMincc(i-1,j,XDIR)**2 + State%xyzMincc(i-1,j,YDIR)**2)
enddo
! i is now the last cell in this j within activeDomRad
if (State%active(i,j) .ne. RAIJUACTIVE) then
! Something bad happened, active domain is really small
! Throw caution to the wind and make it active anyways, good luck everybody
active(i :Grid%shGrid%ieg ,j) = RAIJUACTIVE
active(i-Grid%nB-1 :i-1 ,j) = RAIJUBUFFER
active(Grid%shGrid%isg:i-Grid%nB-2 ,j) = RAIJUINACTIVE
endif
enddo
end subroutine forceActiveWithinRadius
end module raijuDomain
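Note: forceActiveWithinRadius walks outward in radius (downward in i) from sh%ie until the next cell would leave activeDomRad, then repaints the active/buffer/inactive bands for that j. A self-contained sketch of that scan on synthetic radii (threshold and grid values assumed):
program radius_scan_sketch
  implicit none
  integer, parameter :: rp = kind(1.0d0)
  integer, parameter :: isg = 1, ieg = 12
  real(rp) :: rad(isg:ieg), activeDomRad, rad_next
  integer :: i
  do i = isg, ieg
     rad(i) = 13.0_rp - real(i, rp) ! equatorial radii: 12 Re at isg down to 1 Re at ieg
  enddo
  activeDomRad = 6.6_rp ! assumed threshold [Re]
  i = ieg - 2           ! stand-in for sh%ie
  rad_next = rad(i-1)
  do while (rad_next < activeDomRad)
     i = i - 1
     rad_next = rad(i-1)
  enddo
  print *, 'last i within activeDomRad:', i, ' radius:', rad(i)
end program radius_scan_sketch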

View File

@@ -178,18 +178,19 @@ module raijuetautils
end function SpcEta2Press
function spcEta2DPS(Model, Grid, State, spc, isGood) result(dpsdst)
function spcEta2DPS(Model, Grid, bVol_cc, eta, spc, isGood) result(dpsdst)
!! Calculate total DPS-Dst for given species within the defined isGood domain
type(raijuModel_T), intent(in) :: Model
type(raijuGrid_T), intent(in) :: Grid
type(raijuState_T), intent(in) :: State
real(rp), dimension(Grid%shGrid%isg:Grid%shGrid%ieg, Grid%shGrid%jsg:Grid%shGrid%jeg), intent(in) :: bVol_cc
real(rp), dimension(Grid%shGrid%isg:Grid%shGrid%ieg, Grid%shGrid%jsg:Grid%shGrid%jeg, Grid%Nk), intent(in) :: eta
type(raijuSpecies_T), intent(in) :: spc
logical, dimension(Grid%shGrid%isg:Grid%shGrid%ieg, Grid%shGrid%jsg:Grid%shGrid%jeg), intent(in) :: isGood
!! Eval mask, true = point is included in calculation
real(rp) :: dpsdst
integer :: i,j,k
real(rp) :: press, bVol, energyDen, energy
integer :: i,j
real(rp) :: press, energyDen, energy
logical :: isDead = .false.
dpsdst = 0.0
@@ -197,9 +198,8 @@ module raijuetautils
do j=Grid%shGrid%jsg,Grid%shGrid%jeg
do i=Grid%shGrid%isg,Grid%shGrid%ieg
if (.not. isGood(i,j)) cycle
bVol = State%bvol_cc(i,j)
press = SpcEta2Press(spc, State%eta(i,j,spc%kStart:spc%kEnd), bVol) ! [nPa]
energyDen = (press*1.0D-9) * (bVol*Model%planet%rp_m*1.0D9) * (Grid%Brcc(i,j)*1.0D-9)/kev2J ! p[J/m^3] * bVol[m/T] * B[T] = [J/m^2] * keV/J = [keV/m^2]
press = SpcEta2Press(spc, eta(i,j,spc%kStart:spc%kEnd), bvol_cc(i,j)) ! [nPa]
energyDen = (press*1.0D-9) * (bVol_cc(i,j)*Model%planet%rp_m*1.0D9) * (Grid%Brcc(i,j)*1.0D-9)/kev2J ! p[J/m^3] * bVol[m/T] * B[T] = [J/m^2] * keV/J = [keV/m^2]
energy = energyDen*(Grid%areaCC(i,j)*Model%planet%ri_m**2) ! [keV/m^2] * Ri^2[m^2] = [keV]
dpsdst = dpsdst - 4.2*(1.0D-30)*energy ! [nT]
enddo
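Note: the accumulation above is the Dessler-Parker-Sckopke relation, Dst[nT] = -4.2e-30 * E_RC[keV], applied per cell. A one-line numeric check with an assumed total ring-current energy:
program dps_dst_check
  implicit none
  integer, parameter :: rp = kind(1.0d0)
  real(rp) :: energy_keV, dst
  energy_keV = 1.0e31_rp ! assumed total ring-current energy [keV]
  dst = -4.2_rp*(1.0e-30_rp)*energy_keV
  print *, dst ! -42 nT
end program dps_dst_check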
@@ -420,4 +420,61 @@ module raijuetautils
end function getInitPsphere
!Do plasmasphere refilling for the interval we're about to advance
subroutine plasmasphereRefill(Model,Grid,State)
type(raijuModel_T), intent(in) :: Model
type(raijuGrid_T) , intent(in) :: Grid
type(raijuState_T), intent(inout) :: State
integer :: i,j,k0
real(rp), parameter :: maxX = 2.0 !Max over-filling relative to target, i.e. don't go above maxX x den-target
real(rp), parameter :: day2s = 24.0*60.0*60.0, s2day = 1.0/day2s
real(rp) :: NowKp
real(rp) :: xeq,yeq,rad,cc2eta,eta2cc
real(rp) :: dppT,dpsph,etaT,dndt,deta
k0 = Grid%spc(spcIdx(Grid, F_PSPH))%kStart !plasmasphere index
NowKp = State%KpTS%evalAt(State%t)
!$OMP PARALLEL DO default(shared) &
!$OMP private(i,j,xeq,yeq,rad,dppT,cc2eta,eta2cc) &
!$OMP private(dpsph,etaT,deta,dndt)
do j=Grid%shGrid%jsg,Grid%shGrid%jeg
do i=Grid%shGrid%isg,Grid%shGrid%ieg
if (State%active(i,j) == RAIJUACTIVE) then
xeq = State%xyzMincc(i,j,XDIR)
yeq = State%xyzMincc(i,j,YDIR)
rad = sqrt(xeq**2.0 + yeq**2.0)
!Closed field line, calculate Gallagher w/ current Kp to get target density
dppT = GallagherXY(xeq,yeq,NowKp)
cc2eta = State%bvol_cc(i,j)*sclEta
eta2cc = 1.0/cc2eta !Convert eta to #/cc
dpsph = eta2cc*State%eta(i,j,k0) !Current plasmasphere density [#/cc]
!Check for other outs before doing anything
if (dpsph >= maxX*dppT) then
continue ! Too much already there, don't touch it
else
etaT = dppT/eta2cc
if ((rad <= Model%psphEvolRad) .and. (dppT > dpsph)) then
!If this is inside MHD inner boundary, be at least at target value
State%eta(i,j,k0) = etaT
else
!If still here then calculate refilling
dndt = 10.0**(3.48-0.331*rad) !cm^-3/day, Denton+ 2012 eqn 1
deta = (State%dt*s2day)*dndt/eta2cc !Change in eta over dt
State%eta(i,j,k0) = State%eta(i,j,k0) + deta
endif
endif
endif
enddo !i
enddo !j
end subroutine plasmasphereRefill
end module raijuetautils
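Note: the refilling branch uses Denton et al. (2012), eqn 1, dn/dt = 10^(3.48 - 0.331*L) cm^-3/day. A quick standalone check of the rate and the per-step increment (L shell and coupling interval assumed):
program refill_rate_check
  implicit none
  integer, parameter :: rp = kind(1.0d0)
  real(rp), parameter :: day2s = 24.0_rp*3600.0_rp, s2day = 1.0_rp/day2s
  real(rp) :: L, dndt, dt, dn
  L    = 4.0_rp   ! example L shell
  dndt = 10.0_rp**(3.48_rp - 0.331_rp*L) ! ~143 cm^-3/day at L = 4
  dt   = 300.0_rp ! assumed coupling interval [s]
  dn   = dt*s2day*dndt ! density added this step [#/cc]
  print *, dndt, dn
end program refill_rate_check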

View File

@@ -16,7 +16,7 @@ module raijuIO
implicit none
integer, parameter, private :: MAXIOVAR = 70
integer, parameter, private :: MAXIOVAR = 100
logical, private :: doRoot = .true. !Whether root variables need to be written
logical, private :: doFat = .false. !Whether to output lots of extra data
@@ -154,7 +154,7 @@ module raijuIO
logical, optional, intent(in) :: doGhostsO
type(IOVAR_T), dimension(MAXIOVAR) :: IOVars
integer :: i,j,k,s
integer :: i,j,k,s, nClkSteps
integer :: is, ie, js, je, ks, ke
integer, dimension(4) :: outBnds2D
logical :: doGhosts
@@ -275,7 +275,7 @@ module raijuIO
call AddOutVar(IOVars,"pot_corot",State%pot_corot(is:ie+1,js:je+1),uStr="kV",dStr="(corners) Corotation potential")
! Idk about you but I did not expect true to equal -1
allocate(outTmp2D(is:ie, Grid%Nk))
allocate(outTmp2D(Grid%shGrid%isg:Grid%shGrid%ieg, Grid%Nk))
where (State%activeShells)
outTmp2D = 1.0
elsewhere
@@ -421,10 +421,17 @@ module raijuIO
allocate(outTmp2D(Grid%shGrid%isg:Grid%shGrid%ieg ,Grid%shGrid%jsg:Grid%shGrid%jeg))
call calcMapJacNorm(Grid, State%xyzMin, outTmp2D)
call AddOutVar(IOVars, "mapJacNorm", outTmp2D(is:ie,js:je), dStr="L_(2,1) norm of lat/lon => xyzMin Jacobian")
deallocate(outTmp2D)
endif
call WriteVars(IOVars,.true.,Model%raijuH5, gStr)
!Performance Metrics
nClkSteps = readNCalls('DeepUpdate')
call AddOutVar(IOVars, "_perf_stepTime", readClock(1)/nClkSteps)
call AddOutVar(IOVars, "_perf_preAdvance", readClock("Pre-Advance")/nClkSteps)
call AddOutVar(IOVars, "_perf_advanceState", readClock("AdvanceState")/nClkSteps)
call AddOutVar(IOVars, "_perf_moments", readClock("Moments Eval")/nClkSteps)
call WriteVars(IOVars,.true.,Model%raijuH5, gStr)
! Any extra groups to add
if (Model%doLosses .and. Model%doOutput_3DLoss) then
@@ -496,6 +503,12 @@ module raijuIO
else
call AddOutVar(IOVars,"cs_doneFirstCS", 0)
endif
if (State%coldStarter%doCS_next_preAdv) then
call AddOutVar(IOVars,"cs_doCS_next_preAdv", 1)
else
call AddOutVar(IOVars,"cs_doCS_next_preAdv", 0)
endif
call AddOutVar(IOVars, "cs_modelDst_next_preAdv", State%coldStarter%modelDst_next_preAdv)
call AddOutVar(IOVars, "cs_lastEval", State%coldStarter%lastEval)
call AddOutVar(IOVars, "cs_lastTarget", State%coldStarter%lastTarget)
@@ -591,6 +604,8 @@ module raijuIO
call AddInVar(IOVars,"nRes")
call AddInVar(IOVars,"tRes")
call AddInVar(IOVars,"isFirstCpl")
call AddInVar(IOVars,"cs_doCS_next_preAdv")
call AddInVar(IOVars,"cs_modelDst_next_preAdv")
call AddInVar(IOVars,"cs_doneFirstCS")
call AddInVar(IOVars,"cs_lastEval")
call AddInVar(IOVars,"cs_lastTarget")
@@ -644,12 +659,15 @@ module raijuIO
! Coldstarter
State%coldStarter%lastEval = GetIOReal(IOVars, "cs_lastEval")
State%coldStarter%lastTarget = GetIOReal(IOVars, "cs_lastTarget")
State%coldStarter%modelDst_next_preAdv = GetIOReal(IOVars, "cs_modelDst_next_preAdv")
! Handle boolean attributes
tmpInt = GetIOInt(IOVars, "isFirstCpl")
State%isFirstCpl = tmpInt .eq. 1
tmpInt = GetIOInt(IOVars, "cs_doneFirstCS")
State%coldStarter%doneFirstCS = tmpInt .eq. 1
tmpInt = GetIOInt(IOVars, "cs_doCS_next_preAdv")
State%coldStarter%doCS_next_preAdv = tmpInt .eq. 1
call IOArray2DFill(IOVars, "xmin" , State%xyzMin(:,:,XDIR))
call IOArray2DFill(IOVars, "ymin" , State%xyzMin(:,:,YDIR))

View File

@@ -78,7 +78,6 @@ module raijuOut
State%IO%tRes = State%IO%tRes + State%IO%dtRes
State%IO%nRes = State%IO%nRes + 1
end subroutine raijuResInput
@@ -132,7 +131,7 @@ module raijuOut
if (maxP_MLT > 24) maxP_MLT = maxP_MLT - 24D0
write(*,'(a,I0,a,f7.2,a,f7.2,a,f5.2,a,f5.2,a,f7.2)') ' ', &
Grid%spc(s)%flav, ': P =', maxPress,', D =',maxDen,' @ ',maxP_L,' Rp,',maxP_MLT, &
" MLT; DPS:",spcEta2DPS(Model, Grid, State, Grid%spc(sIdx), State%active .eq. RAIJUACTIVE)
" MLT; DPS:",spcEta2DPS(Model, Grid, State%bvol_cc, State%eta_avg, Grid%spc(sIdx), State%active .eq. RAIJUACTIVE)
enddo
write(*,'(a)',advance="no") ANSIRESET

View File

@@ -36,11 +36,20 @@ module raijuPreAdvancer
call fillArray(State%eta_avg, 0.0_rp)
! (losses handled in updateRaiLosses)
! Now that topo is set, we can calculate active domain
call setActiveDomain(Model, Grid, State)
! Moments to etas, initial active shell calculation
call Tic("BCs")
call applyRaijuBCs(Model, Grid, State, doWholeDomainO=State%isFirstCpl) ! If fullEtaMap=True, mom2eta map is applied to the whole domain
if (State%isFirstCpl) then
call setRaijuInitPsphere(Model, Grid, State, Model%psphInitKp)
endif
call Toc("BCs")
! Handle plasmasphere refilling for the full step about to happen
call plasmasphereRefill(Model,Grid,State)
! Handle edge cases that may affect the validity of information carried over from last coupling period
call prepEtaLast(Grid%shGrid, State, State%isFirstCpl)
@@ -258,7 +267,7 @@ module raijuPreAdvancer
associate(sh=>Grid%shGrid)
! Gauss-Green calculation of cell-averaged gradients
call potExB(Grid%shGrid, State, pExB, doSmoothO=.true., isGCornerO=isGCorner) ! [V]
call potExB(Grid%shGrid, State, pExB, doSmoothO=Model%doSmoothGrads, isGCornerO=isGCorner) ! [V]
call potCorot(Model%planet, Grid%shGrid, pCorot, Model%doGeoCorot) ! [V]
call calcGradIJ_cc(Model%planet%rp_m, Grid, isGCorner, pExB , State%gradPotE_cc , doLimO=.true. ) ! [V/m]
call calcGradIJ_cc(Model%planet%rp_m, Grid, isGCorner, pCorot, State%gradPotCorot_cc, doLimO=.false.) ! [V/m]
@@ -267,7 +276,7 @@ module raijuPreAdvancer
! lambda is constant, so just need grad(V^(-2/3) )
call calcGradVM_cc(Model%planet%rp_m, Model%planet%ri_m, Model%planet%magMoment, &
Grid, isGCorner, State%bvol, State%gradVM_cc, &
doSmoothO=.true., doLimO=.true.)
doSmoothO=Model%doSmoothGrads, doLimO=.true.)
end associate
end subroutine calcPotGrads_cc
@@ -301,7 +310,6 @@ module raijuPreAdvancer
associate(sh=>Grid%shGrid)
!$OMP PARALLEL DO default(shared) &
!$OMP schedule(dynamic) &
!$OMP private(i,j,qLow,qHigh)
do j=sh%jsg,sh%jeg
do i=sh%isg,sh%ieg
@@ -329,7 +337,6 @@ module raijuPreAdvancer
allocate(gradQtmp(sh%isg:sh%ieg,sh%jsg:sh%jeg, 2))
gradQtmp = gradQ
!$OMP PARALLEL DO default(shared) &
!$OMP schedule(dynamic) &
!$OMP private(i,j)
do j=sh%jsg+1,sh%jeg-1
do i=sh%isg+1,sh%ieg-1
@@ -480,13 +487,19 @@ module raijuPreAdvancer
gradVM(:,:,RAI_TH) = gradVM(:,:,RAI_TH) + dV0_dth
!$OMP PARALLEL DO default(shared) &
!$OMP schedule(dynamic) &
!$OMP private(i,j,bVolcc)
do j=sh%jsg,sh%jeg
do i=sh%isg,sh%ieg
bVolcc = toCenter2D(dV(i:i+1,j:j+1)) + DipFTV_colat(Grid%thcRp(i), B0) ! Will include smoothing of dV if enabled
!bVolcc = toCenter2D(V(i:i+1,j:j+1))
gradVM(i,j,:) = (-2./3.)*bVolcc**(-5./3.)*gradVM(i,j,:)
if(all(isGcorner(i:i+1,j:j+1))) then
!bVolcc = toCenter2D(dV(i:i+1,j:j+1)) + DipFTV_colat(Grid%thcRp(i), B0) ! Will include smoothing of dV if enabled
bVolcc = toCenter2D(V(i:i+1,j:j+1))
gradVM(i,j,:) = (-2./3.)*bVolcc**(-5./3.)*gradVM(i,j,:)
else
! gradVM should already be zero for this point coming out of calcGradIJ_cc, but zero it explicitly just in case
gradVM(i,j,RAI_PH) = 0.0
gradVM(i,j,RAI_TH) = 0.0
!gradVM(i,j,RAI_TH) = (-2./3.)*DipFTV_colat(Grid%thcRp(i), B0)**(-5./3.)*dV0_dth(i,j)
endif
enddo
enddo
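Note: the prefactor in this loop is just the chain rule. With vm = V^(-2/3), grad(vm) = -(2/3) * V^(-5/3) * grad(V), so the raw grad(V) accumulated in gradVM is scaled by the factor sketched below (with the non-good-corner guard the diff now adds):
module vm_grad_sketch
  implicit none
  integer, parameter :: rp = kind(1.0d0)
contains
  pure function vmGradPrefactor(bVol) result(f)
    real(rp), intent(in) :: bVol ! cell-centered flux-tube volume
    real(rp) :: f
    ! d/dx [ V**(-2/3) ] = -(2/3) * V**(-5/3) * dV/dx
    f = (-2.0_rp/3.0_rp)*bVol**(-5.0_rp/3.0_rp)
  end function vmGradPrefactor
end module vm_grad_sketch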
@@ -561,7 +574,6 @@ module raijuPreAdvancer
enddo
! Now everyone else
!$OMP PARALLEL DO default(shared) &
!$OMP schedule(dynamic) &
!$OMP private(i,j)
do j=jsg+1,jeg
do i=isg+1,ieg
@@ -569,6 +581,8 @@ module raijuPreAdvancer
enddo
enddo
call wrapJcorners(sh, Vsm)
! Write back to provided array
V = Vsm
end associate

View File

@@ -5,6 +5,7 @@ module raijustarter
use shellgrid
use xml_input
use planethelper
use arrayutil
! Raiju
use raijudefs
@@ -143,6 +144,7 @@ module raijustarter
!--- Plasmasphere ---!
call iXML%Set_Val(Model%doPlasmasphere, "plasmasphere/doPsphere",.true.)
call iXML%Set_Val(Model%doPsphEvol, 'plasmasphere/doEvol',.true.)
call iXML%Set_Val(Model%psphEvolRad, 'plasmasphere/evolRad', def_psphEvolRad)
! Determine number of species. First set default, then read from xml to overwrite if present
if (Model%doPlasmasphere) then
Model%nSpc = 3
@@ -164,13 +166,16 @@ module raijustarter
call iXML%Set_Val(Model%maxSun_buffer , "domain/sun_buffer" , def_maxSun_buffer)
call iXML%Set_Val(Model%maxTail_active, "domain/tail_active", def_maxTail_active)
call iXML%Set_Val(Model%maxSun_active , "domain/sun_active" , def_maxSun_active)
call iXML%Set_Val(Model%activeDomRad , "domain/activeRad" , def_activeDomRad)
! Store all distances as positive values, we'll add signs as needed later
Model%maxTail_buffer = abs(Model%maxTail_buffer)
Model%maxSun_buffer = abs(Model%maxSun_buffer)
Model%maxTail_active = abs(Model%maxTail_active)
Model%maxSun_active = abs(Model%maxSun_active)
Model%activeDomRad = abs(Model%activeDomRad)
!---Solver ---!
call iXML%Set_Val(Model%doSmoothGrads,'sim/doSmoothGrads',def_doSmoothGrads)
call iXML%Set_Val(Model%doUseVelLRs,'sim/useVelLRs',def_doUseVelLRs)
call iXML%Set_Val(Model%maxItersPerSec,'sim/maxIter',def_maxItersPerSec)
call iXML%Set_Val(Model%maxOrder,'sim/maxOrder',7)
@@ -379,71 +384,116 @@ module raijustarter
! dt for every lambda channel
allocate( State%dtk (Grid%Nk) )
call fillArray(State%dtk, 0.0_rp)
! nSteps for each channel
allocate( State%nStepk(Grid%Nk) )
call fillArray(State%nStepk, 0)
! Where we keep all our stuff
allocate( State%eta (sh%isg:sh%ieg, sh%jsg:sh%jeg, Grid%Nk) )
call fillArray(State%eta, 0.0_rp)
! Where we keep all our stuff but a half-step ahead of now
allocate( State%eta_half (sh%isg:sh%ieg, sh%jsg:sh%jeg, Grid%Nk) )
call fillArray(State%eta_half, 0.0_rp)
! Where we kept all our stuff one step ago
allocate( State%eta_last (sh%isg:sh%ieg, sh%jsg:sh%jeg, Grid%Nk) )
call fillArray(State%eta_last, 0.0_rp)
! Where all the stuff sorta was over the last State%dt
allocate( State%eta_avg (sh%isg:sh%ieg, sh%jsg:sh%jeg, Grid%Nk) )
call fillArray(State%eta_avg, 0.0_rp)
! I shells that should be evolved for each k
allocate( State%activeShells (sh%isg:sh%ieg, Grid%Nk) )
State%activeShells = .false.
! Effective potential (used for output only)
allocate( State%pEff(sh%isg:sh%ieg+1, sh%jsg:sh%jeg+1, Grid%Nk) )
call fillArray(State%pEff, 0.0_rp)
! Gradient of ionospheric potential
allocate( State%gradPotE (sh%isg:sh%ieg+1, sh%jsg:sh%jeg+1, 2) )
call fillArray(State%gradPotE, 0.0_rp)
! Gradient of corotation potential
allocate( State%gradPotCorot (sh%isg:sh%ieg+1, sh%jsg:sh%jeg+1, 2) )
call fillArray(State%gradPotCorot, 0.0_rp)
! Gradient of (flux tube volume ^ -2/3)
allocate( State%gradVM (sh%isg:sh%ieg+1, sh%jsg:sh%jeg+1, 2) )
call fillArray(State%gradVM, 0.0_rp)
! Interface and cell velocities
allocate( State%gradPotE_cc (sh%isg:sh%ieg, sh%jsg:sh%jeg, 2) )
call fillArray(State%gradPotE_cc, 0.0_rp)
allocate( State%gradPotCorot_cc(sh%isg:sh%ieg, sh%jsg:sh%jeg, 2) )
call fillArray(State%gradPotCorot_cc, 0.0_rp)
allocate( State%gradVM_cc (sh%isg:sh%ieg, sh%jsg:sh%jeg, 2) )
call fillArray(State%gradVM_cc, 0.0_rp)
allocate( State%iVel (sh%isg:sh%ieg+1, sh%jsg:sh%jeg+1, Grid%Nk, 2) )
call fillArray(State%iVel, 0.0_rp)
allocate( State%iVelL(sh%isg:sh%ieg+1, sh%jsg:sh%jeg+1, Grid%Nk, 2) )
call fillArray(State%iVelL, 0.0_rp)
allocate( State%iVelR(sh%isg:sh%ieg+1, sh%jsg:sh%jeg+1, Grid%Nk, 2) )
call fillArray(State%iVelR, 0.0_rp)
allocate( State%cVel (sh%isg:sh%ieg , sh%jsg:sh%jeg , Grid%Nk, 2) )
call fillArray(State%cVel, 0.0_rp)
! Coupling input moments
allocate( State%Pavg(sh%isg:sh%ieg , sh%jsg:sh%jeg, 0:Grid%nFluidIn) )
call fillArray(State%Pavg, 0.0_rp)
allocate( State%Davg(sh%isg:sh%ieg , sh%jsg:sh%jeg, 0:Grid%nFluidIn) )
call fillArray(State%Davg, 0.0_rp)
allocate( State%Pstd(sh%isg:sh%ieg , sh%jsg:sh%jeg, 0:Grid%nFluidIn) )
call fillArray(State%Pstd, 0.0_rp)
allocate( State%Dstd(sh%isg:sh%ieg , sh%jsg:sh%jeg, 0:Grid%nFluidIn) )
call fillArray(State%Dstd, 0.0_rp)
allocate( State%domWeights(sh%isg:sh%ieg , sh%jsg:sh%jeg) )
call fillArray(State%domWeights, 0.0_rp)
allocate( State%tiote(sh%isg:sh%ieg , sh%jsg:sh%jeg) )
call fillArray(State%tiote, 0.0_rp)
call initShellVar(Grid%shGrid, SHGR_CC, State%Tb)
State%Tb%data = 0.0
State%Tb%mask = .false.
! Bmin surface
allocate( State%Bmin (sh%isg:sh%ieg+1, sh%jsg:sh%jeg+1, 3 ) )
call fillArray(State%Bmin, 0.0_rp)
allocate( State%xyzMin (sh%isg:sh%ieg+1, sh%jsg:sh%jeg+1, 3 ) )
call fillArray(State%xyzMin, 0.0_rp)
allocate( State%xyzMincc(sh%isg:sh%ieg , sh%jsg:sh%jeg , 3 ) )
call fillArray(State%xyzMincc, 0.0_rp)
allocate( State%thcon (sh%isg:sh%ieg+1, sh%jsg:sh%jeg+1 ) )
call fillArray(State%thcon, 0.0_rp)
allocate( State%phcon (sh%isg:sh%ieg+1, sh%jsg:sh%jeg+1 ) )
call fillArray(State%phcon, 0.0_rp)
! 2D corner quantities
allocate( State%topo (sh%isg:sh%ieg+1, sh%jsg:sh%jeg+1) )
call fillArray(State%topo, 0)
allocate( State%espot (sh%isg:sh%ieg+1, sh%jsg:sh%jeg+1) )
call fillArray(State%espot, 0.0_rp)
allocate( State%pot_corot(sh%isg:sh%ieg+1, sh%jsg:sh%jeg+1) )
call fillArray(State%pot_corot, 0.0_rp)
allocate( State%bvol (sh%isg:sh%ieg+1, sh%jsg:sh%jeg+1) )
call fillArray(State%bvol, 0.0_rp)
allocate( State%bvol_cc (sh%isg:sh%ieg , sh%jsg:sh%jeg ) )
call fillArray(State%bvol_cc, 0.0_rp)
allocate( State%vaFrac (sh%isg:sh%ieg+1, sh%jsg:sh%jeg+1) )
call fillArray(State%vaFrac, 0.0_rp)
! 1D cell-centered quantities
allocate( State%bndLoc(sh%jsg:sh%jeg) )
call fillArray(State%bndLoc, 0)
! 2D cell-centered quantities
allocate( State%active (sh%isg:sh%ieg, sh%jsg:sh%jeg) )
call fillArray(State%active, 0)
allocate( State%active_last (sh%isg:sh%ieg, sh%jsg:sh%jeg) )
call fillArray(State%active_last, 0)
allocate( State%OCBDist(sh%isg:sh%ieg, sh%jsg:sh%jeg) )
call fillArray(State%OCBDist, 0)
allocate( State%lossRates (sh%isg:sh%ieg, sh%jsg:sh%jeg, Grid%Nk) )
call fillArray(State%lossRates, 0.0_rp)
allocate( State%precipType_ele (sh%isg:sh%ieg, sh%jsg:sh%jeg, Grid%Nk) )
call fillArray(State%precipType_ele, 0.0_rp)
allocate( State%lossRatesPrecip(sh%isg:sh%ieg, sh%jsg:sh%jeg, Grid%Nk) )
call fillArray(State%lossRatesPrecip, 0.0_rp)
!allocate( State%precipNFlux (sh%isg:sh%ieg, sh%jsg:sh%jeg, Grid%Nk) )
!allocate( State%precipEFlux (sh%isg:sh%ieg, sh%jsg:sh%jeg, Grid%Nk) )
allocate( State%dEta_dt (sh%isg:sh%ieg, sh%jsg:sh%jeg, Grid%Nk) )
call fillArray(State%dEta_dt, 0.0_rp)
allocate( State%CCHeatFlux (sh%isg:sh%ieg, sh%jsg:sh%jeg, Grid%Nk) )
call fillArray(State%CCHeatFlux, 0.0_rp)
! Coupling output data
allocate(State%Den (0:Model%nSpc))
allocate(State%Press(0:Model%nSpc))
@@ -478,15 +528,22 @@ module raijustarter
! Only bother allocating persistent versions of debug stuff if we need them
if (Model%doOutput_debug) then
allocate( State%etaFaceReconL(sh%isg:sh%ieg+1, sh%jsg:sh%jeg+1, Grid%Nk, 2) )
call fillArray(State%etaFaceReconL, 0.0_rp)
allocate( State%etaFaceReconR(sh%isg:sh%ieg+1, sh%jsg:sh%jeg+1, Grid%Nk, 2) )
call fillArray(State%etaFaceReconR, 0.0_rp)
allocate( State%etaFacePDML (sh%isg:sh%ieg+1, sh%jsg:sh%jeg+1, Grid%Nk, 2) )
call fillArray(State%etaFacePDML, 0.0_rp)
allocate( State%etaFacePDMR (sh%isg:sh%ieg+1, sh%jsg:sh%jeg+1, Grid%Nk, 2) )
call fillArray(State%etaFacePDMR, 0.0_rp)
allocate( State%etaFlux (sh%isg:sh%ieg+1, sh%jsg:sh%jeg+1, Grid%Nk, 2) )
call fillArray(State%etaFlux, 0.0_rp)
endif
State%KpTS%wID = Model%tsF
call State%KpTS%initTS("Kp")
end associate
! For now, just set t to tStart and ts to 0
State%t = Model%t0
State%ts = 0
@@ -497,6 +554,8 @@ module raijustarter
! Similarly, set vaFrac to safe value in case stand-alone never writes to it
State%vaFrac = 1.0
State%isFirstCpl = .true.
! Init State sub-modules
if (Model%isSA) then
! If we are standalone, this is the place to get coldStarter settings

View File

@@ -93,7 +93,7 @@ module raijulosses
integer :: k
logical, dimension(Grid%shGrid%isg:Grid%shGrid%ieg,Grid%shGrid%jsg:Grid%shGrid%jeg) :: isGood
where (State%active .eq. RAIJUACTIVE)
where (State%active .ne. RAIJUINACTIVE)
isGood = .true.
elsewhere
isGood = .false.

View File

@@ -167,7 +167,6 @@ module mixmain
call dragonking_total(I(h)%aurora,I(h)%G,I(h)%St,I(h)%conductance)
if (present(gcm)) then
write(*,*) 'doGCM!'
call conductance_total(I(h)%conductance,I(h)%G,I(h)%St,gcm,h)
else
!write(*,*) "conductance: total"

View File

@@ -144,7 +144,7 @@ module mixparams
call xmlInp%Set_Val(Params%apply_cap,"conductance/apply_cap",.true.)
call xmlInp%Set_Val(Params%gamma,"precipitation/gamma",5.0/3.0)
call xmlInp%Set_Val(Params%doEMA,"conductance/doEMA",.true.)
call xmlInp%Set_Val(Params%doET,"conductance/doET",.true.)
call xmlInp%Set_Val(Params%doET,"conductance/doET",.false.)
! if this is on, Params%F107 read from xml file above will be overwritten
! and F107 will be set from the SW file (i.e., from OMNI data)

View File

@@ -29,6 +29,7 @@ module uservoltic
real(rp), private :: Rho0,P0
real(rp), private :: Kp0
logical , private :: doPP0 !Use MF starter plasmasphere
logical , private :: writeBCData ! Whether to write IC BC data
! type for remix BC
type, extends(innerIBC_T) :: IonInnerBC_T
@@ -58,6 +59,7 @@ module uservoltic
procedure(HackStep_T), pointer :: tsHack
procedure(HackSaveRestart_T), pointer :: saveResHack
procedure(HackLoadRestart_T), pointer :: loadResHack
procedure(HackIO_T), pointer :: saveIOHack
real(rp) :: M0g
integer :: s,s0
@@ -68,10 +70,12 @@ module uservoltic
tsHack => NULL()
saveResHack => NULL()
loadResHack => NULL()
saveIOHack => NULL()
Model%HackE => eHack
Model%HackStep => tsHack
Model%HackSaveRestart => saveResHack
Model%HackLoadRestart => loadResHack
Model%HackIO => saveIOHack
!Get defaults from input deck
@@ -79,6 +83,7 @@ module uservoltic
call inpXML%Set_Val(Rho0 ,"prob/Rho0",0.2_rp)
call inpXML%Set_Val(P0 ,"prob/P0" ,0.001_rp)
call inpXML%Set_Val(doPP0,"prob/doPP0",.false.)
call inpXML%Set_Val(writeBCData,"prob/writeBC",.false.)
!Set magnetosphere parameters
call setMagsphere(Model,inpXML)
@@ -159,6 +164,9 @@ module uservoltic
Model%HackLoadRestart => loadResHack
saveResHack => SaveUserRes
Model%HackSaveRestart => saveResHack
saveIOHack => SaveUserIO
Model%HackIO => saveIOHack
!Local functions
!NOTE: Don't put BCs here as they won't be visible after the initialization call
@@ -603,13 +611,13 @@ module uservoltic
nbc = FindBC(Model,Grid,INI)
SELECT type(iiBC=>Grid%externalBCs(nbc)%p)
TYPE IS (IonInnerBC_T)
if(ioExist(inH5,"inEijk")) then
if(ioExist(inH5,"_inEijk")) then
call ClearIO(IOVars)
call AddInVar(IOVars,"inEijk")
call AddInVar(IOVars,"inExyz")
call AddInVar(IOVars,"_inEijk")
call AddInVar(IOVars,"_inExyz")
call ReadVars(IOVars,.false.,inH5)
call IOArray4DFill(IOVars, "inEijk", iiBC%inEijk(:,:,:,:) )
call IOArray4DFill(IOVars, "inExyz", iiBC%inExyz(:,:,:,:) )
call IOArray4DFill(IOVars, "_inEijk", iiBC%inEijk(:,:,:,:) )
call IOArray4DFill(IOVars, "_inExyz", iiBC%inExyz(:,:,:,:) )
endif
CLASS DEFAULT
! do nothing on gamera ranks without this BC
@@ -627,18 +635,47 @@ module uservoltic
integer :: nbc
if (.not. writeBCData) return
if ( Grid%hasLowerBC(IDIR) ) then
nbc = FindBC(Model,Grid,INI)
SELECT type(iiBC=>Grid%externalBCs(nbc)%p)
TYPE IS (IonInnerBC_T)
call AddOutVar(IOVars, "inEijk", iiBC%inEijk(:,:,:,:) )
call AddOutVar(IOVars, "inExyz", iiBC%inExyz(:,:,:,:) )
call AddOutVar(IOVars, "_inEijk", iiBC%inEijk(:,:,:,:) )
call AddOutVar(IOVars, "_inExyz", iiBC%inExyz(:,:,:,:) )
CLASS DEFAULT
! do nothing on gamera ranks without this BC
END SELECT
endif
end subroutine SaveUserRes
subroutine SaveUserIO(Model,Grid,State,IOVars)
class(Model_T), intent(in) :: Model
class(Grid_T) , intent(in) :: Grid
class(State_T), intent(in) :: State
type(IOVAR_T), dimension(:), intent(inout) :: IOVars
integer :: nbc
if (.not. writeBCData) return
if ( Grid%hasLowerBC(IDIR) ) then
nbc = FindBC(Model,Grid,INI)
SELECT type(iiBC=>Grid%externalBCs(nbc)%p)
TYPE IS (IonInnerBC_T)
if (writeGhosts) then
!call AddOutVar(IOVars, "_inEijk", iiBC%inEijk(:,:,:,:) )
call AddOutVar(IOVars, "_inExyz", iiBC%inExyz(:,:,:,:) )
else
!call AddOutVar(IOVars, "_inEijk", iiBC%inEijk(:,Grid%js:Grid%je+1,Grid%ks:Grid%ke+1,:) )
call AddOutVar(IOVars, "_inExyz", iiBC%inExyz(:,Grid%js:Grid%je,Grid%ks:Grid%ke,:) )
endif
CLASS DEFAULT
! do nothing on gamera ranks without this BC
END SELECT
endif
end subroutine SaveUserIO
end module uservoltic
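Note: SaveUserIO plugs into the same hook mechanism as the restart hacks: the model holds a procedure pointer initialized to null() and user code points it at a concrete writer. A self-contained sketch of the pattern (the interface and names are illustrative, not the model's actual HackIO_T signature):
module hack_hook_sketch
  implicit none
  abstract interface
    subroutine ioHook_T(msg)
      character(*), intent(in) :: msg
    end subroutine ioHook_T
  end interface
  procedure(ioHook_T), pointer :: HackIO => null()
contains
  subroutine SaveUserIO_sketch(msg)
    character(*), intent(in) :: msg
    print *, 'user IO hook: ', msg
  end subroutine SaveUserIO_sketch
end module hack_hook_sketch

program hook_demo
  use hack_hook_sketch
  implicit none
  HackIO => SaveUserIO_sketch ! registration, as in the init code above
  if (associated(HackIO)) call HackIO('extra BC data')
end program hook_demo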

View File

@@ -422,8 +422,8 @@ module ebsquish
return
endif
!Do quick short-cut to save us some effort
isGood = inShueMP_SM(xyz,ShueScl)
!Can add short circuit code here, but nothing for now
isGood = .true. !Let's be optimistic
if (.not. isGood) return
! trap for when we're within epsilon of the inner boundary

View File

@@ -1,257 +0,0 @@
!Various routines to handle flux-tube calculations to feed inner magnetosphere models
module imagtubes
use volttypes
use voltcpltypes
use streamline
use chmpdbz, ONLY : DipoleB0
use imaghelper
use planethelper
use shellGrid
implicit none
contains
!subroutine init_TubeShell(sh, tubeShell, maskO, TioTeO)
subroutine init_TubeShell(sh, tubeShell, TioTeO)
type(ShellGrid_T), intent(in) :: sh
!! Voltron shellgrid
type(TubeShell_T), intent(inout) :: tubeShell
!! IMAGTubeShell object we are initializing
!logical, dimension(:,:), intent(in), optional :: maskO
real(rp), intent(in), optional :: TioteO
!! Default Ti/Te ratio
integer :: i
real(rp) :: tiote
if (present(TioteO)) then
tiote = TioteO
else
tiote = 4.0
endif
do i=1,NDIM
call initShellVar(sh, SHGR_CORNER, tubeShell%xyz0(i))
call initShellVar(sh, SHGR_CORNER, tubeShell%X_bmin(i))
enddo
call initShellVar(sh, SHGR_CORNER, tubeShell%lat0 )
call initShellVar(sh, SHGR_CORNER, tubeShell%lon0 )
call initShellVar(sh, SHGR_CORNER, tubeShell%latc )
call initShellVar(sh, SHGR_CORNER, tubeShell%lonc )
call initShellVar(sh, SHGR_CORNER, tubeShell%invlat )
call initShellVar(sh, SHGR_CORNER, tubeShell%topo )
call initShellVar(sh, SHGR_CORNER, tubeShell%bmin )
call initShellVar(sh, SHGR_CORNER, tubeShell%bVol )
call initShellVar(sh, SHGR_CORNER, tubeShell%Lb )
call initShellVar(sh, SHGR_CORNER, tubeShell%Tb )
call initShellVar(sh, SHGR_CORNER, tubeShell%wMAG )
call initShellVar(sh, SHGR_CORNER, tubeShell%rCurv )
call initShellVar(sh, SHGR_CORNER, tubeShell%avgBeta)
do i=0,MAXTUBEFLUIDS
call initShellVar(sh, SHGR_CORNER, tubeShell%avgP(i))
call initShellVar(sh, SHGR_CORNER, tubeShell%avgN(i))
call initShellVar(sh, SHGR_CORNER, tubeShell%stdP(i))
call initShellVar(sh, SHGR_CORNER, tubeShell%stdN(i))
enddo
call initShellVar(sh, SHGR_CORNER, tubeShell%losscone )
call initShellVar(sh, SHGR_CORNER, tubeShell%lossconec)
call initShellVar(sh, SHGR_CORNER, tubeShell%TioTe0 )
call initShellVar(sh, SHGR_CORNER, tubeShell%nTrc )
tubeShell%TioTe0%data = tiote
end subroutine init_TubeShell
! Dipole flux tube info
subroutine DipoleTube(vApp,lat,lon,ijTube)
type(voltApp_T), intent(in) :: vApp
real(rp), intent(in) :: lat,lon
type(IMAGTube_T), intent(out) :: ijTube
real(rp) :: L,colat
real(rp) :: mdipole
mdipole = ABS(vapp%planet%magMoment)*G2T ! dipole moment in T
colat = PI/2 - lat
L = 1.0/(sin(colat)**2.0)
!ijTube%Vol = 32./35.*L**4.0/mdipole
!Use full dipole FTV formula, convert Rx/nT => Rx/T
ijTube%Vol = DipFTV_L(L,vapp%planet%magMoment)*1.0e+9
ijTube%X_bmin(XDIR) = L*cos(lon)*vApp%planet%rp_m !Rp=>meters
ijTube%X_bmin(YDIR) = L*sin(lon)*vApp%planet%rp_m !Rp=>meters
ijTube%X_bmin(ZDIR) = 0.0
ijTube%bmin = mdipole/L**3.0
ijTube%topo = 2
ijTube%beta_average = 0.0
ijTube%Pave = 0.0
ijTube%Nave = 0.0
ijTube%latc = -lat
ijTube%lonc = lon
ijTube%Lb = L !Just lazily using L shell
ijTube%Tb = 0.0
ijTube%losscone = 0.0
ijTube%rCurv = L/3.0
ijTube%wIMAG = 1.0 !Much imag
ijTube%TioTe0 = def_tiote
end subroutine DipoleTube
subroutine MHDTube(ebTrcApp,planet,colat,lon,r,ijTube,bTrc,nTrcO,doShiftO)
type(ebTrcApp_T), intent(in) :: ebTrcApp
type(planet_T), intent(in) :: planet
real(rp), intent(in) :: colat, lon, r
type(IMAGTube_T), intent(inout) :: ijTube
type(magLine_T), intent(inout) :: bTrc
integer, intent(in), optional :: nTrcO
logical, intent(in), optional :: doShiftO
integer :: s
real(rp) :: t, bMin,bIon
real(rp), dimension(NDIM) :: x0, bEq, xyzIon
real(rp), dimension(NDIM) :: xyzC,xyzIonC
integer :: OCb
real(rp) :: bD,bP,dvB,bBeta,rCurv, bDRC, bPRC, bBetaRC, stdP, stdD
real(rp) :: VaMKS,CsMKS,VebMKS !Speeds in km/s
real(rp) :: TiEV !Temperature in ev
logical :: doShift,doShue
if (present(doShiftO)) then
doShift = doShiftO
else
doShift = .false.
endif
! Take seed point in spherical coordinates
xyzIon(XDIR) = r*sin(colat)*cos(lon)
xyzIon(YDIR) = r*sin(colat)*sin(lon)
xyzIon(ZDIR) = r*cos(colat)
if (doShift) then
!! TODO: Decide if this is how we want to do it
x0 = DipoleShift(xyzIon, planet%ri_m/planet%rp_m + TINY)
else
x0 = xyzIon
endif
bIon = norm2(DipoleB0(xyzIon))*oBScl*1.0e-9 !EB=>T, ionospheric field strength
associate(ebModel=>ebTrcApp%ebModel,ebGr=>ebTrcApp%ebState%ebGr,ebState=>ebTrcApp%ebState)
!Now do field line trace
t = ebState%eb1%time !Time in CHIMP units
!!TODO: What do we do when we want times in between steps? Somethign similar to what slice/chop do to output
if (present(nTrcO)) then
call genLine(ebModel,ebState,x0,t,bTrc,nTrcO,doShueO=.false.,doNHO=.true.)
else
call genLine(ebModel,ebState,x0,t,bTrc, doShueO=.false.,doNHO=.true.)
endif
!Topology
!OCB = 0 (solar wind), 1 (half-closed), 2 (both ends closed)
OCb = FLTop(ebModel,ebGr,bTrc)
ijTube%topo = OCb
if (OCb /= 2) then
! If the field line hasn't closed after 15 minutes we are legally allowed to leave
ijTube%X_bmin = 0.0
ijTube%bmin = 0.0
ijTube%Vol = -1.0
ijTube%Pave = 0.0
ijTube%Nave = 0.0
ijTube%Pstd = 0.0
ijTube%Nstd = 0.0
ijTube%beta_average = 0.0
ijTube%latc = 0.0
ijTube%lonc = 0.0
ijTube%Lb = 0.0
ijTube%Tb = 0.0
ijTube%losscone = 0.0
ijTube%rCurv = 0.0
ijTube%wIMAG = 0.0
ijTube%Veb = 0.0
return
endif
!Get diagnostics from closed field line
!Minimal surface (bEq in Rp, bMin in EB)
call FLEq(ebModel,bTrc,bEq,bMin)
bMin = bMin*oBScl*1.0e-9 !EB=>Tesla
bEq = bEq*planet%rp_m ! Rp -> meters
! Plasma quantities
do s=0,ebModel%nSpc
!dvB = Flux-tube volume (Re/EB)
!write(*,*)"FLThermo, s=",s
call FLThermo(ebModel,ebGr,bTrc,bD,bP,dvB,bBeta,s)
call FLStdev (ebModel,ebGr,bTrc,bD,bP,stdD,stdP,s)
!call FLStdev (ebModel,ebGr,bTrc,stdD,stdP,s)
bP = bP*1.0e-9 !nPa=>Pa
bD = bD*1.0e+6 !#/cc => #/m3
ijTube%Pave(s) = bP
ijTube%Nave(s) = bD
ijTube%Pstd(s) = stdP * 1.0e-9 ! nPa=>Pa
ijTube%Nstd(s) = stdD * 1.0e+6 ! #/cc=>#/m3
if (s .eq. RCFLUID) then
bPRC = bP
bDRC = bD
bBetaRC = bBeta
endif
enddo
!Converts Rp/EB => Rp/T
dvB = dvB/(oBScl*1.0e-9)
ijTube%X_bmin = bEq
ijTube%bmin = bMin
ijTube%Vol = dvB ! Taken from last FLThermo call
ijTube%beta_average = bBetaRC
call FLConj(ebModel,ebGr,bTrc,xyzC)
if (doShift) then
xyzIonC = DipoleShift(xyzC, planet%ri_m)
else
xyzIonC = xyzC
endif
ijTube%latc = asin(xyzIonC(ZDIR)/norm2(xyzIonC))
ijTube%lonc = modulo( atan2(xyzIonC(YDIR),xyzIonC(XDIR)),2*PI )
ijTube%Lb = FLArc(ebModel,ebGr,bTrc)
!NOTE: Bounce timescale may be altered to use IMAG hot density
ijTube%Tb = FLAlfvenX(ebModel,ebGr,bTrc)
!ijTube%Tb = FLFastX(ebModel,ebGr,bTrc)
!write(*,*) 'Bounce compare: = ', FLFastX(ebModel,ebGr,bTrc)/FLAlfvenX(ebModel,ebGr,bTrc)
ijTube%losscone = asin(sqrt(bMin/bIon))
!Get curvature radius and ExB velocity [km/s]
call FLCurvRadius(ebModel,ebGr,ebState,bTrc,rCurv,VebMKS)
ijTube%rCurv = rCurv
ijTube%Veb = VebMKS
!Get confidence interval
!VaMKS = flux tube arc length [km] / Alfven crossing time [s]
VaMKS = (ijTube%Lb*planet%rp_m*1.0e-3)/ijTube%Tb
!CsMKS = 9.79 x sqrt(5/3 * Ti) km/s, Ti eV
!TiEV = (1.0e+3)*DP2kT(bDRC*1.0e-6,bPRC*1.0e+9) !Temp in eV
TiEV = (1.0e+3)*DP2kT(ijTube%Nave(BLK)*1.0D-6,ijTube%Pave(BLK)*1.0D9) !Temp in eV
CsMKS = 9.79*sqrt((5.0/3)*TiEV)
ijTube%wIMAG = VaMKS/( sqrt(VaMKS**2.0 + CsMKS**2.0) + VebMKS)
end associate
end subroutine MHDTube
end module imagtubes

View File

@@ -74,7 +74,9 @@ module innermagsphere
!call vApp%imagApp%doInit(iXML,gApp%Model%isRestart,vApp)
call vApp%imagApp%InitModel(iXML)
call vApp%imagApp%InitIO(iXML)
if(vApp%writeFiles) then
call vApp%imagApp%InitIO(iXML)
endif
end subroutine InitInnerMag

View File

@@ -79,13 +79,11 @@ module imag2mhd_interface
!Below inner boundary, do dipole projection
isGoodCC(i,j,k) = .true.
xyz = Gr%xyzcc(i,j,k,:) !Gamera grid center
call Proj2Rad(xyz,Rion,x1,x2)
Gr%Gas0(i,j,k,PROJLAT) = x1
call NHProj(xyz,x1,x2)
Gr%Gas0(i,j,k,PROJLAT) = x1 !Must project to NH
Gr%Gas0(i,j,k,PROJLON) = x2
else
!Get value from xyzsquish
if ( all(vApp%chmp2mhd%isGood(i:i+1,j:j+1,k:k+1)) ) then
!All values are good, so just do this thing
call SquishCorners(vApp%chmp2mhd%xyzSquish(i:i+1,j:j+1,k:k+1,1),Qs)
@@ -123,7 +121,7 @@ module imag2mhd_interface
if (i < Gr%is) then
!Use dipole projection
xyz = Gr%xyz(i,j,k,:) !Gamera grid corner
call Proj2Rad(xyz,Rion,x1,x2)
call NHProj(xyz,x1,x2)
isG = .true.
else
x1 = vApp%chmp2mhd%xyzSquish(i,j,k,1)
@@ -284,37 +282,43 @@ module imag2mhd_interface
real(rp), intent(inout) :: Q(Gr%isg:Gr%ieg,Gr%jsg:Gr%jeg,Gr%ksg:Gr%keg)
integer :: i,j,k,ip,jp,kp
logical :: isActive
!$OMP PARALLEL DO default(shared) collapse(2) &
!$OMP private(i,j,k,isActive,ip,jp,kp)
!$OMP private(i,j,k,ip,jp,kp)
do k=Gr%ksg,Gr%keg
do j=Gr%jsg,Gr%jeg
do i=Gr%isg,Gr%ieg
isActive = (j >= Gr%js) .and. (j <= Gr%je) .and. &
(k >= Gr%ks) .and. (k <= Gr%ks)
if (isActive) cycle
!If still here map this ghost to active and set value based on active
call lfmIJKcc(Model,Gr,i,j,k,ip,jp,kp)
Q(i,j,k) = Q(ip,jp,kp)
if (.not. isPhysical(Model,Gr,i,j,k)) then
!This is a geometric ghost so we can map it to a physical cell
call lfmIJKcc(Model,Gr,i,j,k,ip,jp,kp)
Q(i,j,k) = Q(ip,jp,kp)
endif !isPhys
enddo
enddo !j
enddo !k
end subroutine FillGhostsCC
!Project xyz along dipole to R0 and return lat (x1) and lon (x2)
subroutine Proj2Rad(xyz,R0,x1,x2)
real(rp), intent(in ) :: xyz(NDIM), R0
!Checks if cell is "physical" as opposed to "geometric".
!Ie, i ghosts are physical but j/k ghosts are geometric (they point to other physical cells)
function isPhysical(Model,Gr,i,j,k)
type(Model_T), intent(in) :: Model
type(Grid_T) , intent(in) :: Gr
integer, intent(in) :: i,j,k
logical :: isPhysical
isPhysical = (j >= Gr%js) .and. (j <= Gr%je) .and. &
(k >= Gr%ks) .and. (k <= Gr%ke)
end function isPhysical
!Get azimuth and invariant latitude
subroutine NHProj(xyz,x1,x2)
real(rp), intent(in ) :: xyz(NDIM)
real(rp), intent(out) :: x1,x2
real(rp), dimension(NDIM) :: xyz0
xyz0 = DipoleShift(xyz,R0)
x1 = asin(xyz0(ZDIR)/R0) !Lat
x2 = katan2(xyz0(YDIR),xyz0(XDIR)) !katan => [0,2pi] instead of [-pi,pi]
end subroutine Proj2Rad
x1 = InvLatitude(xyz)
x2 = katan2(xyz(YDIR),xyz(XDIR)) !katan => [0,2pi] instead of [-pi,pi]
end subroutine NHProj
end subroutine
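Note: NHProj replaces the fixed-radius dipole projection with an invariant-latitude mapping. Assuming InvLatitude follows the standard dipole relation r = L cos^2(lambda), a sketch of what that mapping computes:
module invlat_sketch
  implicit none
  integer, parameter :: rp = kind(1.0d0)
contains
  pure function invLatDipole(xyz) result(invlat)
    real(rp), intent(in) :: xyz(3)
    real(rp) :: invlat, r, lat, L
    r   = norm2(xyz)
    lat = asin(xyz(3)/r)          ! local latitude
    L   = r/cos(lat)**2           ! dipole L shell through this point
    invlat = acos(sqrt(1.0_rp/L)) ! latitude where that shell crosses r = 1
  end function invLatDipole
end module invlat_sketch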

View File

@@ -10,59 +10,8 @@ module imag2mix_interface
implicit none
type(mixGrid_T), private :: rai2mixG
integer, private :: Np_mix, Nt_mix, Np_rai, Nt_rai, Npc_rai, Ntc_rai
logical, private :: isInit = .true.
contains
subroutine init_raiju_mix(imagApp,remixApp)
! called by subroutine initializeFromGamera in module voltapp
! type(voltApp_T), intent(inout) :: vApp
class(imagCoupler_T), intent(in) :: imagApp
type(mixApp_T), intent(inout) :: remixApp
real(rp), dimension(:,:), allocatable :: raijup, raijut ! Np x Nt, remix-style 2-D arrays to hold the RAIJU grid
integer :: i, j
! associate(remixApp=>vApp%remixApp, imagApp=>vApp%imagApp )
Nt_mix = remixApp%ion(NORTH)%G%Nt
Np_mix = remixApp%ion(NORTH)%G%Np
SELECT TYPE (imagA=>imagApp)
TYPE IS (raijuCoupler_T)
! in here you can treat imagType as if it is type raijuCoupler_T, and it points to vApp%imagApp
Np_rai = imagA%raiApp%Grid%shGrid%Np
Nt_rai = imagA%raiApp%Grid%shGrid%Nt
! Np x Nt, transposed relative to mix grid.
if (.not.allocated(raijup)) allocate(raijup(Np_rai, Nt_rai))
if (.not.allocated(raijut)) allocate(raijut(Np_rai, Nt_rai))
! construct the 2-D grid
!! thc/phc: (Nt or Np) [radians] grid centers
!! th (theta) is colatitude and runs from north pole toward south
do j=1,Np_rai
raijut(j,:) = imagA%raiApp%Grid%shGrid%thc
enddo
!! Phi is longitude, with zero/2pi at 12 MLT
do i=1,Nt_rai
raijup(:,i) = imagA%raiApp%Grid%shGrid%phc
enddo
! call remix grid constructor
call init_grid_fromTP(rai2mixG,raijut(1:Np_rai-1,:),raijup(1:Np_rai-1,:),isSolverGrid=.false.)
Npc_rai = rai2mixG%Np ! Np_rai-1
Ntc_rai = rai2mixG%Nt
CLASS DEFAULT
WRITE (*,*) "Imag Coupler is an unsupported type"
stop
END SELECT
! end associate
end subroutine init_raiju_mix
subroutine CoupleIMagToMix(vApp)
class(voltApp_T), intent(inout) :: vApp
integer :: i,j
@@ -231,7 +180,7 @@ module imag2mix_interface
imP_avg(RAI_EDEN) = imP_avg(RAI_EDEN )/nPnts**2
imP_avg(RAI_EPRE) = imP_avg(RAI_EPRE )/nPnts**2
imP_avg(RAI_NPSP) = imP_avg(RAI_NPSP )/nPnts**2
imP_avg(RAI_EAVG) = imP_avg(RAI_EFLUX) / imP_avg(RAI_ENFLX)
imP_avg(RAI_EAVG) = imP_avg(RAI_EFLUX) / max(TINY, imP_avg(RAI_ENFLX))
imP_avg(RAI_GTYPE) = imP_avg(RAI_GTYPE)/nGood
imP_avg(RAI_THCON) = imP_avg(RAI_THCON)/nGood
imP_avg(RAI_PHCON) = imP_avg(RAI_PHCON)/nGood
@@ -265,141 +214,5 @@ module imag2mix_interface
end subroutine CoupleIMagToMix
subroutine mapRaijuToRemix(vApp)
type(voltApp_T), intent(inout) :: vApp
! type(raijuCoupler_T), intent(inout) :: imagApp
! type(mixApp_T), intent(inout) :: remixApp
real(rp), dimension(:,:,:), allocatable :: rai_fluxes, mix_fluxes
real(rp), dimension(:,:), allocatable :: mix_flux
real(rp), dimension(:,:), allocatable :: mixt, mixp
real(rp), dimension(:,:), allocatable :: raijup, raijut ! Np x Nt, remix-style 2-D arrays to hold the RAIJU grid
real(rp), dimension(:), allocatable :: phc, thc
integer :: Nf = nVars_imag2mix
integer :: i,j
type(Map_T) :: raiMap
! collect raiju fluxes.
! in getMomentsPrecip: allocate(rai_fluxes (is:ie,js:je,nVars_imag2mix)), (Nt_rai, Np_rai, Nf)
! call vApp%imagApp%getMomentsPrecip(rai_fluxes)
associate(remixApp=>vApp%remixApp ) !, imagApp=>vApp%imagApp
allocate(mix_fluxes(Np_mix,Nt_mix,Nf))
allocate(mix_flux(Np_mix,Nt_mix))
mix_fluxes = 0.0_rp
mix_flux = 0.0_rp
mixt = remixApp%ion(NORTH)%G%t ! G%t(G%Np,G%Nt)
mixp = remixApp%ion(NORTH)%G%p
! NH mapping to remix
call mix_set_map(rai2mixG,remixApp%ion(NORTH)%G,raiMap)
do i=1,Nf
call mix_map_grids(raiMap,transpose(rai_fluxes(:,1:Npc_rai,i)), mix_flux)
mix_fluxes(:,:,i) = mix_flux
enddo
remixApp%ion(NORTH)%St%Vars(:,:,IM_EAVG ) = mix_fluxes(:,:,RAI_EAVG ) ! [keV]
remixApp%ion(NORTH)%St%Vars(:,:,IM_ENFLX) = mix_fluxes(:,:,RAI_ENFLX) ! [#/cm^2/s]
remixApp%ion(NORTH)%St%Vars(:,:,IM_EFLUX) = mix_fluxes(:,:,RAI_EFLUX) ! [ergs/cm^2/s]
remixApp%ion(NORTH)%St%Vars(:,:,IM_GTYPE) = mix_fluxes(:,:,RAI_GTYPE) ! [0~1]
remixApp%ion(NORTH)%St%Vars(:,:,IM_EDEN ) = mix_fluxes(:,:,RAI_EDEN ) ! [#/m^3]
remixApp%ion(NORTH)%St%Vars(:,:,IM_EPRE ) = mix_fluxes(:,:,RAI_EPRE ) ! [Pa]
remixApp%ion(NORTH)%St%Vars(:,:,IM_NPSP ) = mix_fluxes(:,:,RAI_NPSP ) ! [#/m^3]
! SH mapping
mix_fluxes = 0.0_rp
call mapRaijuSToRemix(rai_fluxes,mixt,mixp,mix_fluxes)
remixApp%ion(SOUTH)%St%Vars(:,:,IM_EAVG ) = mix_fluxes(Np_mix:1:-1,:,RAI_EAVG ) ! [keV]
remixApp%ion(SOUTH)%St%Vars(:,:,IM_ENFLX) = mix_fluxes(Np_mix:1:-1,:,RAI_ENFLX) ! [#/cm^2/s]
remixApp%ion(SOUTH)%St%Vars(:,:,IM_EFLUX) = mix_fluxes(Np_mix:1:-1,:,RAI_EFLUX) ! [ergs/cm^2/s]
remixApp%ion(SOUTH)%St%Vars(:,:,IM_GTYPE) = mix_fluxes(Np_mix:1:-1,:,RAI_GTYPE) ! [0~1]
remixApp%ion(SOUTH)%St%Vars(:,:,IM_EDEN ) = mix_fluxes(Np_mix:1:-1,:,RAI_EDEN ) ! [#/m^3]
remixApp%ion(SOUTH)%St%Vars(:,:,IM_EPRE ) = mix_fluxes(Np_mix:1:-1,:,RAI_EPRE ) ! [Pa]
remixApp%ion(SOUTH)%St%Vars(:,:,IM_NPSP ) = mix_fluxes(Np_mix:1:-1,:,RAI_NPSP ) ! [#/m^3]
end associate
end subroutine mapRaijuToRemix
subroutine mapRaijuSToRemix(rai_fluxes,mixt,mixp,mix_fluxes)
! Directly map from irregular raiju SH grid to ReMIX.
real(rp), dimension(Nt_rai, Np_rai,nVars_imag2mix), intent(in) :: rai_fluxes
real(rp), dimension(Np_mix, Nt_mix), intent(in) :: mixt, mixp
real(rp), dimension(Np_mix, Nt_mix,nVars_imag2mix), intent(out) :: mix_fluxes
real(rp), dimension(Nt_rai, Np_rai) :: colatc, glongc
real(rp), dimension(:,:), allocatable :: mixtE, mixpE
real(rp), dimension(Np_mix, Nt_mix) :: Ainvdwgt2
real(rp) :: dlat, delt, delp, invdwgt
integer :: i, j, i0, j0, il, iu, jp, dj
! Source grid: latc is negative. colatc is positive from ~15 to 75 deg. Note latc=0 for open field lines.
colatc = PI-rai_fluxes(:,:,RAI_THCON) ! RAI_THCON is the conjugate co-lat in radians (pi/2 to pi); PI - RAI_THCON maps it to 0 to pi/2
glongc = rai_fluxes(:,:,RAI_PHCON) ! conjugate long in radians, 0-2pi, need to double check if they are consistent at lon=0
! Destination grid: remix Grid.
dlat = mixt(1,2)-mixt(1,1)
! dj is the ratio of rcm dlon to remix dlon, i.e. min number of rcm/raiju cells to collect.
! now with 360 raiju/rcm longitudinal cells, dj is 1 with quad res remix grid.
dj = nint(dble(Np_mix)/dble(Np_rai))
! make an extended mixt/mixp grid for easier periodic boundary processing.
allocate(mixtE(1-dj:Np_mix+dj,1:Nt_mix))
allocate(mixpE(1-dj:Np_mix+dj,1:Nt_mix))
mixtE(1:Np_mix,:) = mixt
mixtE(1-dj:0,:) = mixt(Np_mix+1-dj:Np_mix,:)
mixtE(Np_mix+1:Np_mix+dj,:) = mixt(1:dj,:)
mixpE(1:Np_mix,:) = mixp
mixpE(1-dj:0,:) = mixp(Np_mix+1-dj:Np_mix,:)
mixpE(Np_mix+1:Np_mix+dj,:) = mixp(1:dj,:)
! Mapping: remix dlat is ~10x of rcm, dlon is ~1/3.6 of rcm. Remix lat is from 0-45 deg. RCM is from 15-75 deg.
! For each rcm SH point, find the nearest remix lat. If it's not too far away (within dlat) then
! find the nearest remix lon. Assign rcm contribution to the nearest lat shell within 2 rcm dlon.
! The difference arises because remix dlat is larger while its dlon is smaller. Need to make sure all remix grid points get some contribution from rcm.
! Lastly, normalize the contribution by total IDW.
Ainvdwgt2 = 0.0_rp
mix_fluxes = 0.0_rp
!$OMP PARALLEL DO default(shared) collapse(2) &
!$OMP private(i,j,i0,il,iu,j0,jp,delt,delp,invdwgt) &
!$OMP reduction(+:Ainvdwgt2,mix_fluxes)
do j=1,Np_rai
do i=1,Nt_rai
i0 = minloc(abs(mixt(1,:)-colatc(i,j)),1) ! Find the nearest remix colat index for rcm colatc(i,j)
if(mixt(1,i0)<=colatc(i,j)) then ! If the nearest remix colat is < rcm colatc, only collect rcm to this colat and its next grid.
il=i0
iu=min(i0+1,Nt_mix)
else ! Otherwise, collect from this point and its neighbor lat.
il=max(i0-1,1)
iu=i0
endif
do i0=il,iu
! For any remix grid, interpolate if rcm lat is within dlat away
if(abs(mixt(1,i0)-colatc(i,j))<dlat) then
jp = minloc(abs(mixp(:,1)-glongc(i,j)),1)
! 1 <= jp <= Np_mix
! jp-dj>= 1-dj; jp+dj<= Np_mix+dj
! mixtE/mixpE is from 1-dj:Np_mix+dj
do j0=jp-dj,jp+dj
delt = abs(mixtE(j0,i0)-colatc(i,j))
delp = abs((mixpE(j0,i0)-glongc(i,j)))*sin(mixtE(j0,i0))
invdwgt = 1.0_rp/sqrt(delt**2+delp**2)
mix_fluxes(j0,i0,:) = mix_fluxes(j0,i0,:) + rai_fluxes(i,j,:)*invdwgt
Ainvdwgt2(j0,i0) = Ainvdwgt2(j0,i0) + invdwgt
enddo
endif
enddo
! endif
enddo
enddo
!$OMP PARALLEL DO default(shared) collapse(2) &
!$OMP private(i0,j0)
do j0=1,Np_mix
do i0=1,Nt_mix
if(Ainvdwgt2(j0,i0)>0.0_rp) then
mix_fluxes(j0,i0,:) = mix_fluxes(j0,i0,:)/Ainvdwgt2(j0,i0)
endif
enddo
enddo
end subroutine mapRaijuSToRemix
end module
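Note: the SH mapping is a scatter-style inverse-distance weighting: each raiju cell deposits w*f into nearby remix cells, and the second pass divides by the accumulated weight wherever anything landed. The kernel, reduced to a single destination cell (distances and values made up):
program idw_sketch
  implicit none
  integer, parameter :: rp = kind(1.0d0)
  real(rp) :: fsum, wsum, w
  real(rp) :: d(3), f(3)
  integer :: n
  d = [0.1_rp, 0.2_rp, 0.4_rp] ! distances from three source points
  f = [1.0_rp, 2.0_rp, 4.0_rp] ! their values
  fsum = 0.0_rp; wsum = 0.0_rp
  do n = 1, 3
     w = 1.0_rp/d(n)          ! inverse-distance weight
     fsum = fsum + w*f(n)
     wsum = wsum + w
  enddo
  if (wsum > 0.0_rp) print *, fsum/wsum ! normalized IDW estimate
end program idw_sketch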

View File

@@ -43,11 +43,7 @@ submodule (volttypes) raijuCplTypesSub
! Update MJD with whatever voltron handed us
! If we are restarting, this will get replaced with whatever's in file later
App%raiApp%State%mjd = App%opt%mjd0
write(*,*)"MJD0=",App%opt%mjd0
if (App%opt%doColdStart) then
! We are gonna cold start, so ignore plasma ingestion rules for first coupling
App%raiApp%State%isFirstCpl = .false.
endif
! Then allocate and initialize coupling variables based on raiju app
call raijuCpl_init(App, xml)
@@ -58,61 +54,29 @@ submodule (volttypes) raijuCplTypesSub
class(raijuCoupler_T), intent(inout) :: App
class(voltApp_T), intent(inout) :: vApp
logical :: doFirstColdStart
logical :: doUpdateColdStart
real(rp) :: BSDst
doFirstColdStart = .false.
doUpdateColdStart = .false.
associate(raiApp=>App%raiApp)
! If we are running realtime, its our job to get everything we need from vApp into raiCpl
if (.not. App%raiApp%Model%isSA) then
! First, determine if we should cold start, i.e. Completely reset raiju's eta's to match some target conditions
! Determine if we should cold start before packing coupler because it will set tLastUpdate to vApp%time and then we can't do the checks we want
! But actually do cold start after coupler packing completes so we can use real field line info
! Do we do our very first coldstart ever
if (App%opt%doColdStart .and. App%tLastUpdate < 0.0 .and. vApp%time >= 0.0) then
doFirstColdStart = .true.
endif
! Do we do "updates" to our coldstart during pre-conditioning period
if(App%opt%doColdStart .and. App%tLastUpdate > 0.0 .and. vApp%time < App%startup_blendTscl) then
doUpdateColdStart = .true.
endif
call packRaijuCoupler_RT(App, vApp)
endif
! Someone updated raiCpl's coupling variables by now, stuff it into RAIJU proper
call raiCpl2RAIJU(App)
if (.not. raiApp%State%coldStarter%doneFirstCS .or. vApp%time < raiApp%State%coldStarter%tEnd) then
!! Make sure we run at least once
call setActiveDomain(raiApp%Model, raiApp%Grid, raiApp%State)
! Calc voltron dst ourselves since vApp%BSDst is only set on console output
call EstDST(vApp%gApp%Model,vApp%gApp%Grid,vApp%gApp%State,BSDst0=BSDst)
call raijuGeoColdStart(raiApp%Model, raiApp%Grid, raiApp%State, vApp%time, BSDst)
endif
!if (doFirstColdStart) then
! ! Its happening, everybody stay calm
! write(*,*) "RAIJU Doing first cold start..."
! ! NOTE: By this point we have put coupling info into raiju (e.g. bVol, xyzmin, MHD moments)
! ! But haven't calculated active domain yet because that happens in preadvancer
! ! So we jump in and do it here so we have it for cold starting
! call setActiveDomain(raiApp%Model, raiApp%Grid, raiApp%State)
! ! Calc voltron dst ourselves since vApp%BSDst is only set on console output
! call EstDST(vApp%gApp%Model,vApp%gApp%Grid,vApp%gApp%State,BSDst0=BSDst)
! call raijuGeoColdStart(raiApp%Model, raiApp%Grid, raiApp%State, vApp%time, BSDst, doCXO=App%doColdstartCX,doPsphO=.true.)
!endif
!if (doUpdateColdStart) then
! write(*,*)"RAIJU doing update cold start at t=",vApp%time
! write(*,*)" (calculating model BSDst,)",vApp%time
! call setActiveDomain(raiApp%Model, raiApp%Grid, raiApp%State)
! call EstDST(vApp%gApp%Model,vApp%gApp%Grid,vApp%gApp%State,BSDst0=BSDst)
! call raijuGeoColdStart(raiApp%Model, raiApp%Grid, raiApp%State, vApp%time, BSDst, doCXO=App%doColdstartCX,doPsphO=.false.)
!endif
associate(cs=>raiApp%State%coldStarter)
if (.not. cs%doneFirstCS .or. (cs%doUpdate .and. vApp%time < cs%tEnd) ) then
!! Make sure we run at least once
! Calc voltron dst ourselves since vApp%BSDst is only set on console output
call EstDST(vApp%gApp%Model,vApp%gApp%Grid,vApp%gApp%State,BSDst0=BSDst)
raiApp%State%coldStarter%doCS_next_preAdv = .true.
raiApp%State%coldStarter%modelDst_next_preAdv = BSDst
!call setActiveDomain(raiApp%Model, raiApp%Grid, raiApp%State)
!call raijuGeoColdStart(raiApp%Model, raiApp%Grid, raiApp%State, vApp%time, BSDst)
endif
end associate
end associate
end subroutine volt2RAIJU
@@ -134,6 +98,10 @@ submodule (volttypes) raijuCplTypesSub
real(rp) :: active_interp
real(rp) :: d_cold, t_cold, d_hot, p_hot
real(rp) :: tScl, rampC
real(rp) :: wMHD,wRAI,alpha,beta,p_mhd
logical, parameter :: doWolf = .true.
real(rp), parameter :: wRAI0 = 0.5
associate(Model=>App%raiApp%Model, State=>App%raiApp%State, sh=>App%raiApp%Grid%shGrid, spcList=>App%raiApp%Grid%spc)
@@ -179,22 +147,32 @@ submodule (volttypes) raijuCplTypesSub
endif
enddo
call InterpShellVar_TSC_pnt(sh, State%Tb, th, ph, tScl)
tScl = Model%nBounce*tScl ! [s]
!tScl = 10.0_rp ! [s]
!Wolf-limit the pressure going back
if (doWolf) then
!You're sending the wolf? Shit, that's all you had to say
call InterpShellVar_TSC_pnt(App%shGr, App%avgBeta , th, ph, beta )
call InterpShellVar_TSC_pnt(App%shGr, App%Pavg(BLK), th, ph, p_mhd) !MHD flux-tube averaged pressure (sum over all fluids)
alpha = 1.0 + beta*5.0/6.0
wRAI = 1.0/alpha !Full wolf-limited weight
wRAI = max(wRAI,wRAI0) !Don't allow Raiju contribution to drop below wRAI0
call ClampValue(wRAI,0.0_rp,1.0_rp)
wMHD = 1 - wRAI ! = (alpha-1)/alpha w/o any clamping
imW(IM_P_RING) = imW(IM_P_RING)*wRAI + wMHD*p_mhd
endif
!call InterpShellVar_TSC_pnt(sh, State%Tb, th, ph, tScl)
!tScl = Model%nBounce*tScl ! [s]
! 1/(x)^4 for x from 1 to 0.5 goes from 1 to 16. Higher exponents mean a stronger ramp-up
!tScl = 15.0_rp/(App%vaFrac%data(i0,j0))**4 ! [s]
!tScl = 15.0_rp/(App%vaFrac%data(i0,j0))**2 ! [s]
call InterpShellVar_TSC_pnt(sh, App%tscl_mhdIngest, th, ph, tScl)
! Adjust IM_TSCL if we wanna ramp up over time
if (t < App%startup_blendTscl) then
rampC = RampDown(t, 0.0_rp, App%startup_blendTscl)
!tScl = sqrt(tScl*App%startup_blendTscl)*rampC + (1-rampC)*tScl ! idk
tScl = rampC*30.0_rp*tScl + (1-rampC)*tScl ! No good reason for 30 except for wanting starting tScl to be ~8-10 minutes
!if (th > 50*deg2rad .and. th < 55*deg2rad .and. ph > 130*deg2rad .and. ph < 150*deg2rad) then
! write(*,*)"--",t,App%startup_blendTscl,rampC,tScl
!endif
endif
imW(IM_TSCL) = tScl
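Note: the Wolf limiting above blends RAIJU and MHD pressure with alpha = 1 + (5/6)*beta, floors the RAIJU share at wRAI0, and clamps to [0,1]. A standalone sketch of the weight (ClampValue replaced by min/max):
module wolf_weight_sketch
  implicit none
  integer, parameter :: rp = kind(1.0d0)
contains
  pure function wolfWeight(beta, wRAI0) result(wRAI)
    real(rp), intent(in) :: beta  ! flux-tube averaged plasma beta
    real(rp), intent(in) :: wRAI0 ! floor on the RAIJU share (0.5 in the diff)
    real(rp) :: alpha, wRAI
    alpha = 1.0_rp + beta*5.0_rp/6.0_rp
    wRAI  = max(1.0_rp/alpha, wRAI0)
    wRAI  = min(max(wRAI, 0.0_rp), 1.0_rp) ! ClampValue stand-in
  end function wolfWeight
end module wolf_weight_sketch
The blended pressure is then wRAI*p_rai + (1 - wRAI)*p_mhd, matching the imW(IM_P_RING) update above.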
@@ -285,21 +263,22 @@ submodule (volttypes) raijuCplTypesSub
endif
enddo
! Mock up cold electrons balancing hot protons and see if it produces meaningful flux
call InterpShellVar_TSC_pnt(sh, State%Den_avg(idx_proton) , th, ph, d_ion)
call InterpShellVar_TSC_pnt(sh, State%Press_avg(idx_proton) , th, ph, p_ion)
pie_frac = 0.05 ! Fraction of ion pressure contained by these neutralizing electrons
pe_kT = DP2kT(d_ion, p_ion*pie_frac) ! [keV]
pe_nflux = imP(RAI_ENFLX)*d_ion/d_hot ! Scale number flux to same loss processes except there were d_ion density instead of d_electron density
pe_eflux = (pe_kT*kev2erg)*pe_nflux ! [erg/cm^2/s]
if (pe_eflux > imP(RAI_EFLUX)) then ! Use in place of normal flux only if energy flux for these are greater than hot electron channel fluxes
imP(RAI_EFLUX) = pe_eflux
imP(RAI_ENFLX) = pe_nflux
imP(RAI_EDEN ) = d_ion*1.0e6 ! [#/m^3]
imP(RAI_EPRE ) = p_ion*pie_frac*1.0e-9 ! [Pa]
endif
endif
!K: Removing this code for now, should be rewritten to use the MHD D,P => precip routines
! call InterpShellVar_TSC_pnt(sh, State%Den_avg(idx_proton) , th, ph, d_ion)
! call InterpShellVar_TSC_pnt(sh, State%Press_avg(idx_proton) , th, ph, p_ion)
! pie_frac = 0.05 ! Fraction of ion pressure contained by these neutralizing electrons
! pe_kT = DP2kT(d_ion, p_ion*pie_frac) ! [keV]
! pe_nflux = imP(RAI_ENFLX)*d_ion/d_hot ! Scale number flux to the same loss processes as if there were d_ion density instead of d_electron density
! pe_eflux = (pe_kT*kev2erg)*pe_nflux ! [erg/cm^2/s]
! if (pe_eflux > imP(RAI_EFLUX)) then ! Use in place of normal flux only if energy flux for these are greater than hot electron channel fluxes
! imP(RAI_EFLUX) = pe_eflux
! imP(RAI_ENFLX) = pe_nflux
! imP(RAI_EDEN ) = d_ion*1.0e6 ! [#/m^3]
! imP(RAI_EPRE ) = p_ion*pie_frac*1.0e-9 ! [Pa]
! endif
endif !spcList(s)%spcType == X
enddo
! derive mean energy where nflux is non-trivial.
if (imP(RAI_ENFLX) > TINY) imP(RAI_EAVG) = imP(RAI_EFLUX)/imP(RAI_ENFLX) * erg2kev ! Avg E [keV]
@@ -373,6 +352,8 @@ submodule (volttypes) raijuCplTypesSub
real(rp), intent(in) :: dt
call App%raiApp%AdvanceModel(dt)
call raiCpl_PostAdvance(App)
App%raiApp%State%t = App%raiApp%State%t + dt
App%raiApp%State%ts = App%raiApp%State%ts + 1
App%raiApp%State%mjd = T2MJD(dt,App%raiApp%State%mjd)

View File

@@ -10,8 +10,8 @@ module raijuCplHelper
use raijugrids
use ioh5
use files
use arrayutil
use imagtubes
use mixdefs
use raijuColdStartHelper, only : initRaijuColdStarter
@@ -38,6 +38,8 @@ module raijuCplHelper
! Options
call iXML%Set_Val(raiCpl%startup_blendTscl, "cpl/startupTscl", raiCpl%startup_blendTscl)
call iXML%Set_Val(raiCpl%tsclSm_dL , "cpl/tsclSm_dL" , raiCpl%tsclSm_dL )
call iXML%Set_Val(raiCpl%tsclSm_dMLT, "cpl/tsclSm_dMLT", raiCpl%tsclSm_dMLT)
! State sub-modules that need coupler settings
call initRaijuColdStarter(raiCpl%raiApp%Model, iXML, raiCpl%raiApp%State%coldStarter,tEndO=raiCpl%startup_blendTscl)
@@ -79,6 +81,9 @@ module raijuCplHelper
enddo
call initShellVar(raiCpl%shGr, SHGR_CC, raiCpl%tiote)
call initShellVar(raiCpl%shGr, SHGR_CC, raiCpl%Tb)
call initShellVar(raiCpl%shGr, SHGR_CC, raiCpl%avgBeta)
call initShellVar(raiCpl%shGr, SHGR_CC, raiCpl%tscl_mhdIngest)
end associate
! Initial values
@@ -146,8 +151,8 @@ module raijuCplHelper
do i=1,NDIM
call InterpShellVar_TSC_SG(raiCpl%shGr, raiCpl%xyzMin(i), raiCpl%shGr, raiCpl%xyzMincc(i), srcMaskO=topoSrcMask)
enddo
call InterpShellVar_TSC_SG(voltGrid, tubeShell%Tb, raiCpl%shGr, raiCpl%Tb, srcMaskO=topoSrcMask)
call InterpShellVar_TSC_SG(voltGrid, tubeShell%Tb , raiCpl%shGr, raiCpl%Tb , srcMaskO=topoSrcMask)
call InterpShellVar_TSC_SG(voltGrid, tubeShell%avgBeta, raiCpl%shGr, raiCpl%avgBeta, srcMaskO=topoSrcMask)
end subroutine tubeShell2RaiCpl
@@ -219,50 +224,6 @@ module raijuCplHelper
end associate
end subroutine
!------
! One-way driving from file helpers
!------
subroutine genImagTubes(raiCpl, vApp)
class(raijuCoupler_T), intent(inout) :: raiCpl
type(voltApp_T), intent(in ) :: vApp
integer :: i,j
real(rp) :: seedR, eqR
type(magLine_T) :: magLine
! Get field line info and potential from voltron
! And put the data into RAIJU's fromV coupling object
associate(sh=>raiCpl%raiApp%Grid%shGrid , &
planet=>raiCpl%raiApp%Model%planet, &
ebApp =>vApp%ebTrcApp)
seedR = planet%ri_m/planet%rp_m
! Do field line tracing, populate fromV%ijTubes
!$OMP PARALLEL DO default(shared) &
!$OMP schedule(dynamic) &
!$OMP private(i,j,eqR)
do i=sh%isg,sh%ieg+1
do j=sh%jsg,sh%jeg+1
call CleanLine(raiCpl%magLines(i,j))
eqR = DipColat2L(raiCpl%raiApp%Grid%thRp(i)) ! Function assumes colat coming from 1 Rp, make sure we use the right theta value
if (eqR < raiCpl%opt%mhdRin) then
call DipoleTube(vApp, sh%th(i), sh%ph(j), raiCpl%ijTubes(i,j))
else
call MHDTube(ebApp, planet, & !ebTrcApp, planet
sh%th(i), sh%ph(j), seedR, & ! colat, lon, r
raiCpl%ijTubes(i,j), raiCpl%magLines(i,j), & ! IMAGTube_T, magLine_T
doShiftO=.true.)
endif
enddo
enddo
end associate
end subroutine genImagTubes
!------
! Real-time coupling stuff
!------
@@ -283,6 +244,177 @@ module raijuCplHelper
end subroutine
!------
! Post-advance calculations
!------
subroutine raiCpl_PostAdvance(raiCpl)
class(raijuCoupler_T), intent(inout) :: raiCpl
call raiCpl_calcTsclMHD(raiCpl)
end subroutine raiCpl_PostAdvance
subroutine raiCpl_calcTsclMHD(raiCpl)
class(raijuCoupler_T), intent(inout) :: raiCpl
integer :: i,j
real(rp) :: vaFrac_cc
associate(tscl=>raiCpl%tscl_mhdIngest, State=>raiCpl%raiApp%State, sh=>raiCpl%shGr)
! Defaults
call fillArray(tscl%data, State%dt)
where (State%active /= RAIJUINACTIVE)
tscl%mask = .true.
elsewhere
tscl%mask = .false.
end where
! First, calculate our tscl point-by-point
!$OMP PARALLEL DO default(shared) &
!$OMP private(i,j,vaFrac_cc)
do j=sh%jsg,sh%jeg
do i=sh%isg,sh%ieg
if (tscl%mask(i,j)) then
vaFrac_cc = 0.25*sum(State%vaFrac(i:i+1,j:j+1))
tscl%data(i,j) = raiCpl%raiApp%Model%nBounce*State%dt/(vaFrac_cc)**2
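! Editor's note: tscl = nBounce*dt/vaFrac_cc**2, so cells with a low Alfven
! fraction get a longer ingest timescale and blend MHD values in more slowly.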
endif
enddo
enddo
! Now do smoothing
call smoothRaijuVar_eq(State, sh, raiCpl%tsclSm_dMLT, raiCpl%tsclSm_dL, tscl)
! The mask included buffer cells for smoothing; from here on, anyone interpolating should use only active cells
where (State%active == RAIJUACTIVE)
tscl%mask = .true.
elsewhere
tscl%mask = .false.
end where
end associate
contains
subroutine smoothRaijuVar_eq(State, sh, dMLT, dL, var)
!! Smooth a raiju variable with stencil in equatorial projection
type(raijuState_T), intent(in) :: State
type(ShellGrid_T), intent(in) :: sh
real(rp), intent(in) :: dMLT
real(rp), intent(in) :: dL
type(ShellGridVar_T), intent(inout) :: var
!! Variable to smooth. Assumes var%mask can be used to include/exclude points
integer :: i, j, j_eval, ipnt, jpnt
integer :: dj, dj_half, nGood
logical :: doIScan
real(rp) :: var_sum
real(rp) :: L_center, L_pnt
real(rp) :: dPhi_rad, dMLT_pnt, dL_pnt
real(rp), dimension(:,:), allocatable :: tmp_var_sm
if (.not. sh%isPhiUniform) then
write(*,*) "ERROR: raiCpl_calcTsclMHD expects raiCpl ShellGrid to have periodic phi, but it does not"
write(*,*) " Goodbye."
stop
endif
if (var%loc /= SHGR_CC ) then
write(*,*) "ERROR: raiCpl_calcTsclMHD expects raiCpl ShellGridVar to be located at cell center"
write(*,*) " Given var with location enum:",var%loc
write(*,*) " Goodbye."
stop
endif
allocate(tmp_var_sm(var%isv:var%iev, var%jsv:var%jev))
call fillArray(tmp_var_sm, 0.0_rp)
dphi_rad = dMLT * PI/12.0_rp ! Convert delta-MLT to a delta-phi in radians
dj_half = 0
do while(sh%ph(sh%js + dj_half + 1) - sh%phc(sh%js) < dphi_rad/2.0_rp) ! Grow dj_half until its accumulated dphi reaches half of the target dphi_rad (you're welcome)
dj_half = dj_half + 1
enddo
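! Editor's note (worked example, assuming a hypothetical 0.5 deg uniform phi
! grid): dMLT = 1.0 gives dphi_rad = PI/12 ~ 0.2618 rad (15 deg), so dj_half
! grows to ~15 cells and the full stencil spans roughly one hour of MLT.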
!$OMP PARALLEL DO default(shared) &
!$OMP schedule(dynamic) &
!$OMP private(i, j, dj, doIScan) &
!$OMP private(nGood, var_sum, ipnt, jpnt, L_center, L_pnt, dL_pnt)
do j=sh%jsg,sh%jeg
do i=sh%isg,sh%ieg
if (.not. var%mask(i,j)) then
cycle
endif
L_center = norm2(State%xyzMincc(i,j,XDIR:YDIR)) ! XY plane to L shell / radius from planet
nGood = 0
var_sum = 0.0_rp
! Loop over j stencil range
do dj=-dj_half,dj_half
jpnt = j + dj
if (jpnt < sh%js) then
jpnt = jpnt + sh%Np
elseif (jpnt > sh%je) then
jpnt = jpnt - sh%Np
endif
! Sweep in i+ direction (earthward)
doIScan = .true.
ipnt = i
do while (doIScan)
L_pnt = norm2(State%xyzMincc(ipnt,jpnt,XDIR:YDIR))
dL_pnt = abs(L_center - L_pnt)
! First decide whether we keep scanning past this point
if (ipnt >= sh%ieg .or. dL_pnt > dL/2.0_rp) then
doIScan = .false.
endif
! Decide if we should include this point
if (var%mask(ipnt,jpnt) .and. (dL_pnt < dL/2.0_rp)) then
nGood = nGood + 1
var_sum = var_sum + var%data(ipnt,jpnt)
endif
ipnt = ipnt + 1
enddo
! Same thing but in the -i direction
doIScan = .true.
ipnt = i-1
do while(doIScan)
L_pnt = norm2(State%xyzMincc(ipnt,jpnt,XDIR:YDIR))
dL_pnt = abs(L_center - L_pnt)
if (ipnt <= sh%isg .or. dL_pnt > dL/2.0_rp) then
doIScan = .false.
endif
if (var%mask(ipnt,jpnt) .and. (dL_pnt < dL/2.0_rp)) then
nGood = nGood + 1
var_sum = var_sum + var%data(ipnt,jpnt)
endif
ipnt = ipnt - 1
enddo
enddo
! Now calculate and save our average
if (nGood > 0) then
tmp_var_sm(i,j) = var_sum/(1.0_rp*nGood)
endif
enddo
enddo
! Put it back into the original variable and we're done
var%data(:,:) = tmp_var_sm(:,:)
end subroutine smoothRaijuVar_eq
end subroutine raiCpl_calcTsclMHD
!------
! Coupler I/O
!------
@@ -302,23 +434,24 @@ module raijuCplHelper
call writeShellGrid(raiCpl%shGr, ResF,"/ShellGrid")
call ClearIO(IOVars)
call AddOutVar(IOVars, "tLastUpdate", raiCpl%tLastUpdate, uStr="s")
call AddOutSGV(IOVars, "Pavg" , raiCpl%Pavg , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "Davg" , raiCpl%Davg , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "Pstd" , raiCpl%Pstd , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "Dstd" , raiCpl%Dstd , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "Bmin" , raiCpl%Bmin , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "xyzMin" , raiCpl%xyzMin , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "xyzMincc" , raiCpl%xyzMincc , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "topo" , raiCpl%topo , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "thcon" , raiCpl%thcon , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "phcon" , raiCpl%phcon , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "bvol" , raiCpl%bvol , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "bvol_cc" , raiCpl%bvol_cc , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "vaFrac" , raiCpl%vaFrac , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "Tb" , raiCpl%Tb , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "pot_total" , raiCpl%pot_total , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "pot_corot" , raiCpl%pot_corot , doWriteMaskO=.true.)
call AddOutVar(IOVars, "tLastUpdate" , raiCpl%tLastUpdate , uStr="s")
call AddOutSGV(IOVars, "Pavg" , raiCpl%Pavg , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "Davg" , raiCpl%Davg , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "Pstd" , raiCpl%Pstd , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "Dstd" , raiCpl%Dstd , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "Bmin" , raiCpl%Bmin , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "xyzMin" , raiCpl%xyzMin , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "xyzMincc" , raiCpl%xyzMincc , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "topo" , raiCpl%topo , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "thcon" , raiCpl%thcon , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "phcon" , raiCpl%phcon , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "bvol" , raiCpl%bvol , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "bvol_cc" , raiCpl%bvol_cc , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "vaFrac" , raiCpl%vaFrac , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "Tb" , raiCpl%Tb , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "pot_total" , raiCpl%pot_total , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "pot_corot" , raiCpl%pot_corot , doWriteMaskO=.true.)
call AddOutSGV(IOVars, "tscl_mhdIngest", raiCpl%tscl_mhdIngest, doWriteMaskO=.true.)
call WriteVars(IOVars, .false., ResF)
call MapSymLink(ResF,lnResF)
end subroutine
@@ -347,23 +480,24 @@ module raijuCplHelper
raiCpl%tLastUpdate = GetIOReal(IOVars, "tLastUpdate")
! ShellGridVars
call ReadInSGV(raiCpl%Pavg ,ResF, "Pavg" , doIOpO=.false.)
call ReadInSGV(raiCpl%Davg ,ResF, "Davg" , doIOpO=.false.)
call ReadInSGV(raiCpl%Pstd ,ResF, "Pstd" , doIOpO=.false.)
call ReadInSGV(raiCpl%Dstd ,ResF, "Dstd" , doIOpO=.false.)
call ReadInSGV(raiCpl%Bmin ,ResF, "Bmin" , doIOpO=.false.)
call ReadInSGV(raiCpl%xyzMin ,ResF, "xyzMin" , doIOpO=.false.)
call ReadInSGV(raiCpl%xyzMincc ,ResF, "xyzMincc" , doIOpO=.false.)
call ReadInSGV(raiCpl%topo ,ResF, "topo" , doIOpO=.false.)
call ReadInSGV(raiCpl%thcon ,ResF, "thcon" , doIOpO=.false.)
call ReadInSGV(raiCpl%phcon ,ResF, "phcon" , doIOpO=.false.)
call ReadInSGV(raiCpl%bvol ,ResF, "bvol" , doIOpO=.false.)
call ReadInSGV(raiCpl%bvol_cc ,ResF, "bvol_cc" , doIOpO=.false.)
call ReadInSGV(raiCpl%vaFrac ,ResF, "vaFrac" , doIOpO=.false.)
call ReadInSGV(raiCpl%Tb ,ResF, "Tb" , doIOpO=.false.)
call ReadInSGV(raiCpl%pot_total,ResF, "pot_total", doIOpO=.false.)
call ReadInSGV(raiCpl%pot_corot,ResF, "pot_corot", doIOpO=.false.)
call ReadInSGV(raiCpl%Pavg ,ResF, "Pavg" , doIOpO=.false.)
call ReadInSGV(raiCpl%Davg ,ResF, "Davg" , doIOpO=.false.)
call ReadInSGV(raiCpl%Pstd ,ResF, "Pstd" , doIOpO=.false.)
call ReadInSGV(raiCpl%Dstd ,ResF, "Dstd" , doIOpO=.false.)
call ReadInSGV(raiCpl%Bmin ,ResF, "Bmin" , doIOpO=.false.)
call ReadInSGV(raiCpl%xyzMin ,ResF, "xyzMin" , doIOpO=.false.)
call ReadInSGV(raiCpl%xyzMincc ,ResF, "xyzMincc" , doIOpO=.false.)
call ReadInSGV(raiCpl%topo ,ResF, "topo" , doIOpO=.false.)
call ReadInSGV(raiCpl%thcon ,ResF, "thcon" , doIOpO=.false.)
call ReadInSGV(raiCpl%phcon ,ResF, "phcon" , doIOpO=.false.)
call ReadInSGV(raiCpl%bvol ,ResF, "bvol" , doIOpO=.false.)
call ReadInSGV(raiCpl%bvol_cc ,ResF, "bvol_cc" , doIOpO=.false.)
call ReadInSGV(raiCpl%vaFrac ,ResF, "vaFrac" , doIOpO=.false.)
call ReadInSGV(raiCpl%Tb ,ResF, "Tb" , doIOpO=.false.)
call ReadInSGV(raiCpl%pot_total ,ResF, "pot_total" , doIOpO=.false.)
call ReadInSGV(raiCpl%pot_corot ,ResF, "pot_corot" , doIOpO=.false.)
call ReadInSGV(raiCpl%tscl_mhdIngest,ResF, "tscl_mhdIngest", doIOpO=.false.)
end subroutine
end module raijuCplHelper

View File

@@ -86,7 +86,7 @@ module gamCouple_mpi_G2V
! initialize F08 MPI objects
App%couplingComm = MPI_COMM_NULL
App%zeroArraytypes = (/ MPI_DATATYPE_NULL /)
App%zeroArraytypes(:) = (/ MPI_INTEGER /) ! previously (/ MPI_DATATYPE_NULL /)
! split voltron helpers off of the communicator
! split couplingPoolComm into a communicator with only the non-helper voltron rank

View File

@@ -559,13 +559,13 @@ module gamCouple_mpi_V2G
subroutine sendGameraCplDataMpi(gCplApp, CouplingTargetT)
class(gamCouplerMpi_volt_T), intent(inout) :: gCplApp
real(rp), intent(in) :: CouplingTargetT
real(rp), intent(inout) :: CouplingTargetT
call sendShallowCplDataMpi(gCplApp)
if(gCplApp%doDeep) call sendDeepCplDataMpi(gCplApp)
call sendCplTimeMpi(gCplApp, CouplingTargetT)
end subroutine
end subroutine sendGameraCplDataMpi
subroutine sendShallowCplDataMpi(gCplApp)
class(gamCouplerMpi_volt_T), intent(inout) :: gCplApp
@@ -628,14 +628,14 @@ module gamCouple_mpi_V2G
subroutine sendCplTimeMpi(gCplApp, CouplingTargetT)
class(gamCouplerMpi_volt_T), intent(inout) :: gCplApp
real(rp), intent(in) :: CouplingTargetT
real(rp), intent(inout) :: CouplingTargetT
integer :: ierr
! Send Target Time for next coupling
call mpi_bcast(CouplingTargetT,1,MPI_MYFLOAT, gCplApp%myRank, gCplApp%couplingComm, ierr)
end subroutine
end subroutine sendCplTimeMpi
end module

View File

@@ -152,8 +152,8 @@ module voltapp_mpi
deallocate(vApp%ebTrcApp%ebSquish%blockStartIndices)
allocate(vApp%ebTrcApp%ebSquish%blockStartIndices(vApp%ebTrcApp%ebSquish%numSquishBlocks))
do b=1,vApp%ebTrcApp%ebSquish%numSquishBlocks
vApp%ebTrcApp%ebSquish%blockStartIndices(b) = vApp%ebTrcApp%ebState%ebGr%ks + &
((b-1)*(vApp%ebTrcApp%ebState%ebGr%ke+1))/vApp%ebTrcApp%ebSquish%numSquishBlocks
vApp%ebTrcApp%ebSquish%blockStartIndices(b) = &
GetAdjustedSquishStart(vApp,((b-1)*(vApp%ebTrcApp%ebState%ebGr%ke+1))/vApp%ebTrcApp%ebSquish%numSquishBlocks)
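! Editor's note: the expression partitions the ke+1 k-planes evenly across
! numSquishBlocks; GetAdjustedSquishStart (defined elsewhere) presumably
! shifts that raw offset to a valid starting index.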
enddo
endif
call createLoadBalancer(vApp%squishLb, nHelpers,&
@@ -356,9 +356,10 @@ module voltapp_mpi
if(.not. vApp%doSerialMHD) call vApp%gApp%StartUpdateMhdData(vApp)
call Tic("DeepUpdate")
call Tic("DeepUpdate",.true.)
call DeepUpdate_mpi(vApp)
call Toc("DeepUpdate")
call Toc("DeepUpdate",.true.)
vApp%ts = vApp%ts + 1
if(vApp%doSerialMHD) call vApp%gApp%StartUpdateMhdData(vApp)
@@ -382,12 +383,33 @@ module voltapp_mpi
call convertGameraToRemix(vApp%mhd2mix, vApp%gApp, vApp%remixApp)
call Toc("G2R")
call MJDRecalc(vApp%MJD)
if (vApp%doGCM .and. vApp%time >=0 .and. vApp%gcmCplRank /= -1) then
call Tic("GCM2MIX")
call coupleGCM2MIX(vApp%gcm,vApp%remixApp%ion,vApp%MJD,vApp%time,vApp%mageCplComm,vApp%gcmCplRank)
call Toc("GCM2MIX")
end if
! tubes are only done after spinup
if(vApp%doDeep .and. vApp%time >= 0) then
if(vApp%useHelpers) call vhReqStep(vApp)
!Update i-shell to trace within in case rTrc has changed
vApp%iDeep = vApp%gApp%Grid%ie-1
!Pull in updated fields to CHIMP
call Tic("G2C")
call convertGameraToChimp(vApp%mhd2chmp,vApp%gApp,vApp%ebTrcApp)
call Toc("G2C")
if(vApp%useHelpers .and. vApp%doTubeHelp) then
call Tic("VoltTubes",.true.)
call VhReqTubeStart(vApp)
call Toc("VoltTubes",.true.)
endif
endif
! run remix
call Tic("ReMIX", .true.)
call runRemix(vApp)
@@ -402,21 +424,11 @@ module voltapp_mpi
! only do imag after spinup
if(vApp%doDeep .and. vApp%time >= 0) then
call Tic("DeepUpdate", .true.)
if(vApp%useHelpers) call vhReqStep(vApp)
! instead of PreDeep, use Tube Helpers and replicate other calls
!Update i-shell to trace within in case rTrc has changed
vApp%iDeep = vApp%gApp%Grid%ie-1
!Pull in updated fields to CHIMP
call Tic("G2C")
call convertGameraToChimp(vApp%mhd2chmp,vApp%gApp,vApp%ebTrcApp)
call Toc("G2C")
call Tic("VoltTubes",.true.)
if(vApp%useHelpers .and. vApp%doTubeHelp) then
call VhReqTubeStart(vApp)
! Tubes were started earlier
call vhReqTubeEnd(vApp)
! Now pack into tubeShell
call Tic("Tube2Shell")
@@ -440,8 +452,7 @@ module voltapp_mpi
call DoImag(vApp)
vApp%deepProcessingInProgress = .true.
call Toc("DeepUpdate", .true.)
else
elseif(vApp%doDeep) then
vApp%gApp%Grid%Gas0 = 0
!Load TM03 into Gas0 for ingestion during spinup
!Note: Using vApp%time instead of gamera time units
@@ -457,7 +468,6 @@ module voltapp_mpi
! only do imag after spinup with deep enabled
if(vApp%doDeep .and. vApp%time >= 0) then
call Tic("DeepUpdate", .true.)
do while(SquishBlocksRemain(vApp))
call Tic("Squish",.true.)
@@ -475,7 +485,6 @@ module voltapp_mpi
call SquishEnd(vApp)
call PostDeep(vApp, vApp%gApp)
call Toc("DeepUpdate", .true.)
endif
end subroutine endDeep
@@ -494,11 +503,9 @@ module voltapp_mpi
if(.not. vApp%deepProcessingInProgress) return
if(SquishBlocksRemain(vApp)) then
call Tic("DeepUpdate")
call Tic("Squish",.true.)
call DoSquishBlock(vApp)
call Toc("Squish",.true.)
call Toc("DeepUpdate")
endif
if(.not. SquishBlocksRemain(vApp)) then

View File

@@ -107,7 +107,7 @@ module volthelpers_mpi
! chimp data update functions
subroutine sendChimpStateData(ebState, vHelpComm)
type(ebState_T), intent(in) :: ebState
type(ebState_T), intent(inout) :: ebState
type(MPI_Comm), intent(in) :: vHelpComm
integer :: ierr, length
@@ -195,7 +195,7 @@ module volthelpers_mpi
call mpi_Abort(MPI_COMM_WORLD, 1, ierr)
end if
end subroutine
end subroutine sendChimpStateData
subroutine recvChimpStateData(ebState, vHelpComm)
type(ebState_T), intent(inout) :: ebState
@@ -272,7 +272,7 @@ module volthelpers_mpi
end subroutine
subroutine sendChimpUpdate(vApp)
type(voltAppMpi_T), intent(in) :: vApp
type(voltAppMpi_T), intent(inout) :: vApp
integer :: ierr, length
character( len = MPI_MAX_ERROR_STRING) :: message
@@ -317,7 +317,7 @@ module volthelpers_mpi
call mpi_Abort(MPI_COMM_WORLD, 1, ierr)
end if
end subroutine
end subroutine sendChimpUpdate
subroutine recvChimpUpdate(vApp)
type(voltAppMpi_T), intent(inout) :: vApp
@@ -374,14 +374,15 @@ module volthelpers_mpi
type(voltAppMpi_T), intent(in) :: vApp
integer, intent(in) :: rType
integer :: ierr
integer :: ierr, reqTypeBuf
type(MPI_Request) :: helpReq
reqTypeBuf = rType
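! Local copy: rType is intent(in), and the non-blocking broadcast needs a
! definable buffer (it is written on the receiving helper ranks).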
! async to match waiting helper nodes
call mpi_Ibcast(rType, 1, MPI_INTEGER, 0, vApp%vHelpComm, helpReq, ierr)
call mpi_Ibcast(reqTypeBuf, 1, MPI_INTEGER, 0, vApp%vHelpComm, helpReq, ierr)
call mpi_wait(helpReq, MPI_STATUS_IGNORE, ierr)
end subroutine
end subroutine vhRequestType
subroutine vhReqStep(vApp)
type(voltAppMpi_T), intent(inout) :: vApp

View File

@@ -12,6 +12,59 @@ module tubehelper
contains
subroutine init_TubeShell(sh, tubeShell, TioTeO)
type(ShellGrid_T), intent(in) :: sh
!! Voltron shellgrid
type(TubeShell_T), intent(inout) :: tubeShell
!! IMAGTubeShell object we are initializing
!logical, dimension(:,:), intent(in), optional :: maskO
real(rp), intent(in), optional :: TioteO
!! Default Ti/Te ratio
integer :: i
real(rp) :: tiote
if (present(TioteO)) then
tiote = TioteO
else
tiote = 4.0
endif
do i=1,NDIM
call initShellVar(sh, SHGR_CORNER, tubeShell%xyz0(i))
call initShellVar(sh, SHGR_CORNER, tubeShell%X_bmin(i))
enddo
call initShellVar(sh, SHGR_CORNER, tubeShell%lat0 )
call initShellVar(sh, SHGR_CORNER, tubeShell%lon0 )
call initShellVar(sh, SHGR_CORNER, tubeShell%latc )
call initShellVar(sh, SHGR_CORNER, tubeShell%lonc )
call initShellVar(sh, SHGR_CORNER, tubeShell%invlat )
call initShellVar(sh, SHGR_CORNER, tubeShell%topo )
call initShellVar(sh, SHGR_CORNER, tubeShell%bmin )
call initShellVar(sh, SHGR_CORNER, tubeShell%bVol )
call initShellVar(sh, SHGR_CORNER, tubeShell%Lb )
call initShellVar(sh, SHGR_CORNER, tubeShell%Tb )
call initShellVar(sh, SHGR_CORNER, tubeShell%wMAG )
call initShellVar(sh, SHGR_CORNER, tubeShell%rCurv )
call initShellVar(sh, SHGR_CORNER, tubeShell%avgBeta)
do i=0,MAXTUBEFLUIDS
call initShellVar(sh, SHGR_CORNER, tubeShell%avgP(i))
call initShellVar(sh, SHGR_CORNER, tubeShell%avgN(i))
call initShellVar(sh, SHGR_CORNER, tubeShell%stdP(i))
call initShellVar(sh, SHGR_CORNER, tubeShell%stdN(i))
enddo
call initShellVar(sh, SHGR_CORNER, tubeShell%losscone )
call initShellVar(sh, SHGR_CORNER, tubeShell%lossconec)
call initShellVar(sh, SHGR_CORNER, tubeShell%TioTe0 )
call initShellVar(sh, SHGR_CORNER, tubeShell%nTrc )
tubeShell%TioTe0%data = tiote
end subroutine init_TubeShell
subroutine FakeTube(P,xyz0,bTube)
!! Given a seed xyz0 generate a fake tube that seems plausible
type(planet_T), intent(in) :: P
@@ -284,6 +337,11 @@ module tubehelper
bTube%nTrc = 0
!Zero pot
bTube%pot = 0.0
bTube%crpot = 0.0
bTube%potc = 0.0
end subroutine FreshenTube

View File

@@ -64,7 +64,7 @@ module voltCplHelper
call CleanLine(magLine)
!Note: Not using volt time b/c chimp wants time in its units
call genLine(ebApp%ebModel,ebApp%ebState,xyz0,ebApp%ebState%eb1%time, magLine,&
doShueO=.false.,doNHO=doNH,doSHO=doSH)
doNHO=doNH,doSHO=doSH)
call Line2Tube(ebApp,vApp%planet,magLine,vApp%State%ijTubes(i,j))
endif

View File

@@ -99,6 +99,8 @@ module voltapp
! adjust XMl reader root
call xmlInp%SetRootStr('Kaiju/Voltron')
! Make sure verbosity is still right after others do stuff with the reader
call xmlInp%SetVerbose(vApp%isLoud)
!Initialize planet information
call getPlanetParams(vApp%planet, xmlInp)
@@ -209,7 +211,7 @@ module voltapp
gApp%Model%t = vApp%time / gApp%Model%Units%gT0
gApp%State%time = gApp%Model%t
call genVoltShellGrid(vApp, xmlInp)
call genVoltShellGrid(vApp, xmlInp, gApp%Grid%Nkp)
call initVoltState(vApp)
endif
@@ -334,9 +336,10 @@ module voltapp
! update the next predicted coupling interval
vApp%DeepT = vApp%DeepT + vApp%DeepDT
call Tic("DeepUpdate")
call Tic("DeepUpdate",.true.)
call DeepUpdate(vApp, vApp%gApp)
call Toc("DeepUpdate")
call Toc("DeepUpdate",.true.)
vApp%ts = vApp%ts + 1
call vApp%gApp%StartUpdateMhdData(vApp)
@@ -456,19 +459,8 @@ module voltapp
call init_volt2Chmp(vApp,gApp)
endif
!Ensure chimp and voltron restart numbers match
! Actually chimp doesn't write restart files right now
!if (isRestart .and. vApp%IO%nRes /= ebTrcApp%ebModel%IO%nRes) then
! write(*,*) "Voltron and Chimp disagree on restart number, you should sort that out."
! write(*,*) "Error code: A house divided cannot stand"
! write(*,*) " Voltron nRes = ", vApp%IO%nRes
! write(*,*) " Chimp nRes = ", ebTrcApp%ebModel%IO%nRes
! stop
!endif
call init_mhd2Chmp(vApp%mhd2chmp, gApp, vApp%ebTrcApp)
call init_chmp2Mhd(vApp%chmp2mhd, vApp%ebTrcApp, gApp)
call init_raiju_mix(vApp%imagApp,vApp%remixApp)
vApp%iDeep = gApp%Grid%ie-1
@@ -483,9 +475,7 @@ module voltapp
! convert gamera inputs to remix
call MJDRecalc(vApp%MJD)
if (vApp%doDeep) then
! call mapIMagToRemix(vApp%imag2mix,vApp%remixApp) ! rcm style
! call mapRaijuToRemix(vApp)
if ( vApp%doDeep .and. (vApp%time>0) ) then
call CoupleIMagToMix(vApp)
endif
call mapGameraToRemix(vApp%mhd2mix, vApp%remixApp)
@@ -719,7 +709,8 @@ module voltapp
call inpXML%Set_Val(Model%epsds,'tracer/epsds',1.0e-2)
call setBackground(Model,inpXML)
call inpXML%Set_Val(Model%doDip,'tracer/doDip',.false.)
call setStreamline(Model,inpXML)
!Initialize ebState
if (gApp%Model%doMultiF) then
write(*,*) "Initializing MF-Chimp ..."
@@ -736,7 +727,8 @@ module voltapp
!Initialize squish indices
allocate(vApp%ebTrcApp%ebSquish%blockStartIndices(vApp%ebTrcApp%ebSquish%numSquishBlocks))
do b=1,vApp%ebTrcApp%ebSquish%numSquishBlocks
vApp%ebTrcApp%ebSquish%blockStartIndices(b) = ebGr%ks + ((b-1)*(ebGr%ke+1))/vApp%ebTrcApp%ebSquish%numSquishBlocks
vApp%ebTrcApp%ebSquish%blockStartIndices(b) = &
GetAdjustedSquishStart(vApp,((b-1)*(ebGr%ke+1))/vApp%ebTrcApp%ebSquish%numSquishBlocks)
enddo
!Do simple test to make sure locator is reasonable
@@ -751,11 +743,14 @@ module voltapp
end subroutine init_volt2Chmp
subroutine genVoltShellGrid(vApp, xmlInp)
subroutine genVoltShellGrid(vApp, xmlInp, gamRes)
class(voltApp_T) , intent(inout) :: vApp
type(XML_Input_T), intent(in) :: xmlInp
integer, intent(in) :: gamRes
character(len=strLen) :: gType
integer :: Nt_def, Np_def
!! Default number of active cells in theta and phi unless xml says otherwise
integer :: Nt, Np
!! Number of active cells in theta and phi
integer :: Ng
@@ -783,8 +778,30 @@ module voltapp
! Note: Nt is for a single hemisphere, we will manually double it in a minute
! TODO: This means we will always have an even number of total cells, and a cell interface right on the equator
! Can upgrade to allow an odd number later
call xmlInp%Set_Val(Nt, "grid/Nt", 180 ) ! 1 deg res default for uniform grid
call xmlInp%Set_Val(Np, "grid/Np", 360) ! 1 deg res default
! First determine defaults
if (gamRes<=64) then
! DBL
Nt_def = 90
Np_def = 180
else if (gamRes<=128) then
! QUAD
Nt_def = 180
Np_def = 360
else if (gamRes<=256) then
! OCT
Nt_def = 360
Np_def = 720
else
! HEX or above
! No tuned default at this resolution; best-effort guess
Nt_def = 540
Np_def = 1440
endif
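! Editor's note: each MHD grid tier (DBL -> QUAD -> OCT) doubles the default
! shell-grid resolution so the two stay roughly matched; Nt here is still
! per-hemisphere and is doubled below.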
call xmlInp%Set_Val(Nt, "grid/Nt", Nt_def) ! 1 deg res default for uniform grid
call xmlInp%Set_Val(Np, "grid/Np", Np_def) ! 1 deg res default
! Ghost cells
call xmlInp%Set_Val(Ng, "grid/Ng", 4) ! # of ghosts in every direction
nGhosts = 0

View File

@@ -2,10 +2,10 @@ module voltappHelper
use kdefs
use voltTypes
use imagtubes
use kai2geo
use mixinterfaceutils
use tubehelper
implicit none
contains

View File

@@ -389,7 +389,7 @@ module voltio
type(IOVAR_T), dimension(MAXVOLTIOVAR) :: IOVars
real(rp) :: symh
integer :: is,ie,js,je
integer :: is,ie,js,je,nClkSteps
real(rp) :: Csijk,Con(NVAR)
real(rp) :: BSDst0,AvgBSDst,DPSDst,BSSMRs(4)
integer, dimension(4) :: outSGVBnds_corner
@@ -444,6 +444,16 @@ module voltio
call AddOutVar(IOVars,"MJD" ,vApp%MJD)
call AddOutVar(IOVars,"timestep",vApp%ts)
!Performance metrics
nClkSteps = readNCalls('DeepUpdate')
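! Editor's note: every timer below is normalized by the number of recorded
! 'DeepUpdate' calls to report per-step averages (assumes nClkSteps > 0);
! note that _perf_stepTime and _perf_deepUpdateTime both read clock 1.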
call AddOutVar(IOVars,"_perf_stepTime",readClock(1)/nClkSteps)
call AddOutVar(IOVars,"_perf_deepUpdateTime",readClock(1)/nClkSteps)
call AddOutVar(IOVars,"_perf_gamTime", readClock('GameraSync')/nClkSteps)
call AddOutVar(IOVars,"_perf_squishTime", (readClock('Squish')+readClock('VoltHelpers'))/nClkSteps)
call AddOutVar(IOVars,"_perf_imagTime", readClock('InnerMag')/nClkSteps)
call AddOutVar(IOVars,"_perf_mixTime", readClock('ReMIX')/nClkSteps)
call AddOutVar(IOVars,"_perf_tubesTime", readClock('VoltTubes')/nClkSteps)
call AddOutVar(IOVars,"_perf_ioTime", readClock('IO')/nClkSteps)
! voltState stuff
call AddOutSGV(IOVars, "Potential_total", vApp%State%potential_total, &

View File

@@ -360,7 +360,12 @@ def main():
)
if debug:
print(f"slack_response_summary = {slack_response_summary}")
# Also write a summary file to the root folder of this test
with open(os.path.join(MAGE_TEST_SET_ROOT,'testSummary.out'), 'w', encoding='utf-8') as f:
f.write(test_report_details_string)
f.write('\n')
# ------------------------------------------------------------------------
if debug:

View File

@@ -378,7 +378,12 @@ def main():
)
if debug:
print(f"slack_response_summary = {slack_response_summary}")
# Also write a summary file to the root folder of this test
with open(os.path.join(MAGE_TEST_SET_ROOT,'testSummary.out'), 'w', encoding='utf-8') as f:
f.write(test_report_details_string)
f.write('\n')
# ------------------------------------------------------------------------
if debug:

View File

@@ -9,8 +9,6 @@
#PBS -j oe
#PBS -m abe
# This script just builds the MAGE software.
echo "Job $PBS_JOBID started at `date` on `hostname` in directory `pwd`."
echo 'Loading modules.'

View File

@@ -384,7 +384,7 @@ def create_magnetosphere_quicklook_plots(xml_files: list):
os.chdir(results_dir)
# Create the quicklook plot.
cmd = f"msphpic.py -id {runid}"
cmd = f"msphpic -id {runid}"
_ = subprocess.run(cmd, shell=True, check=True)
# Move back to the starting directory.
@@ -464,7 +464,7 @@ def create_REMIX_quicklook_plots(xml_files: list):
os.chdir(results_dir)
# Create the quicklook plots.
cmd = f"mixpic.py -id {runid}"
cmd = f"mixpic -id {runid}"
_ = subprocess.run(cmd, shell=True, check=True)
# Add the plots to the lists.
@@ -517,83 +517,6 @@ def merge_REMIX_quicklook_plots(plots_north: list, plots_south: list):
return merged_plot_north, merged_plot_south
def create_RCM_quicklook_plots(xml_files: list):
"""Create the RCM quicklook plot for each run.
Create the RCM quicklook plot for each run.
Parameters
----------
xml_files : list of str
List of XML files describing each run.
Returns
-------
quicklook_plots : list of str
Path to each quicklook file.
Raises
------
None
"""
# Save the current directory.
cwd = os.getcwd()
# Make the RCM quicklook plot for each run.
quicklook_plots = []
for xml_file in xml_files:
# Extract the run ID.
runid = common.extract_runid(xml_file)
# Compute the path to the results directory.
results_dir = os.path.split(xml_file)[0]
# Move to the results directory.
os.chdir(results_dir)
# Create the quicklook plot.
cmd = f"rcmpic.py -id {runid}"
_ = subprocess.run(cmd, shell=True, check=True)
# Add the plot to the list.
path = os.path.join(results_dir, "qkrcmpic.png")
quicklook_plots.append(path)
# Return to the original directory.
os.chdir(cwd)
# Return the list of quicklook plots.
return quicklook_plots
def merge_RCM_quicklook_plots(quicklook_plots: list):
"""Merge the RCM quicklook plots for all runs.
Merge the RCM quicklook plots for all runs.
Parameters
----------
quicklook_plots : list of str
List of quicklook plots to merge.
Returns
-------
merged_plot : str
Path to merged quicklook file.
Raises
------
None
"""
# Merge RCM quicklooks.
merged_plot = "combined_qkrcmpic.png"
cmd = f"convert {' '.join(quicklook_plots)} -append {merged_plot}"
print(f"cmd = {cmd}")
_ = subprocess.run(cmd, shell=True, check=True)
return merged_plot
def compare_mage_runs(args):
"""Compare a set of MAGE model runs.
@@ -710,20 +633,6 @@ def compare_mage_runs(args):
print(f"merged_remix_plot_n = {merged_remix_plot_n}")
print(f"merged_remix_plot_s = {merged_remix_plot_s}")
# Create the RCM quicklook plots.
if verbose:
print("Creating RCM quicklook plots.")
rcm_plots = create_RCM_quicklook_plots(run_xml_files)
if debug:
print(f"rcm_plots = {rcm_plots}")
# Create the merged RCM quicklook plots.
if verbose:
print("Creating merged RCM quicklook plot.")
rcm_merged_plots = merge_RCM_quicklook_plots(rcm_plots)
if debug:
print(f"rcm_merged_plots = {rcm_merged_plots}")
# ------------------------------------------------------------------------
# Post images to Slack.
@@ -736,7 +645,6 @@ def compare_mage_runs(args):
"combined_msphpic.png",
"combined_remix_n.png",
"combined_remix_s.png",
"combined_qkrcmpic.png",
]
comments_to_post = [
"Real-Time Performance\n\n",
@@ -745,7 +653,6 @@ def compare_mage_runs(args):
"Magnetosphere Quicklook Comparison Plots\n\n",
"REMIX (north) Quicklook Comparison Plots\n\n",
"REMIX (south) Quicklook Comparison Plots\n\n",
"RCM Quicklook Comparison Plots\n\n",
]
# If loud mode is on, post results to Slack.

View File

@@ -41,6 +41,9 @@ from kaipy import kaiH5
# Program description.
DESCRIPTION = "Compare MAGE model runs numerically."
# Root of directory tree for this set of tests.
MAGE_TEST_SET_ROOT = os.environ['MAGE_TEST_SET_ROOT']
# Strings to represent test pass and fail.
TEST_PASS = "PASS"
TEST_FAIL = "FAIL"
@@ -122,67 +125,6 @@ def compare_GAMERA_results(runxml1: str, runxml2: str, verbose: bool = False):
return TEST_PASS
def compare_MHDRCM_results(runxml1: str, runxml2: str, verbose: bool = False):
"""Numerically compare the MHD RCM output files from two runs.
Numerically compare the MHD RCM output files from two runs.
Parameters
----------
runxm1 : str
Path to XML file describing 1st run.
runxm2 : str
Path to XML file describing 2nd run.
verbose : bool
Set to True to print verbose information during comparison.
Returns
-------
TEST_PASS or TEST_FAIL : str
Description of result of comparison.
Raises
------
None
"""
# Determine the directories containing the sets of results.
dir1 = os.path.split(runxml1)[0]
dir2 = os.path.split(runxml2)[0]
# Generate a sorted list of output files for the 1st run.
pattern1 = os.path.join(dir1, "*.mhdrcm.h5")
files1 = glob.glob(pattern1)
files = [os.path.split(f)[1] for f in files1]
files.sort()
# Compare each output file in the two directories.
# Comparisons are done with h5diff, which must be in the PATH.
# Attributes of the steps and other top-level groups are excluded from
# comparison.
for filename in files:
file1 = os.path.join(dir1, filename)
file2 = os.path.join(dir2, filename)
if verbose:
print(f"Numerically comparing {file1} to {file2}.")
# Compare each step, without attributes.
_, step_ids = kaiH5.cntSteps(file1)
for step_id in step_ids:
step_path = f"/Step#{step_id}"
if verbose:
print(f" Comparing {step_path}.")
cmd = (
f"h5diff --exclude-attribute {step_path} {file1} {file2} "
f"{step_path}"
)
cproc = subprocess.run(cmd, shell=True, check=True)
if cproc.returncode != 0:
return TEST_FAIL
# Return the result of the comparison.
return TEST_PASS
def compare_REMIX_results(runxml1: str, runxml2: str, verbose: bool = False):
"""Numerically compare the REMIX output files from two runs.
@@ -244,67 +186,6 @@ def compare_REMIX_results(runxml1: str, runxml2: str, verbose: bool = False):
return TEST_PASS
def compare_RCM_results(runxml1: str, runxml2: str, verbose: bool = False):
"""Numerically compare the RCM output files from two runs.
Numerically compare the RCM output files from two runs.
Parameters
----------
runxm1 : str
Path to XML file describing 1st run.
runxm2 : str
Path to XML file describing 2nd run.
verbose : bool
Set to True to print verbose information during comparison.
Returns
-------
TEST_PASS or TEST_FAIL : str
Description of result of comparison.
Raises
------
None
"""
# Determine the directories containing the sets of results.
dir1 = os.path.split(runxml1)[0]
dir2 = os.path.split(runxml2)[0]
# Generate a sorted list of output files for the 1st run.
pattern1 = os.path.join(dir1, "*.rcm.h5")
files1 = glob.glob(pattern1)
files = [os.path.split(f)[1] for f in files1]
files.sort()
# Compare each result file in the two directories.
# Comparisons are done with h5diff, which must be in the PATH.
# Attributes of the steps and other top-level groups are excluded from
# comparison.
for filename in files:
file1 = os.path.join(dir1, filename)
file2 = os.path.join(dir2, filename)
if verbose:
print(f"Numerically comparing {file1} to {file2}.")
# Compare each step, without attributes.
_, step_ids = kaiH5.cntSteps(file1)
for step_id in step_ids:
step_path = f"/Step#{step_id}"
if verbose:
print(f" Comparing {step_path}.")
cmd = (
f"h5diff --exclude-attribute {step_path} {file1} {file2} "
f"{step_path}"
)
cproc = subprocess.run(cmd, shell=True, check=True)
if cproc.returncode != 0:
return TEST_FAIL
# Return the result of the comparison.
return TEST_PASS
def compare_VOLTRON_results(runxml1: str, runxml2: str, verbose: bool = False):
"""Numerically compare the VOLTRON output files from two runs.
@@ -414,14 +295,6 @@ def compare_mage_runs_numerical(args: dict):
print(f"comparison_result = {comparison_result}")
comparison_results.append(comparison_result)
# Compare the MHD RCM output files.
if verbose:
print("Comparing MHD RCM output files.")
comparison_result = compare_MHDRCM_results(runxml1, runxml2, verbose)
if debug:
print(f"comparison_result = {comparison_result}")
comparison_results.append(comparison_result)
# Compare the REMIX output files.
if verbose:
print("Comparing REMIX output files.")
@@ -430,14 +303,6 @@ def compare_mage_runs_numerical(args: dict):
print(f"comparison_result = {comparison_result}")
comparison_results.append(comparison_result)
# Compare the RCM output files.
if verbose:
print("Comparing RCM output files.")
comparison_result = compare_RCM_results(runxml1, runxml2, verbose)
if debug:
print(f"comparison_result = {comparison_result}")
comparison_results.append(comparison_result)
# Compare the VOLTRON output files.
if verbose:
print("Comparing VOLTRON output files.")
@@ -451,10 +316,8 @@ def compare_mage_runs_numerical(args: dict):
# Detail the test results.
test_report_details_string = ""
test_report_details_string += f"GAMERA: *{comparison_results[0]}*\n"
test_report_details_string += f"MHD RCM: *{comparison_results[1]}*\n"
test_report_details_string += f"REMIX: *{comparison_results[2]}*\n"
test_report_details_string += f"RCM: *{comparison_results[3]}*\n"
test_report_details_string += f"VOLTRON: *{comparison_results[4]}*\n"
test_report_details_string += f"REMIX: *{comparison_results[1]}*\n"
test_report_details_string += f"VOLTRON: *{comparison_results[2]}*\n"
# Summarize the test results.
if run_description is not None:
@@ -482,7 +345,12 @@ def compare_mage_runs_numerical(args: dict):
slack_client, test_report_details_string, thread_ts=thread_ts,
is_test=test
)
# Also write a summary file to the root folder of this test
with open(os.path.join(MAGE_TEST_SET_ROOT,'testSummary.out'), 'w', encoding='utf-8') as f:
f.write(test_report_details_string)
f.write('\n')
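# Editor's note: MAGE_TEST_SET_ROOT is read from the environment at import
# time (see the module top), so this write raises if the variable is unset
# or the directory does not exist.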
# ------------------------------------------------------------------------
if debug:

View File

@@ -16,8 +16,7 @@
# source tree). Job submission reports to Slack only on failure, report run
# in Slack-on-fail (-s) mode.
15 00 * * * ssh derecho "/glade/campaign/hao/msphere/automated_kaiju_tests/kaiju-private/testingScripts/run_mage_tests.sh -v -b development 'unitTest.py -sv'" >> /glade/campaign/hao/msphere/automated_kaiju_tests/logs/nightly-tests-2-development.out 2>&1
# Disable master unit test for now since it hangs.
# 20 00 * * * ssh derecho "/glade/campaign/hao/msphere/automated_kaiju_tests/kaiju-private/testingScripts/run_mage_tests.sh -v -b master 'unitTest.py -sv'" >> /glade/campaign/hao/msphere/automated_kaiju_tests/logs/nightly-tests-2-master.out 2>&1
20 00 * * * ssh derecho "/glade/campaign/hao/msphere/automated_kaiju_tests/kaiju-private/testingScripts/run_mage_tests.sh -v -b master 'unitTest.py -sv'" >> /glade/campaign/hao/msphere/automated_kaiju_tests/logs/nightly-tests-2-master.out 2>&1
# Run a weekly dash every Sunday morning for the development branch and
# compare to reference development run listed in reference_runs.txt.

View File

@@ -20,21 +20,18 @@ echo 'The currently loaded modules are:'
module list
echo 'Loading python environment.'
export CONDARC="{{ condarc }}"
export CONDA_ENVS_PATH="{{ conda_envs_path }}"
mage_miniconda3="{{ mage_test_root }}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
__conda_setup="$('/glade/u/home/ewinter/miniconda3/bin/conda' 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
if [ -f "/glade/u/home/ewinter/miniconda3/etc/profile.d/conda.sh" ]; then
. "/glade/u/home/ewinter/miniconda3/etc/profile.d/conda.sh"
else
export PATH="$mage_miniconda3/bin:$PATH"
export PATH="/glade/u/home/ewinter/miniconda3/bin:$PATH"
fi
fi
unset __conda_setup
conda activate {{ conda_environment }}
echo "The current conda environment is ${CONDA_PREFIX}."
@@ -42,10 +39,6 @@ echo 'Setting up MAGE environment.'
source {{ kaijuhome }}/scripts/setupEnvironment.sh
echo "The kaiju software is located at ${KAIJUHOME}."
echo 'Setting up kaipy environment.'
source {{ kaipy_private_root }}/kaipy/scripts/setupEnvironment.sh
echo "The kaipy software is located at ${KAIPYHOME}."
echo 'Setting environment variables.'
export TMPDIR={{ tmpdir }}
export SLACK_BOT_TOKEN={{ slack_bot_token }}
@@ -82,10 +75,10 @@ echo 'Generating the solar wind boundary condition file.'
{{ cda2wind_cmd }}
echo "The solar wind boundary condition file is `ls bcwind.h5`."
# Generate the RCM configuration file.
echo 'Generating the RCM configuration file.'
{{ genRCM_cmd }}
echo "The RCM configuration file is `ls rcmconfig.h5`."
# Generate the RAIJU configuration file.
echo 'Generating the RAIJU configuration file.'
genRAIJU
echo "The RAIJU configuration file is `ls raijuconfig.h5`."
# Moved this setting here to avoid error from genLFM.py.
export OMP_NUM_THREADS=128
@@ -99,7 +92,7 @@ eval $cmd
# Run the comparison. Post results to Slack if any test fails.
reference_run=`cat /glade/u/home/ewinter/mage_testing/test_runs/derecho_configuration_check_runs.txt`
cmd="python $KAIJUHOME/testingScripts/compare_mage_runs_numerical.py -sv --run_description='`derecho` configuration check' $reference_run `pwd`/weeklyDashGo.xml >& compare_mage_runs_numerical.out"
cmd="python $KAIJUHOME/testingScripts/compare_mage_runs_numerical.py -sv --run_description='derecho configuration check' $reference_run `pwd`/weeklyDashGo.xml >& compare_mage_runs_numerical.out"
echo "Run comparison command is:"
echo $cmd
eval $cmd

View File

@@ -143,16 +143,13 @@ def derecho_configuration_check(args: dict):
make_cmd = "make voltron_mpi.x"
# Create the command to generate the LFM grid.
genLFM_cmd = "genLFM.py -gid Q"
genLFM_cmd = "genLFM -gid Q"
# Create the command to generate the solar wind boundary condition file.
cda2wind_cmd = (
"cda2wind.py -t0 2016-08-09T02:00:00 -t1 2016-08-09T12:00:00"
"cda2wind -t0 2016-08-09T02:00:00 -t1 2016-08-09T12:00:00"
)
# Create the command to generate the RCM configuration.
genRCM_cmd = "genRCM.py"
# Create the command for launching an MPI program.
mpiexec_cmd = f"mpiexec {KAIJUHOME}/scripts/preproc/pinCpuCores.sh"
@@ -179,13 +176,10 @@ def derecho_configuration_check(args: dict):
pbs_options["job_priority"] = os.environ["DERECHO_TESTING_PRIORITY"]
pbs_options["walltime"] = "08:00:00"
pbs_options["modules"] = module_names
pbs_options["condarc"] = os.environ["CONDARC"]
pbs_options["conda_envs_path"] = os.environ["CONDA_ENVS_PATH"]
pbs_options["conda_environment"] = os.environ["CONDA_ENVIRONMENT"]
pbs_options["mage_test_root"] = os.environ["MAGE_TEST_ROOT"]
pbs_options["mage_test_set_root"] = os.environ["MAGE_TEST_SET_ROOT"]
pbs_options["kaijuhome"] = KAIJUHOME
pbs_options["kaipy_private_root"] = os.environ["KAIPY_PRIVATE_ROOT"]
pbs_options["tmpdir"] = os.environ["TMPDIR"]
pbs_options["slack_bot_token"] = os.environ["SLACK_BOT_TOKEN"]
pbs_options["branch_or_commit"] = os.environ["BRANCH_OR_COMMIT"]
@@ -193,7 +187,6 @@ def derecho_configuration_check(args: dict):
pbs_options["make_cmd"] = make_cmd
pbs_options["genLFM_cmd"] = genLFM_cmd
pbs_options["cda2wind_cmd"] = cda2wind_cmd
pbs_options["genRCM_cmd"] = genRCM_cmd
pbs_options["mpiexec_cmd"] = mpiexec_cmd
pbs_options["voltron_cmd"] = voltron_cmd

View File

@@ -19,33 +19,37 @@ module load {{ module }}
module list
echo 'Loading python environment.'
mage_test_root='{{ mage_test_root }}'
export CONDARC="${mage_test_root}/.condarc"
export CONDA_ENVS_PATH="${mage_test_root}/.conda"
mage_miniconda3="${mage_test_root}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
mage_test_root=$HOME
if [ -d "${mage_test_root}/miniconda3" ]; then
echo 'Loading local miniconda3'
mage_miniconda3="${mage_test_root}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
export PATH="$mage_miniconda3/bin:$PATH"
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
else
export PATH="$mage_miniconda3/bin:$PATH"
fi
fi
unset __conda_setup
else
echo 'Loading conda module'
module load conda
fi
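# Editor's note: prefer a user-local miniconda3 when present and fall back to
# the system conda module otherwise; the activated environment name now comes
# from the {{ conda_environment }} template variable.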
unset __conda_setup
conda activate kaiju-3.8-testing
conda activate {{ conda_environment }}
echo 'Setting up MAGE environment.'
source {{ kaijuhome }}/scripts/setupEnvironment.sh
source {{ kaipyhome }}/kaipy/scripts/setupEnvironment.sh
echo 'Setting environment variables.'
export TMPDIR={{ tmpdir }}
export SLACK_BOT_TOKEN={{ slack_bot_token }}
export DERECHO_TESTING_ACCOUNT={{ account }}
export BRANCH_OR_COMMIT={{ branch_or_commit }}
export MAGE_TEST_SET_ROOT={{ mage_test_set_root }}
echo 'The active environment variables are:'
printenv

View File

@@ -81,7 +81,6 @@ BUILD_BIN_DIR = "bin"
# Data and configuration files used by the Intel Inspector tests.
TEST_INPUT_FILES = [
"tinyCase.xml",
"bcwind.h5",
"memSuppress.sup",
"threadSuppress.sup",
]
@@ -302,11 +301,31 @@ def intelChecks(args: dict):
from_path = os.path.join(TEST_SCRIPTS_DIRECTORY, filename)
to_path = os.path.join(".", filename)
shutil.copyfile(from_path, to_path)
# Generate bcwind data file.
if verbose:
print("Creating bcwind data file.")
cmd = "cda2wind -t0 2016-08-09T09:00:00 -t1 2016-08-09T11:00:00"
if debug:
print(f"cmd = {cmd}")
try:
cproc = subprocess.run(cmd, shell=True, check=True)
except subprocess.CalledProcessError as e:
print("ERROR: Unable to create bcwind data file for module set "
f"{module_set_name}.\n"
f"e.cmd = {e.cmd}\n"
f"e.returncode = {e.returncode}\n"
"See testing log for output from cda2wind.\n"
"Skipping remaining steps for module set"
f"{module_set_name}\n")
continue
if debug:
print(f"cproc = {cproc}")
# Generate the LFM grid file.
if verbose:
print("Creating LFM grid file.")
cmd = "genLFM.py -gid D"
cmd = "genLFM -gid D"
if debug:
print(f"cmd = {cmd}")
try:
@@ -316,27 +335,27 @@ def intelChecks(args: dict):
f"{module_set_name}.\n"
f"e.cmd = {e.cmd}\n"
f"e.returncode = {e.returncode}\n"
"See testing log for output from genLFM.py.\n"
"See testing log for output from genLFM.\n"
"Skipping remaining steps for module set"
f"{module_set_name}\n")
continue
if debug:
print(f"cproc = {cproc}")
# Generate the RCM configuration file.
# Generate the Raiju configuration file.
if verbose:
print("Creating RCM configuration file.")
cmd = "genRCM.py"
print("Creating Raiju configuration file.")
cmd = "genRAIJU"
if debug:
print(f"cmd = {cmd}")
try:
cproc = subprocess.run(cmd, shell=True, check=True)
except subprocess.CalledProcessError as e:
print("ERROR: Unable to create RCM configuration file"
print("ERROR: Unable to create Raiju configuration file"
f" for module set {module_set_name}.\n"
f"e.cmd = {e.cmd}\n"
f"e.returncode = {e.returncode}\n"
"See testing log for output from genRCM.py.\n"
"See testing log for output from genRAIJU.\n"
"Skipping remaining steps for module set "
f"{module_set_name}\n")
continue
@@ -354,11 +373,12 @@ def intelChecks(args: dict):
pbs_options["job_priority"] = os.environ["DERECHO_TESTING_PRIORITY"]
pbs_options["modules"] = module_names
pbs_options["kaijuhome"] = KAIJUHOME
pbs_options["kaipyhome"] = os.environ["KAIPYHOME"]
pbs_options["tmpdir"] = os.environ["TMPDIR"]
pbs_options["slack_bot_token"] = os.environ["SLACK_BOT_TOKEN"]
pbs_options["mage_test_root"] = os.environ["MAGE_TEST_ROOT"]
pbs_options["branch_or_commit"] = BRANCH_OR_COMMIT
pbs_options["mage_test_set_root"] = os.environ["MAGE_TEST_SET_ROOT"]
pbs_options["conda_environment"] = os.environ["CONDA_ENVIRONMENT"]
# Set options specific to the memory check, then render the template.
pbs_options["job_name"] = "mage_intelCheckSubmitMem"

View File

@@ -35,6 +35,9 @@ DESCRIPTION = 'Create report for Intel Inspector tests.'
# Branch or commit (or tag) used for testing.
BRANCH_OR_COMMIT = os.environ['BRANCH_OR_COMMIT']
# Root of directory tree for this set of tests.
MAGE_TEST_SET_ROOT = os.environ["MAGE_TEST_SET_ROOT"]
def main():
"""Begin main program.
@@ -255,7 +258,12 @@ def main():
)
if debug:
print(f"slack_response_summary = {slack_response_summary}")
# Also write a summary file to the root folder of this test
with open(os.path.join(MAGE_TEST_SET_ROOT,'testSummary.out'), 'w', encoding='utf-8') as f:
f.write(test_report_details_string)
f.write('\n')
# ------------------------------------------------------------------------
if debug:

View File

@@ -20,21 +20,26 @@ echo 'The currently loaded modules are:'
module list
echo 'Loading python environment.'
export CONDARC="{{ condarc }}"
export CONDA_ENVS_PATH="{{ conda_envs_path }}"
mage_miniconda3="{{ mage_test_root }}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
mage_test_root=$HOME
if [ -d "${mage_test_root}/miniconda3" ]; then
echo 'Loading local miniconda3'
mage_miniconda3="${mage_test_root}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
export PATH="$mage_miniconda3/bin:$PATH"
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
else
export PATH="$mage_miniconda3/bin:$PATH"
fi
fi
unset __conda_setup
else
echo 'Loading conda module'
module load conda
fi
unset __conda_setup
conda activate {{ conda_environment }}
echo "The current conda environment is ${CONDA_PREFIX}."
@@ -42,10 +47,6 @@ echo 'Setting up MAGE environment.'
source {{ kaijuhome }}/scripts/setupEnvironment.sh
echo "The kaiju software is located at ${KAIJUHOME}."
echo 'Setting up kaipy environment.'
source {{ kaipy_private_root }}/kaipy/scripts/setupEnvironment.sh
echo "The kaipy software is located at ${KAIPYHOME}."
echo 'Setting environment variables.'
export TMPDIR={{ tmpdir }}
export SLACK_BOT_TOKEN={{ slack_bot_token }}

View File

@@ -84,6 +84,35 @@ MAGE_REPRODUCIBILITY_CHECK_PBS_TEMPLATE_FILE = os.path.join(
MAGE_REPRODUCIBILITY_CHECK_PBS_SCRIPT = "mage_reproducibility_check.pbs"
def create_command_line_parser():
"""Create the command-line argument parser.
Create the parser for command-line arguments.
Parameters
----------
None
Returns
-------
parser : argparse.ArgumentParser
Command-line argument parser for this script.
Raises
------
None
"""
parser = common.create_command_line_parser(DESCRIPTION)
parser.add_argument(
"--module_set_file", "-f", default=DEFAULT_MODULE_SET_FILE,
help=(
"Path to text file containing set of modules to build with "
"(default: %(default)s)"
)
)
return parser
def mage_reproducibility_check(args: dict):
"""Perform a MAGE reproducibility check.
@@ -146,7 +175,6 @@ def mage_reproducibility_check(args: dict):
module_names, cmake_environment, cmake_options = (
common.read_build_module_list_file(module_set_file)
)
print(f"module_names = {module_names}")
# Extract the name of the list.
module_set_name = os.path.split(module_set_file)[-1].rstrip(".lst")
@@ -213,11 +241,11 @@ def mage_reproducibility_check(args: dict):
pbs_template = Template(template_content)
# Assemble commands needed in the PBS script.
genLFM_cmd = "genLFM.py -gid Q"
genLFM_cmd = "genLFM -gid Q"
cda2wind_cmd = (
"cda2wind.py -t0 2016-08-09T02:00:00 -t1 2016-08-09T12:00:00"
"cda2wind -t0 2016-08-09T02:00:00 -t1 2016-08-09T12:00:00"
)
genRCM_cmd = "genRCM.py"
genRaiju_cmd = "genRAIJU"
mpiexec_cmd = f"mpiexec {KAIJUHOME}/scripts/preproc/pinCpuCores.sh"
voltron_cmd = "../bin/voltron_mpi.x weeklyDashGo.xml"
@@ -240,13 +268,10 @@ def mage_reproducibility_check(args: dict):
pbs_options["job_priority"] = os.environ["DERECHO_TESTING_PRIORITY"]
pbs_options["walltime"] = "08:00:00"
pbs_options["modules"] = module_names
pbs_options["condarc"] = os.environ["CONDARC"]
pbs_options["conda_envs_path"] = os.environ["CONDA_ENVS_PATH"]
pbs_options["conda_environment"] = os.environ["CONDA_ENVIRONMENT"]
pbs_options["mage_test_root"] = os.environ["MAGE_TEST_ROOT"]
pbs_options["mage_test_set_root"] = os.environ["MAGE_TEST_SET_ROOT"]
pbs_options["conda_environment"] = os.environ["CONDA_ENVIRONMENT"]
pbs_options["kaijuhome"] = KAIJUHOME
pbs_options["kaipy_private_root"] = os.environ["KAIPY_PRIVATE_ROOT"]
pbs_options["tmpdir"] = os.environ["TMPDIR"]
pbs_options["slack_bot_token"] = os.environ["SLACK_BOT_TOKEN"]
pbs_options["branch_or_commit"] = os.environ["BRANCH_OR_COMMIT"]
@@ -254,7 +279,7 @@ def mage_reproducibility_check(args: dict):
pbs_options["make_cmd"] = make_cmd
pbs_options["genLFM_cmd"] = genLFM_cmd
pbs_options["cda2wind_cmd"] = cda2wind_cmd
pbs_options["genRCM_cmd"] = genRCM_cmd
pbs_options["genRaiju_cmd"] = genRaiju_cmd
pbs_options["mpiexec_cmd"] = mpiexec_cmd
pbs_options["voltron_cmd"] = voltron_cmd
@@ -324,13 +349,10 @@ def mage_reproducibility_check(args: dict):
pbs_options["job_priority"] = os.environ["DERECHO_TESTING_PRIORITY"]
pbs_options["walltime"] = "02:00:00"
pbs_options["modules"] = module_names
pbs_options["condarc"] = os.environ["CONDARC"]
pbs_options["conda_envs_path"] = os.environ["CONDA_ENVS_PATH"]
pbs_options["conda_environment"] = os.environ["CONDA_ENVIRONMENT"]
pbs_options["mage_test_root"] = os.environ["MAGE_TEST_ROOT"]
pbs_options["mage_test_set_root"] = os.environ["MAGE_TEST_SET_ROOT"]
pbs_options["conda_environment"] = os.environ["CONDA_ENVIRONMENT"]
pbs_options["kaijuhome"] = KAIJUHOME
pbs_options["kaipy_private_root"] = os.environ["KAIPY_PRIVATE_ROOT"]
pbs_options["tmpdir"] = os.environ["TMPDIR"]
pbs_options["slack_bot_token"] = os.environ["SLACK_BOT_TOKEN"]
pbs_options["branch_or_commit"] = os.environ["BRANCH_OR_COMMIT"]
@@ -401,7 +423,7 @@ def mage_reproducibility_check(args: dict):
def main():
"""Driver for command-line version of code."""
# Set up the command-line parser.
parser = common.create_command_line_parser(DESCRIPTION)
parser = create_command_line_parser()
# # Add additional arguments specific to this script.
# parser.add_argument(

View File

@@ -165,6 +165,20 @@ Suppression = {
}
}
Suppression = {
Name = "MPI-related gamera initialization False Positive";
Type = { uninitialized_memory_access }
Stacks = {
{
...;
func=writeh5res, src=gioH5.F90;
}
allocation = {
func=voltron_mpix, src=voltron_mpix.F90;
}
}
}
Suppression = {
Name = "MPI-related gamera initialization False Positive";
Type = { uninitialized_memory_access }

View File

@@ -12,23 +12,29 @@
echo "Job $PBS_JOBID started at `date` on `hostname` in directory `pwd`."
echo 'Loading python environment.'
mage_test_root='{{ mage_test_root }}'
export CONDARC="${mage_test_root}/.condarc"
export CONDA_ENVS_PATH="${mage_test_root}/.conda"
mage_miniconda3="${mage_test_root}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
mage_test_root=$HOME
if [ -d "${mage_test_root}/miniconda3" ]; then
echo 'Loading local miniconda3'
export CONDARC="${mage_test_root}/.condarc"
export CONDA_ENVS_PATH="${mage_test_root}/.conda"
mage_miniconda3="${mage_test_root}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
export PATH="$mage_miniconda3/bin:$PATH"
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
else
export PATH="$mage_miniconda3/bin:$PATH"
fi
fi
unset __conda_setup
else
echo 'Loading conda module'
module load conda
fi
unset __conda_setup
conda activate kaiju-3.8-testing
conda activate {{ conda_environment }}
#echo 'Setting up MAGE environment.'
source {{ kaijuhome }}/scripts/setupEnvironment.sh
@@ -48,7 +54,10 @@ echo 'The active environment variables are:'
printenv
# Process the data and generate the output video
python $KAIPYHOME/kaipy/scripts/quicklook/gamerrVid.py -d1 {{ case1F }} -id1 {{ case1id }} -d2 {{ case2F }} -id2 {{ case2id }} -o {{ frameFolder }}/{{ caseName }} -ts {{ ts }} -te {{ te }} -dt {{ dt }} -Nth 9 >& {{ caseName }}.out
gamerrVid -d1 {{ case1F }} -id1 {{ case1id }} -d2 {{ case2F }} -id2 {{ case2id }} -o {{ frameFolder }}/{{ caseName }} -ts {{ ts }} -te {{ te }} -dt {{ dt }} -Nth 9 >& {{ caseName }}.out
# copy output video to test root folder
cp {{ frameFolder }}/{{ caseName }}.mp4 $MAGE_TEST_SET_ROOT/.
echo "Job $PBS_JOBID ended at `date` on `hostname` in directory `pwd`."

View File

@@ -12,23 +12,30 @@
echo "Job $PBS_JOBID started at `date` on `hostname` in directory `pwd`."
echo 'Loading python environment.'
mage_test_root='{{ mage_test_root }}'
export CONDARC="${mage_test_root}/.condarc"
export CONDA_ENVS_PATH="${mage_test_root}/.conda"
mage_miniconda3="${mage_test_root}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
mage_test_root=$HOME
if [ -d "${mage_test_root}/miniconda3" ]; then
echo 'Loading local miniconda3'
mage_test_root='{{ mage_test_root }}'
export CONDARC="${mage_test_root}/.condarc"
export CONDA_ENVS_PATH="${mage_test_root}/.conda"
mage_miniconda3="${mage_test_root}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
export PATH="$mage_miniconda3/bin:$PATH"
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
else
export PATH="$mage_miniconda3/bin:$PATH"
fi
fi
unset __conda_setup
else
echo 'Loading conda module'
module load conda
fi
unset __conda_setup
conda activate kaiju-3.8-testing
conda activate {{ conda_environment }}
#echo 'Setting up MAGE environment.'
source {{ kaijuhome }}/scripts/setupEnvironment.sh

View File

@@ -23,23 +23,30 @@ module load {{ module }}
module list
echo 'Loading python environment.'
mage_test_root='{{ mage_test_root }}'
export CONDARC="${mage_test_root}/.condarc"
export CONDA_ENVS_PATH="${mage_test_root}/.conda"
mage_miniconda3="${mage_test_root}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
mage_test_root=$HOME
if [ -d "${mage_test_root}/miniconda3" ]; then
echo 'Loading local miniconda3'
mage_test_root='{{ mage_test_root }}'
export CONDARC="${mage_test_root}/.condarc"
export CONDA_ENVS_PATH="${mage_test_root}/.conda"
mage_miniconda3="${mage_test_root}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
export PATH="$mage_miniconda3/bin:$PATH"
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
else
export PATH="$mage_miniconda3/bin:$PATH"
fi
fi
unset __conda_setup
else
echo 'Loading conda module'
module load conda
fi
unset __conda_setup
conda activate kaiju-3.8-testing
conda activate {{ conda_environment }}
echo 'Setting up MAGE environment.'
source {{ kaijuhome }}/scripts/setupEnvironment.sh

View File

@@ -3,9 +3,9 @@
<Kaiju>
<VOLTRON>
<time tFin="7201.0"/>
<spinup doSpin="T" tSpin="3600.0"/>
<spinup doSpin="T" tSpin="3600.0" tIO="-600.0"/>
<output dtOut="60.0" tsOut="100"/>
<coupling dtCouple="5.0" imType="RCM" doQkSquish="F" qkSquishStride="2" doAsyncCoupling="F" doSerial="{{ serial_coupling }}"/>
<coupling dtCouple="5.0" imType="RAIJU" doQkSquish="F" qkSquishStride="2" doAsyncCoupling="F" doSerial="{{ serial_coupling }}"/>
<restart dtRes="3600.0"/>
<imag doInit="T"/>
<threading NumTh="128"/>
@@ -37,10 +37,15 @@
<conductance doStarlight="T" doRamp="F"/>
<precipitation aurora_model_type="LINMRG" alpha="0.2" beta="0.4" doAuroralSmooth="F"/>
</REMIX>
<RCM>
<rcmdomain domType="ELLIPSE"/>
<ellipse xSun="12.5" yDD="15.0" xTail="-20.0" isDynamic="T"/>
<grid LowLat="30.0" HiLat="75.0" doLatStretch="F"/>
<plasmasphere isDynamic="T" initKp="2" doRefill="T" tAvg="600.0"/>
</RCM>
<RAIJU>
<output loudConsole="T" doFat="F" doLossExtras="F" doDebug="F" writeGhosts="F"/>
<grid gType="SHGRID" ThetaL="15" ThetaU="50"/>
<domain tail_buffer="15.0" sun_buffer="15.0" tail_active="12.0" sun_active="12.0"/>
<sim pdmb="0.75" doSmoothgrads="F"/>
<config fname="raijuconfig.h5"/>
<plasmasphere doPsphere="T" doExcessMap="T"/>
<losses doLosses="T" doCX="T" doSS="T" doCC="T"/>
<cpl nFluidsIn="1" startupTscl="7200.0" vaFracThresh="0.60" bminThresh="5.0" Pstd="5.0" normAngThresh="180.0"/>
<fluidIn1 imhd="0" flav="2" excessToPsph="T"/>
</RAIJU>
</Kaiju>

View File

@@ -39,9 +39,6 @@ TEST_DIRECTORY = os.path.join(MAGE_TEST_SET_ROOT, 'compTest')
# Home directory of kaiju installation
KAIJUHOME = os.environ['KAIJUHOME']
# Home directory of kaipy installation
KAIPYHOME = os.environ['KAIPYHOME']
# Path to directory containing the test scripts
TEST_SCRIPTS_DIRECTORY = os.path.join(KAIJUHOME, 'testingScripts')
@@ -160,7 +157,7 @@ def generateAndRunCase(caseName,pbsTemplate,pbs_options,xmlTemplate,xml_options,
shutil.copy2('../voltron.x', './voltron.x')
shutil.copy2('../voltron_mpi.x', './voltron_mpi.x')
shutil.copy2('../lfmD.h5', './lfmD.h5')
shutil.copy2('../rcmconfig.h5', './rcmconfig.h5')
shutil.copy2('../raijuconfig.h5', './raijuconfig.h5')
shutil.copy2('../bcwind.h5', './bcwind.h5')
# Submit the job
if verbose:
@@ -610,7 +607,7 @@ def main():
# Generate the LFM grid file.
if verbose:
print('Creating LFM grid file.')
cmd = 'genLFM.py -gid D'
cmd = 'genLFM -gid D'
if debug:
print(f"cmd = {cmd}")
try:
@@ -620,7 +617,7 @@ def main():
f"{module_set_name}.\n"
f"e.cmd = {e.cmd}\n"
f"e.returncode = {e.returncode}\n"
'See testing log for output from genLFM.py.\n'
'See testing log for output from genLFM.\n'
'Skipping remaining steps for module set'
f"{module_set_name}\n")
continue
@@ -628,7 +625,7 @@ def main():
# Generate the solar wind boundary condition file.
if verbose:
print('Creating solar wind initial conditions file.')
cmd = 'cda2wind.py -t0 2016-08-09T02:00:00 -t1 2016-08-09T12:00:00'
cmd = 'cda2wind -t0 2016-08-09T02:00:00 -t1 2016-08-09T12:00:00'
if debug:
print(f"cmd = {cmd}")
try:
@@ -638,25 +635,25 @@ def main():
f" for module set {module_set_name}.\n"
f"e.cmd = {e.cmd}\n"
f"e.returncode = {e.returncode}\n"
'See testing log for output from cda2wind.py.\n'
'See testing log for output from cda2wind.\n'
'Skipping remaining steps for module set'
f"{module_set_name}\n")
continue
# Generate the RCM configuration file.
# Generate the Raiju configuration file.
if verbose:
print('Creating RCM configuration file.')
cmd = 'genRCM.py'
print('Creating Raiju configuration file.')
cmd = 'genRAIJU'
if debug:
print(f"cmd = {cmd}")
try:
_ = subprocess.run(cmd, shell=True, check=True)
except subprocess.CalledProcessError as e:
print('ERROR: Unable to create RCM configuration file'
print('ERROR: Unable to create Raiju configuration file'
f" for module set {module_set_name}.\n"
f"e.cmd = {e.cmd}\n"
f"e.returncode = {e.returncode}\n"
'See testing log for output from genRCM.py.\n'
'See testing log for output from genRAIJU.\n'
'Skipping remaining steps for module set '
f"{module_set_name}\n")
continue
@@ -668,12 +665,12 @@ def main():
base_pbs_options['job_priority'] = os.environ['DERECHO_TESTING_PRIORITY']
base_pbs_options['modules'] = module_names
base_pbs_options['kaijuhome'] = KAIJUHOME
base_pbs_options['kaipyhome'] = KAIPYHOME
base_pbs_options['tmpdir'] = os.environ['TMPDIR']
base_pbs_options['slack_bot_token'] = os.environ['SLACK_BOT_TOKEN']
base_pbs_options['mage_test_root'] = os.environ['MAGE_TEST_ROOT']
base_pbs_options['mage_test_set_root'] = os.environ['MAGE_TEST_SET_ROOT']
base_pbs_options['branch_or_commit'] = os.environ['BRANCH_OR_COMMIT']
base_pbs_options["conda_environment"] = os.environ["CONDA_ENVIRONMENT"]
base_pbs_options['report_options'] = ''
if debug:
base_pbs_options['report_options'] += ' -d'

195
testingScripts/runLocalTests.py Executable file
View File

@@ -0,0 +1,195 @@
#!/usr/bin/env python
import argparse
import os
import subprocess
import pathlib
import datetime
def create_command_line_parser():
"""Create the command-line argument parser.
Create the parser for command-line arguments.
Returns
-------
parser : argparse.ArgumentParser
Command-line argument parser for this script.
"""
parser = argparse.ArgumentParser(description="Script to help set up automated tests within a kaiju repo")
parser.add_argument(
"-A", default="",
help="Charge code to use when running tests."
)
parser.add_argument(
"-ce", default="",
help="Conda environment name to load with conda module"
)
parser.add_argument(
"-p", default="economy",
help="Job priority to use when running tests (default: %(default)s)."
)
parser.add_argument(
"--unitTests", action='store_true',default=False,
help="Run unit tests (default: %(default)s)."
)
parser.add_argument(
"--weeklyDash", action='store_true',default=False,
help="Run weekly dash (default: %(default)s)."
)
parser.add_argument(
"--compTests", action='store_true',default=False,
help="Run default subset of comparative tests (default: %(default)s)."
)
parser.add_argument(
"--compTestsFull", action='store_true',default=False,
help="Run full suite of comparative tests (over-rides --compTests) (default: %(default)s)."
)
parser.add_argument(
"--buildTests", action='store_true',default=False,
help="Run build tests (default: %(default)s)."
)
parser.add_argument(
"--icTests", action='store_true',default=False,
help="Run tests to build Initial Condition files (default: %(default)s)."
)
parser.add_argument(
"--intelChecks", action='store_true',default=False,
help="Run Intel Inspector memory and thread check tests (default: %(default)s)."
)
parser.add_argument(
"--reproTests", action='store_true',default=False,
help="Run reproducibility tests (default: %(default)s)."
)
parser.add_argument(
"--all", action='store_true',default=False,
help="Run all tests (default: %(default)s)."
)
return parser
def main():
"""Helper script to run automated tests locally inside a kaiju repository
"""
# Set up the command-line parser.
parser = create_command_line_parser()
# Parse the command-line arguments.
args = parser.parse_args()
# Adjust test options
if args.all:
args.unitTests = True
args.weeklyDash = True
args.compTests = True
args.compTestsFull = True
args.buildTests = True
args.icTests = True
args.intelChecks = True
args.reproTests = True
if args.compTestsFull:
args.compTests = False
if not (args.unitTests or args.weeklyDash or args.compTests or args.compTestsFull or
args.buildTests or args.icTests or args.intelChecks or args.reproTests):
parser.print_help()
exit()
# find repo home directory
called_from = os.path.dirname(os.path.abspath(__file__))
os.chdir(called_from)
os.chdir('..')
homeDir = os.getcwd()
# Check for necessary environment variables
if len(args.ce) == 0 and 'CONDA_DEFAULT_ENV' not in os.environ:
print("A conda environment name was not supplied, and a currently loaded conda environment could not be determined.")
print("Please either supply the name of a conda environment with the '-ce <name>' option,")
print(" or load an entironment before running this script, and it should be automatically found.")
exit()
elif len(args.ce) == 0:
args.ce = os.environ['CONDA_DEFAULT_ENV']
print(f"Automatically setting conda environment to {args.ce}")
if len(args.A) == 0 and (args.unitTests or args.weeklyDash or
args.compTests or args.compTestsFull or args.intelChecks or args.reproTests):
print("A charge code with not supplied, but the requested tests require one.")
print("Please supply a charge code with the -A # option.")
exit()
if 'KAIJUHOME' not in os.environ:
os.environ['KAIJUHOME'] = homeDir
print(f"Running tests out of local git repository: {homeDir}")
if pathlib.Path(homeDir).resolve() != pathlib.Path(os.environ['KAIJUHOME']).resolve():
print("The setupEnvironment.sh script must be sourced for the repo this script resides in before calling it.")
exit()
if 'KAIPYHOME' not in os.environ and (args.weeklyDash or args.compTests or args.compTestsFull):
print("The 'KAIPYHOME' environment variable was not set, but the requested tests require it.")
print("The setupEnvironment.sh script for ANY kaipy repo must be sourced before running these tests.")
exit()
elif 'KAIPYHOME' not in os.environ:
os.environ['KAIPYHOME'] = ""
# Set environment variables
os.environ['MAGE_TEST_ROOT'] = homeDir
os.environ['MAGE_TEST_RUNS_ROOT']=os.path.join(os.environ['MAGE_TEST_ROOT'],"test_runs")
os.environ['DERECHO_TESTING_ACCOUNT'] = args.A
os.environ['DERECHO_TESTING_QUEUE'] = 'main'
os.environ['DERECHO_TESTING_PRIORITY'] = args.p
os.environ['SLACK_BOT_TOKEN'] = '' # help ensure no accidental reporting to slack
os.environ['PYTHONUNBUFFERED']='TRUE'
os.environ['CONDA_ENVIRONMENT']=args.ce
gitBranch = subprocess.run(['git','branch','--show-current'], stdout=subprocess.PIPE).stdout.decode('utf-8')
if 'not a git repository' in gitBranch:
print("This script must be executed inside a kaiju git repository")
exit()
gitBranch = gitBranch.strip()
os.environ['BRANCH_OR_COMMIT'] = gitBranch
currenttime = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
test_set_dir = f"{currenttime}-{gitBranch}"
os.environ['MAGE_TEST_SET_ROOT'] = os.path.join(os.environ['MAGE_TEST_RUNS_ROOT'],test_set_dir)
os.makedirs(os.environ['MAGE_TEST_SET_ROOT'], exist_ok=True)
os.chdir(os.environ['MAGE_TEST_SET_ROOT'])
os.environ['CONDARC'] = '' # these must be specified to avoid errors
os.environ['CONDA_ENVS_PATH'] = ''
os.environ['KAIPY_PRIVATE_ROOT'] = os.environ['KAIPYHOME'] # some scripts use this alternate
print(f"Running tests on branch {gitBranch}")
if len(args.A) > 0:
print(f"Using charge code {args.A} with priority {args.p}")
print(f"Running in folder test_runs/{test_set_dir}")
print("")
# Run Tests
if args.unitTests:
print("Running unit tests")
subprocess.call(['python', os.path.join(os.environ['MAGE_TEST_ROOT'],'testingScripts','unitTest.py'),'-tv'])
if args.weeklyDash:
print("Running weekly dash")
subprocess.call(['python', os.path.join(os.environ['MAGE_TEST_ROOT'],'testingScripts','weeklyDash.py'),'-tv'])
if args.compTests:
print("Running default comparative tests subset")
subprocess.call(['python', os.path.join(os.environ['MAGE_TEST_ROOT'],'testingScripts','relativeTests.py'),'-tv'])
if args.compTestsFull:
print("Running full comparative tests")
subprocess.call(['python', os.path.join(os.environ['MAGE_TEST_ROOT'],'testingScripts','relativeTests.py'),'-tva'])
if args.buildTests:
print("Running build tests")
subprocess.call(['python', os.path.join(os.environ['MAGE_TEST_ROOT'],'testingScripts','buildTest.py'),'-tv'])
if args.icTests:
print("Running Initial Condition tests")
subprocess.call(['python', os.path.join(os.environ['MAGE_TEST_ROOT'],'testingScripts','ICtest.py'),'-tv'])
if args.intelChecks:
print("Running memory and thread tests")
subprocess.call(['python', os.path.join(os.environ['MAGE_TEST_ROOT'],'testingScripts','intelChecks.py'),'-tv'])
if args.reproTests:
print("Running reproducibility tests")
subprocess.call(['python', os.path.join(os.environ['MAGE_TEST_ROOT'],'testingScripts','mage_reproducibility_check.py'),'-tv'])
if __name__ == "__main__":
main()

View File

@@ -20,21 +20,26 @@ echo 'The currently loaded modules are:'
module list
echo 'Loading python environment.'
export CONDARC="{{ condarc }}"
export CONDA_ENVS_PATH="{{ conda_envs_path }}"
mage_miniconda3="{{ mage_test_root }}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
mage_test_root=$HOME
if [ -d "${mage_test_root}/miniconda3" ]; then
echo 'Loading local miniconda3'
mage_miniconda3="${mage_test_root}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
export PATH="$mage_miniconda3/bin:$PATH"
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
else
export PATH="$mage_miniconda3/bin:$PATH"
fi
fi
unset __conda_setup
else
echo 'Loading conda module'
module load conda
fi
unset __conda_setup
conda activate {{ conda_environment }}
echo "The current conda environment is ${CONDA_PREFIX}."
@@ -42,10 +47,6 @@ echo 'Setting up MAGE environment.'
source {{ kaijuhome }}/scripts/setupEnvironment.sh
echo "The kaiju software is located at ${KAIJUHOME}."
echo 'Setting up kaipy environment.'
source {{ kaipy_private_root }}/kaipy/scripts/setupEnvironment.sh
echo "The kaipy software is located at ${KAIPYHOME}."
echo 'Setting environment variables.'
export TMPDIR={{ tmpdir }}
export SLACK_BOT_TOKEN={{ slack_bot_token }}
@@ -69,10 +70,10 @@ echo 'Generating the solar wind boundary condition file.'
{{ cda2wind_cmd }}
echo "The solar wind boundary condition file is `ls bcwind.h5`."
# Generate the RCM configuration file.
echo 'Generating the RCM configuration file.'
{{ genRCM_cmd }}
echo "The RCM configuration file is `ls rcmconfig.h5`."
# Generate the Raiju configuration file.
echo 'Generating the Raiju configuration file.'
{{ genRaiju_cmd }}
echo "The Raiju configuration file is `ls raijuconfig.h5`."
# Run the model.
MPICOMMAND="{{ mpiexec_cmd }}"

381
testingScripts/run_mage_tests.sh Executable file
View File

@@ -0,0 +1,381 @@
#!/usr/bin/bash
# ############################################################################
# IMPORTANT NOTES:
# This bash script was designed to run from a cron job on the 'cron' host at
# NCAR, using ssh to execute this script on a derecho login node. See
# kaiju/testingScripts/crontab for an example of how to invoke this script.
# SSH must be configured so that ssh from cron to derecho does not require a
# password.
# ############################################################################
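# Illustrative cron entry (hypothetical schedule, host alias, and path; see
# kaiju/testingScripts/crontab for the real example):
#   0 2 * * 1 ssh derecho 'bash -l /path/to/kaiju/testingScripts/run_mage_tests.sh -v "unitTest.py -v"'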
echo '***********************************************************************'
echo "Starting $0 at `date` on `hostname`."
# ############################################################################
# Force this script to exit on any failure.
set -e
# ----------------------------------------------------------------------------
# These *should* be the only variables you have to change when moving the
# testing environment around or changing python environments.
# Root of kaiju testing environment - *everything* goes under here, except
# for the outputs from the individual test runs.
export MAGE_TEST_ROOT='/glade/campaign/hao/msphere/automated_kaiju_tests'
# Root of kaiju test results tree.
export MAGE_TEST_RESULTS_ROOT='/glade/derecho/scratch/ewinter/mage_testing'
# Location of the miniconda installation used for testing.
export MAGE_MINICONDA='/glade/u/home/ewinter/miniconda3'
# Setup command for conda.
__conda_setup="$('/glade/u/home/ewinter/miniconda3/bin/conda' 'shell.bash' 'hook' 2> /dev/null)"
# conda environment for testing
export CONDA_ENVIRONMENT='kaiju-3.12-testing'
# ----------------------------------------------------------------------------
# Other exported environment variables
# Path to directory containing the datetime-stamped directories for individual
# sets of test runs.
export MAGE_TEST_RUNS_ROOT="${MAGE_TEST_RESULTS_ROOT}/test_runs"
# PBS account to use for running tests on derecho
export DERECHO_TESTING_ACCOUNT='P28100045'
# PBS queue to use for running tests on derecho
export DERECHO_TESTING_QUEUE='main'
# PBS priority to use for running tests on derecho
export DERECHO_TESTING_PRIORITY='economy'
# Set the token for sending messages to Slack.
export SLACK_BOT_TOKEN=`cat $HOME/.ssh/slack.txt`
# IMPORTANT: Set this environment variable to force the python print()
# function to automatically flush its output in all of the testing scripts.
# This will ensure that output from the testing scripts is logged in the order
# that it is created.
export PYTHONUNBUFFERED='TRUE'
# ----------------------------------------------------------------------------
# Non-exported variables used by this script
# Path to the SSH key to use for running the tests. The key must have no
# passphrase. This is needed to allow passwordless access to BitBucket.
ssh_key_for_testing='/glade/u/home/ewinter/.ssh/id_rsa_kaiju_testing'
# Setup script for CDF code
cdf_setup_script="${MAGE_TEST_ROOT}/local/cdf/3.9.0/bin/definitions.B"
# Address of kaiju-private repository on BitBucket.
kaiju_repository='git@bitbucket.org:aplkaiju/kaiju-private.git'
# Name of local directory containing clone of repository.
local_kaiju_name='kaiju'
# Default kaiju code branch to test if no branch, commit, or tag is specified.
default_branch_to_test='development'
# Output string to separate results from different tests.
test_output_separator='------------------------------------------------------'
# ############################################################################
# Define the command-line help function.
Help()
{
# Display Help
echo 'Control script for running MAGE tests via cron and ssh.'
echo
echo "Syntax: run_mage_tests.sh [-b branch] [-c commit] [-d] [-h] [-v] 'test1[,test2,...]'"
echo 'options:'
echo 'b branch Run tests using this branch'
echo 'c commit Run tests using this commit (or tag)'
echo 'd Use debug mode.'
echo 'h Print this help message.'
echo 'v Use verbose mode.'
echo
echo 'If no branch or commit is specified, the latest commit on the development branch will be used for the tests.'
echo "Each test can have its own options, e.g. 'buildTest.py -d,unitTest.py -lv'"
}
# Process command-line options.
branch=''
commit=''
debug=false
verbose=false
while getopts ':b:c:dhv' option; do
case $option in
b) # Test a specific branch
branch=$OPTARG;;
c) # Test a specific commit or tag
commit=$OPTARG;;
d) # debug mode
debug=true;;
h) # display Help
Help
exit;;
v) # verbose mode
verbose=true;;
\?) # Invalid option
echo 'Error: Invalid option'
exit 1;;
esac
done
if $debug; then
echo "branch=${branch}"
echo "commit=${commit}"
echo "debug=${debug}"
echo "help=${help}"
echo "verbose=${verbose}"
fi
# Fetch the branch or commit to test. If neither specified, use the default.
if [[ -n $branch && -n $commit ]]; then
echo 'Cannot specify branch and commit together!'
exit 1
fi
if [[ -z $branch && -z $commit ]]; then
branch=$default_branch_to_test
fi
# At this point, either branch is specified, or commit/tag is specified.
# There should be no case where both are unspecified, or both are specified.
if [[ -n $branch ]]; then
export BRANCH_OR_COMMIT=$branch
else
export BRANCH_OR_COMMIT=$commit
fi
if $debug; then
echo "BRANCH_OR_COMMIT=${BRANCH_OR_COMMIT}"
fi
# Fetch the list of tests to run (the remaining positional argument).
shift $((OPTIND-1))
tests_to_run_str=$1
if $debug; then
echo "tests_to_run_str=${tests_to_run_str}"
fi
# Split the test list string into an array.
# Tests are separated by comma, no spaces.
IFS="," tests_to_run=($tests_to_run_str)
if $debug; then
echo 'The test commands to run are:'
for test_cmd in "${tests_to_run[@]}"
do
echo $test_cmd
done
fi
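# Illustrative split (hypothetical input): tests_to_run_str='buildTest.py -d,unitTest.py -lv'
# yields tests_to_run=('buildTest.py -d' 'unitTest.py -lv').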
# ############################################################################
# Load the SSH private key for testing. This is needed for BitBucket access.
# Note that this key does not have a passphrase, so it can easily be used
# from a cron job.
if $verbose; then
echo 'Loading SSH key into key agent.'
fi
eval `ssh-agent -s`
ssh-add $ssh_key_for_testing
# ############################################################################
# Set up the module system (needed when running from cron, harmless
# otherwise).
if $verbose; then
echo 'Setting up module system.'
fi
source /etc/profile.d/z00_modules.sh
# List at-start modules.
# The 2>&1 (no spaces!) is needed since the 'module' command sends
# output to stderr by default, and so it is lost when stdout is sent
# back to the user.
if $verbose; then
echo 'At start, the loaded modules are:'
module list 2>&1
fi
# ############################################################################
# Activate the conda installation for MAGE testing.
if $verbose; then
echo "Setting up conda environment for MAGE testing ${CONDA_ENVIRONMENT}."
fi
# This code is based on the code that the miniconda installer puts into
# ~/.bashrc or ~/.bash_profile when a user installs miniconda.
# This code is needed since $HOME/.bashrc is not run.
# NOTE: $? is stale here (the hook output was captured near the top of this
# script), so test whether the captured setup code is non-empty instead.
if [ -n "$__conda_setup" ]; then
eval "$__conda_setup"
else
if [ -f "${MAGE_MINICONDA}/etc/profile.d/conda.sh" ]; then
. "${MAGE_MINICONDA}/etc/profile.d/conda.sh"
else
export PATH="${MAGE_MINICONDA}/bin:$PATH"
fi
fi
unset __conda_setup
conda activate $CONDA_ENVIRONMENT
if $verbose; then
echo "conda environment is `echo $CONDA_PREFIX`"
fi
if $verbose; then
echo 'The installed version of kaipy in this environment is:'
conda list kaipy
fi
# ############################################################################
# Make the CDF library available (needed by the satellite comparison
# python scripts).
if $verbose; then
echo 'Sourcing CDF setup script.'
fi
source $cdf_setup_script
# ############################################################################
# Create the ISO 8601 datetime stamp for this set of tests.
testing_datetime=`date --iso-8601=seconds`
if $debug; then
echo "testing_datetime=${testing_datetime}"
fi
# <HACK>
# The make tool cannot handle file or directory names that contain a colon
# (':'). Process the ISO 8601 date time string from the ISO 8601 form:
# YYYY-MM-DDTHH:mm:ss[+-]hh:mm
# into the more compact form:
# YYYYMMDD_HHMMSS
# The sed commands to do this are, in order:
# s/.\{6\}$// - Remove the time zone offset (last 6 characters)
# s/-//g - Delete all '-'.
# s/\://g - Delete all ':'.
# s/T/_/ - Convert 'T' separating date and time to underscore.
testing_datetime=`echo $testing_datetime | sed -e 's/.\{6\}$//' -e 's/-//g' -e 's/\://g' -e 's/T/_/'`
if $debug; then
echo "testing_datetime=${testing_datetime}"
fi
# </HACK>
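# Worked example of the conversion above (hypothetical timestamp with a
# '+00:00' offset): 2025-11-17T14:36:10+00:00 -> 20251117_143610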
# Create a directory to hold all of the tests for this set of tests.
test_set_dir="${testing_datetime}-${BRANCH_OR_COMMIT}"
if $debug; then
echo "test_set_dir=${test_set_dir}"
fi
export MAGE_TEST_SET_ROOT="${MAGE_TEST_RUNS_ROOT}/${test_set_dir}"
if $verbose; then
echo "Creating directory for this set of tests at ${MAGE_TEST_SET_ROOT}."
fi
mkdir $MAGE_TEST_SET_ROOT
# ############################################################################
# Move to the directory for this set of tests.
cd $MAGE_TEST_SET_ROOT
# Clone the kaiju-private repository, call it kaiju locally.
if $verbose; then
echo "Cloning repository ${kaiju_repository}."
fi
git clone $kaiju_repository $local_kaiju_name
# Move into the repository clone.
cd $local_kaiju_name
# If a branch was requested, switch to that branch. Otherwise, if a commit
# or tag was requested, check out that commit or tag.
if $verbose; then
echo "Checking out branch/commit/tag ${BRANCH_OR_COMMIT}."
fi
git checkout $BRANCH_OR_COMMIT
# ############################################################################
# <HACK>
# Back up existing test code, since we need to use test code that is under
# development.
mkdir TEST_CODE_BACKUP
mv testingScripts TEST_CODE_BACKUP/
mv tests TEST_CODE_BACKUP/
# Copy latest test code.
new_test_code_root="${MAGE_TEST_ROOT}/kaiju-private"
if $verbose; then
echo "Copying updated test files from ${new_test_code_root}."
fi
cp -rp $new_test_code_root/testingScripts ./testingScripts
cp -rp $new_test_code_root/tests ./tests
# </HACK>
# ############################################################################
# Set up the kaiju environment using the just-checked-out code.
kaiju_setup_script="${MAGE_TEST_SET_ROOT}/${local_kaiju_name}/scripts/setupEnvironment.sh"
if $verbose; then
echo "Sourcing kaiju setup script ${kaiju_setup_script}."
fi
source $kaiju_setup_script
if $verbose; then
echo "KAIJUHOME is ${KAIJUHOME}."
fi
# ############################################################################
# List at-start environment variables.
if $debug; then
echo 'At start, the environment variables are:'
printenv
fi
# ############################################################################
# Compute the location of the test scripts.
kaiju_test_scripts_dir="${KAIJUHOME}/testingScripts"
if $debug; then
echo "kaiju_test_scripts_dir=${kaiju_test_scripts_dir}"
fi
# Run each test.
for mage_test in ${tests_to_run[@]}
do
echo $test_output_separator
if $verbose; then
echo "Moving to ${MAGE_TEST_SET_ROOT}."
fi
cd $MAGE_TEST_SET_ROOT
cmd="python ${kaiju_test_scripts_dir}/${mage_test}"
if $verbose; then
echo "Running test '${cmd}' at `date` on `hostname`."
fi
eval "${cmd}"
done
echo $test_output_separator
# ############################################################################
# Shut down the SSH agent.
if $verbose; then
echo "Shutting down SSH key agent."
fi
ssh-agent -k
# ############################################################################
echo "Ending $0 at `date` on `hostname`."
echo '***********************************************************************'

View File

@@ -0,0 +1,152 @@
#!/usr/bin/env python
"""Send a message to Slack.
Send a message to Slack.
Authors
-------
Eric Winter
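Example (illustrative; assumes SLACK_BOT_TOKEN is set for delivery):
python send_slack_message.py 'Unit tests passed on development'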
"""
# Import standard modules.
import datetime
# import glob
import os
import sys
# # Import 3rd-party modules.
# Import project modules.
import common
# Program constants
# Program description.
DESCRIPTION = "Send a message to Slack."
# # Root of directory tree for this set of tests.
# MAGE_TEST_SET_ROOT = os.environ['MAGE_TEST_SET_ROOT']
# # Directory for unit tests
# UNIT_TEST_DIRECTORY = os.path.join(MAGE_TEST_SET_ROOT, 'unitTest')
# # glob pattern for naming unit test directories
# UNIT_TEST_DIRECTORY_GLOB_PATTERN = 'unitTest_*'
# # Name of build subdirectory containing binaries
# BUILD_BIN_DIR = 'bin'
# # Name of file containing job IDs for each unit test directory.
# JOB_ID_LIST_FILE = 'jobs.txt'
def create_command_line_parser():
"""Create the command-line argument parser.
Create the parser for command-line arguments.
Parameters
----------
None
Returns
-------
parser : argparse.ArgumentParser
Command-line argument parser for this script.
Raises
------
None
"""
parser = common.create_command_line_parser(DESCRIPTION)
parser.add_argument(
"message",
default="",
help="Message to send to Slack (default: %(default)s)"
)
return parser
def send_slack_message(args: dict = None):
"""Send a message to Slack.
Send a message to Slack.
Parameters
----------
args : dict
Dictionary of program options.
Returns
-------
None
Raises
------
None
"""
# Local convenience variables.
debug = args["debug"]
be_loud = args["loud"]
slack_on_fail = args["slack_on_fail"]
is_test = args["test"]
verbose = args["verbose"]
message = args["message"]
# ------------------------------------------------------------------------
if debug:
print(f"Starting {sys.argv[0]} at {datetime.datetime.now()}")
print(f"Current directory is {os.getcwd()}")
# ------------------------------------------------------------------------
# Create the Slack client.
slack_client = common.slack_create_client()
slack_response_summary = common.slack_send_message(
slack_client, message, is_test=is_test
)
# ------------------------------------------------------------------------
if debug:
print(f"Ending {sys.argv[0]} at {datetime.datetime.now()}")
def main():
"""Begin main program.
This is the main program code.
Parameters
----------
None
Returns
-------
None
Raises
------
None
"""
# Set up the command-line parser.
parser = create_command_line_parser()
# Parse the command-line arguments.
args = parser.parse_args()
if args.debug:
print(f"args = {args}")
# ------------------------------------------------------------------------
# Call the main program logic. Note that the Namespace object (args)
# returned from the option parser is converted to a dict using vars().
send_slack_message(vars(args))
if __name__ == "__main__":
main()

View File

@@ -5,7 +5,7 @@
<time tFin="11.5"/>
<spinup doSpin="T" tSpin="5.0"/>
<output dtOut="300.0" tsOut="100"/>
<coupling dtCouple="5.0" imType="RCM" doQkSquish="T" qkSquishStride="2" doAsyncCoupling="F"/>
<coupling dtCouple="5.0" imType="RAIJU" doQkSquish="T" qkSquishStride="2" doAsyncCoupling="F"/>
<restart dtRes="10800.0"/>
<imag doInit="T"/>
<threading NumTh="128"/>
@@ -36,11 +36,16 @@
<conductance doStarlight="T" doRamp="F"/>
<precipitation aurora_model_type="LINMRG" alpha="0.2" beta="0.4" doAuroralSmooth="F"/>
</REMIX>
<RCM>
<rcmdomain domType="ELLIPSE"/>
<ellipse xSun="12.5" yDD="15.0" xTail="-20.0" isDynamic="T"/>
<grid LowLat="30.0" HiLat="75.0" doLatStretch="F"/>
<plasmasphere isDynamic="T" initKp="2" doRefill="T" tAvg="600.0"/>
</RCM>
<RAIJU>
<output loudConsole="T" doFat="F" doLossExtras="F" doDebug="F" writeGhosts="F"/>
<grid gType="SHGRID" ThetaL="15" ThetaU="50"/>
<domain tail_buffer="15.0" sun_buffer="15.0" tail_active="12.0" sun_active="12.0"/>
<sim pdmb="0.75"/>
<config fname="raijuconfig.h5"/>
<plasmasphere doPsphere="T" doExcessMap="T"/>
<losses doLosses="T" doCX="T" doSS="T" doCC="T"/>
<cpl nFluidsIn="1" startupTscl="7200.0" vaFracThresh="0.60" bminThresh="5.0" Pstd="5.0" normAngThresh="180.0"/>
<fluidIn1 imhd="0" flav="2" excessToPsph="T"/>
</RAIJU>
</Kaiju>

View File

@@ -0,0 +1,44 @@
#!/bin/bash
#PBS -N {{ job_name }}
#PBS -A {{ account }}
#PBS -q {{ queue }}
#PBS -l job_priority={{ job_priority }}
#PBS -l select=1:ncpus=128
#PBS -l walltime={{ walltime }}
#PBS -j oe
#PBS -m abe
# Abort script on any error.
set -e
echo "Job $PBS_JOBID started at `date` on `hostname` in directory `pwd`."
echo 'Loading modules.'
module --force purge
{%- for module in modules %}
module load {{ module }}
{%- endfor %}
echo 'The currently loaded modules are:'
module list
echo 'The active environment variables are:'
printenv
echo 'Copying pFUnit binaries.'
pfunit_dir="{{ mage_test_root }}/pfunit/pFUnit-4.2.0/ifort-23-mpich-derecho"
kaiju_external_dir="{{ kaijuhome }}/external"
cp -rp "${pfunit_dir}/FARGPARSE-1.1" "${kaiju_external_dir}/"
cp -rp "${pfunit_dir}/GFTL-1.3" "${kaiju_external_dir}/"
cp -rp "${pfunit_dir}/GFTL_SHARED-1.2" "${kaiju_external_dir}/"
cp -rp "${pfunit_dir}/PFUNIT-4.2" "${kaiju_external_dir}/"
# Build the code.
cmd="{{ cmake_cmd }} >& cmake.out"
echo $cmd
eval $cmd
cmd="{{ make_cmd }} >& make.out"
echo $cmd
eval $cmd
echo "Job $PBS_JOBID ended at `date` on `hostname` in directory `pwd`."

File diff suppressed because it is too large

View File

@@ -111,8 +111,7 @@ def main():
print(f"Checking unit test results in {unit_test_directory}.")
# Move to the directory containing the unit test results.
path = os.path.join(UNIT_TEST_DIRECTORY, unit_test_directory,
BUILD_BIN_DIR)
path = os.path.join(UNIT_TEST_DIRECTORY, unit_test_directory)
if debug:
print(f"path = {path}")
os.chdir(path)
@@ -136,19 +135,27 @@ def main():
# NOTE: This needs to be reorganized.
# Compute the names of the job log files.
job_file_0 = f"genTestData.o{job_ids[0]}" # 0 OKs
job_file_1 = f"runCaseTests.o{job_ids[1]}" # 2 OKs
job_file_2 = f"runNonCaseTests1.o{job_ids[2]}" # 7 OKs
job_file_3 = f"runNonCaseTests2.o{job_ids[3]}" # 1 OK
if debug:
print(f"job_file_0 = {job_file_0}")
print(f"job_file_1 = {job_file_1}")
print(f"job_file_2 = {job_file_2}")
print(f"job_file_3 = {job_file_3}")
# 0 OKs
job_file_build = f"../unitTest-build.o{job_ids[0]}"
# 0 OKs
job_file_genTestData = f"../unitTest-genTestData.o{job_ids[1]}"
# 2 OKs
job_file_caseTests = f"../unitTest-caseTests.o{job_ids[2]}"
# 6 OKs
job_file_noncaseTests1 = f"../unitTest-noncaseTests1.o{job_ids[3]}"
# 1 OK
job_file_noncaseTests2 = f"../unitTest-noncaseTests2.o{job_ids[4]}"
# Combine the results of each test log file.
os.chdir("bin")
bigFile = []
job_files = [job_file_0, job_file_1, job_file_2, job_file_3]
job_files = [
job_file_build,
job_file_genTestData,
job_file_caseTests,
job_file_noncaseTests1,
job_file_noncaseTests2,
]
for job_file in job_files:
with open(job_file, 'r', encoding='utf-8') as f:
bigFile += f.readlines()
@@ -164,8 +171,8 @@ def main():
elif 'job killed' in line:
jobKilled = True
# There should be exactly 10 OKs.
OK_COUNT_EXPECTED = 10
# There should be exactly 9 OKs.
OK_COUNT_EXPECTED = 9
if verbose:
print(f"Found {okCount} OKs, expected {OK_COUNT_EXPECTED}.")
if okCount != OK_COUNT_EXPECTED:
@@ -235,6 +242,13 @@ def main():
if debug:
print(f"slack_response_summary = {slack_response_summary}")
# Also write a summary file to the root folder of this test
with open(os.path.join(
MAGE_TEST_SET_ROOT, 'testSummary.out'), 'w', encoding='utf-8'
) as f:
f.write(test_report_details_string)
f.write('\n')
# ------------------------------------------------------------------------
if debug:

View File

@@ -20,21 +20,26 @@ echo 'The currently loaded modules are:'
module list
echo 'Loading python environment.'
export CONDARC="{{ condarc }}"
export CONDA_ENVS_PATH="{{ conda_envs_path }}"
mage_miniconda3="{{ mage_test_root }}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
mage_test_root=$HOME
if [ -d "${mage_test_root}/miniconda3" ]; then
echo 'Loading local miniconda3'
mage_miniconda3="${mage_test_root}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
export PATH="$mage_miniconda3/bin:$PATH"
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
else
export PATH="$mage_miniconda3/bin:$PATH"
fi
fi
unset __conda_setup
else
echo 'Loading conda module'
module load conda
fi
unset __conda_setup
conda activate {{ conda_environment }}
echo "The current conda environment is ${CONDA_PREFIX}."
@@ -42,10 +47,6 @@ echo 'Setting up MAGE environment.'
source {{ kaijuhome }}/scripts/setupEnvironment.sh
echo "The kaiju software is located at ${KAIJUHOME}."
echo 'Setting up kaipy environment.'
source {{ kaipy_private_root }}/kaipy/scripts/setupEnvironment.sh
echo "The kaipy software is located at ${KAIPYHOME}."
echo 'Setting environment variables.'
export TMPDIR={{ tmpdir }}
export SLACK_BOT_TOKEN={{ slack_bot_token }}
@@ -83,10 +84,10 @@ echo 'Generating the solar wind boundary condition file.'
{{ cda2wind_cmd }}
echo "The solar wind boundary condition file is `ls bcwind.h5`."
# Generate the RCM configuration file.
echo 'Generating the RCM configuration file.'
{{ genRCM_cmd }}
echo "The RCM configuration file is `ls rcmconfig.h5`."
# Generate the raiju configuration file.
echo 'Generating the raiju configuration file.'
{{ genRAIJU_cmd }}
echo "The RAIJU configuration file is `ls raijuconfig.h5`."
# Run the model.
MPICOMMAND="{{ mpiexec_cmd }}"

View File

@@ -142,15 +142,15 @@ def weekly_dash(args: dict):
make_cmd = "make voltron_mpi.x"
# Create the command to generate the LFM grid.
genLFM_cmd = "genLFM.py -gid Q"
genLFM_cmd = "genLFM -gid Q"
# Create the command to generate the solar wind boundary condition file.
cda2wind_cmd = (
"cda2wind.py -t0 2016-08-09T02:00:00 -t1 2016-08-09T12:00:00"
"cda2wind -t0 2016-08-09T02:00:00 -t1 2016-08-09T12:00:00"
)
# Create the command to generate the RCM configuration.
genRCM_cmd = "genRCM.py"
# Create the command to generate the raiju configuration.
genRAIJU_cmd = "genRAIJU"
# Create the command for launching an MPI program.
mpiexec_cmd = f"mpiexec {KAIJUHOME}/scripts/preproc/pinCpuCores.sh"
@@ -178,13 +178,10 @@ def weekly_dash(args: dict):
pbs_options["job_priority"] = os.environ["DERECHO_TESTING_PRIORITY"]
pbs_options["walltime"] = "08:00:00"
pbs_options["modules"] = module_names
pbs_options["condarc"] = os.environ["CONDARC"]
pbs_options["conda_envs_path"] = os.environ["CONDA_ENVS_PATH"]
pbs_options["conda_environment"] = os.environ["CONDA_ENVIRONMENT"]
pbs_options["mage_test_root"] = os.environ["MAGE_TEST_ROOT"]
pbs_options["mage_test_set_root"] = os.environ["MAGE_TEST_SET_ROOT"]
pbs_options["kaijuhome"] = KAIJUHOME
pbs_options["kaipy_private_root"] = os.environ["KAIPY_PRIVATE_ROOT"]
pbs_options["tmpdir"] = os.environ["TMPDIR"]
pbs_options["slack_bot_token"] = os.environ["SLACK_BOT_TOKEN"]
pbs_options["branch_or_commit"] = os.environ["BRANCH_OR_COMMIT"]
@@ -192,7 +189,7 @@ def weekly_dash(args: dict):
pbs_options["make_cmd"] = make_cmd
pbs_options["genLFM_cmd"] = genLFM_cmd
pbs_options["cda2wind_cmd"] = cda2wind_cmd
pbs_options["genRCM_cmd"] = genRCM_cmd
pbs_options["genRAIJU_cmd"] = genRAIJU_cmd
pbs_options["mpiexec_cmd"] = mpiexec_cmd
pbs_options["voltron_cmd"] = voltron_cmd

View File

@@ -5,10 +5,10 @@
<time tFin="32407.0"/>
<spinup doSpin="T" tSpin="7200.0"/>
<output dtOut="300.0" tsOut="100"/>
<coupling dtCouple="5.0" imType="RCM" doQkSquish="T" qkSquishStride="2" doAsyncCoupling="F"/>
<coupling dtCouple="5.0" imType="RAIJU" doQkSquish="T" qkSquishStride="2" doAsyncCoupling="F"/>
<restart dtRes="10800.0"/>
<imag doInit="T"/>
<threading NumTh="64"/>
<threading NumTh="128"/>
<!-- without quick squish, estimated 13 helpers required -->
<helpers numHelpers="2" useHelpers="T" doSquishHelp="T"/>
</VOLTRON>
@@ -38,10 +38,15 @@
<conductance doStarlight="T" doRamp="F"/>
<precipitation aurora_model_type="LINMRG" alpha="0.2" beta="0.4" doAuroralSmooth="F"/>
</REMIX>
<RCM>
<rcmdomain domType="ELLIPSE"/>
<ellipse xSun="12.5" yDD="15.0" xTail="-20.0" isDynamic="T"/>
<grid LowLat="30.0" HiLat="75.0" doLatStretch="F"/>
<plasmasphere isDynamic="T" initKp="2" doRefill="T" tAvg="600.0"/>
</RCM>
<RAIJU>
<output loudConsole="T" doFat="F" doLossExtras="F" doDebug="F" writeGhosts="F"/>
<grid gType="SHGRID" ThetaL="15" ThetaU="50"/>
<domain tail_buffer="15.0" sun_buffer="15.0" tail_active="12.0" sun_active="12.0"/>
<sim pdmb="0.75"/>
<config fname="raijuconfig.h5"/>
<plasmasphere doPsphere="T" doExcessMap="T"/>
<losses doLosses="T" doCX="T" doSS="T" doCC="T"/>
<cpl nFluidsIn="1" startupTscl="7200.0" vaFracThresh="0.60" bminThresh="5.0" Pstd="5.0" normAngThresh="180.0"/>
<fluidIn1 imhd="0" flav="2" excessToPsph="T"/>
</RAIJU>
</Kaiju>

View File

@@ -121,9 +121,6 @@ REMIX_NORTH_QUICKLOOK_MASTER = os.path.join(
REMIX_SOUTH_QUICKLOOK_MASTER = os.path.join(
REFERENCE_RESULTS_DIRECTORY_MASTER, 'remix_s.png'
)
RCM_QUICKLOOK_MASTER = os.path.join(
REFERENCE_RESULTS_DIRECTORY_MASTER, 'qkrcmpic.png'
)
# Compute the paths to the quicklook plots for the development branch.
MAGNETOSPHERE_QUICKLOOK_DEVELOPMENT = os.path.join(
@@ -135,9 +132,6 @@ REMIX_NORTH_QUICKLOOK_DEVELOPMENT = os.path.join(
REMIX_SOUTH_QUICKLOOK_DEVELOPMENT = os.path.join(
REFERENCE_RESULTS_DIRECTORY_DEVELOPMENT, 'remix_s.png'
)
RCM_QUICKLOOK_DEVELOPMENT = os.path.join(
REFERENCE_RESULTS_DIRECTORY_DEVELOPMENT, 'qkrcmpic.png'
)
def main():
@@ -908,27 +902,6 @@ def main():
# ------------------------------------------------------------------------
# Make the RCM quick-look plot.
if verbose:
print(f"Creating RCM quicklook plot for {os.getcwd()}.")
# Create the plot.
cmd = 'rcmpic.py'
if debug:
print(f"cmd = {cmd}")
try:
_ = subprocess.run(cmd, shell=True, check=True)
except subprocess.CalledProcessError as e:
print(
'ERROR: Unable to create RCM quicklook plot.\n'
f"e.cmd = {e.cmd}\n"
f"e.returncode = {e.returncode}\n"
f'See log for output.\n',
file=sys.stderr
)
# ------------------------------------------------------------------------
# Create merged images for the quicklook plots.
# Merge magnetosphere quicklooks.
@@ -988,25 +961,6 @@ def main():
file=sys.stderr
)
# Merge RCM quicklooks.
cmd = (
f"convert {RCM_QUICKLOOK_MASTER}"
f" {RCM_QUICKLOOK_DEVELOPMENT}"
' qkrcmpic.png -append combined_qkrcmpic.png'
)
if debug:
print(f"cmd = {cmd}")
try:
cproc = subprocess.run(cmd, shell=True, check=True)
except subprocess.CalledProcessError as e:
print(
'ERROR: Unable to combine RCM quicklook plots.\n'
f"e.cmd = {e.cmd}\n"
f"e.returncode = {e.returncode}\n"
f'See log for output.\n',
file=sys.stderr
)
# ------------------------------------------------------------------------
# List the files to post and their comments.
@@ -1017,11 +971,9 @@ def main():
'qkmsphpic.png',
'remix_n.png',
'remix_s.png',
'qkrcmpic.png',
'combined_msphpic.png',
'combined_remix_n.png',
'combined_remix_s.png',
'combined_qkrcmpic.png'
]
comments_to_post = [
'Real-Time Performance\n\n',
@@ -1030,11 +982,9 @@ def main():
'Magnetosphere Quicklook Plots\n\n',
'REMIX (north) Quicklook Plots\n\n',
'REMIX (south) Quicklook Plots\n\n',
'RCM Quicklook Plots\n\n',
'Magnetosphere Quicklook Comparison Plots\n\n',
'REMIX (north) Quicklook Comparison Plots\n\n',
'REMIX (south) Quicklook Comparison Plots\n\n',
'RCM Quicklook Comparison Plots\n\n'
]
# If loud mode is on, post results to Slack.

View File

@@ -0,0 +1,17 @@
<?xml version="1.0"?>
<Kaiju>
<Gamera>
<sim runid="blast3d_large2" doH5init="F" icType="BW" pdmb="4.0"/>
<idir min="-0.5" max="0.5" N="128"/>
<jdir min="-0.5" max="0.5" N="128"/>
<kdir min="-0.5" max="0.5" N="128"/>
<iPdir N="2" bcPeriodic="T"/>
<jPdir N="1" bcPeriodic="T"/>
<kPdir N="1" bcPeriodic="T"/>
<time tFin="1.0"/>
<output dtOut="0.01" tsOut="10"/>
<physics doMHD="F" do25D="T"/>
<coupling blockHalo="T"/>
<prob B0="0.1"/>>
</Gamera>
</Kaiju>

View File

@@ -1,7 +1,7 @@
<?xml version="1.0"?>
<Kaiju>
<Gamera>
<sim runid="blast3d_large" doH5init="F" icType="BW" pdmb="4.0"/>
<sim runid="blast3d_large8" doH5init="F" icType="BW" pdmb="4.0"/>
<idir min="-0.5" max="0.5" N="128"/>
<jdir min="-0.5" max="0.5" N="128"/>
<kdir min="-0.5" max="0.5" N="128"/>

View File

@@ -21,7 +21,7 @@ contains
end subroutine lastSerial
@test(npes=[8])
subroutine testBlast3D(this)
subroutine testBlast3D_8(this)
class (MpiTestMethod), intent(inout) :: this
type(gamAppMpi_T) :: gameraAppMpi
@@ -31,7 +31,7 @@ contains
gameraAppMpi%gOptions%userInitFunc => initUser
gameraAppMpi%gOptionsMpi%gamComm = getMpiF08Communicator(this)
xmlInp = New_XML_Input('blast3d_large.xml','Kaiju',.true.)
xmlInp = New_XML_Input('blast3d_large8.xml','Kaiju',.true.)
call gameraAppMpi%InitModel(xmlInp)
do while ((gameraAppMpi%Model%tFin - gameraAppMpi%Model%t) > 1e-15)
@@ -48,7 +48,98 @@ contains
end do
write(*,*) 'End time = ', gameraAppMpi%Model%t
end subroutine testBlast3D
end subroutine testBlast3D_8
@test(npes=[2])
subroutine testBlast3D_2(this)
class (MpiTestMethod), intent(inout) :: this
type(gamAppMpi_T) :: gameraAppMpi
type(XML_Input_T) :: xmlInp
call setMpiReal()
gameraAppMpi%gOptions%userInitFunc => initUser
gameraAppMpi%gOptionsMpi%gamComm = getMpiF08Communicator(this)
xmlInp = New_XML_Input('blast3d_large2.xml','Kaiju',.true.)
call gameraAppMpi%InitModel(xmlInp)
do while ((gameraAppMpi%Model%tFin - gameraAppMpi%Model%t) > 1e-15)
call stepGamera_mpi(gameraAppMpi)
if (gameraAppMpi%Model%IO%doConsole(gameraAppMpi%Model%t)) then
call consoleOutput(gameraAppMpi%Model,gameraAppMpi%Grid,gameraAppMpi%State)
endif
if (gameraAppMpi%Model%IO%doOutput(gameraAppMpi%Model%t)) then
call fOutput(gameraAppMpi%Model,gameraAppMpi%Grid,gameraAppMpi%State)
endif
end do
write(*,*) 'End time = ', gameraAppMpi%Model%t
end subroutine testBlast3D_2
!this test must be at the bottom so that the data is generated by the two tests above
@test(npes=[1])
subroutine compareBlastWaves(this)
class (MpiTestMethod), intent(inout) :: this
type(IOVAR_T), dimension(25) :: IOVars
real(rp), allocatable :: p8(:,:,:), p2(:,:,:)
integer :: i,j,k,ni,nj,nk,ni2,nj2,nk2
character(len=strLen) :: h5Str, gStr, errMsg
call setMpiReal()
h5Str = trim('blast')
gStr = '/Step#99'
call ClearIO(IOVars)
call AddInVar(IOVars,"P")
! manually read in the 2 parts of blast3d_large2 and also determine the size of the data
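! (the file names below appear to encode the MPI decomposition and rank as
!  <runid>_<Ri>_<Rj>_<Rk>_<i>_<j>_<k>.gam.h5 -- inferred from the names used here)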
h5Str = 'blast3d_large2_0002_0001_0001_0000_0000_0000.gam.h5'
call ReadVars(IOVars,.false.,h5Str,gStr)
ni = 2*IOVars(1)%dims(1)
nj = IOVars(1)%dims(2)
nk = IOVars(1)%dims(3)
ni2 = ni/2
nj2 = nj/2
nk2 = nk/2
allocate(p2(ni,nj,nk))
allocate(p8(ni,nj,nk))
call IOArray3DFill(IOVars,"P",p2(1:ni2,:,:))
call ClearIO(IOVars)
call AddInVar(IOVars,"P")
h5Str = 'blast3d_large2_0002_0001_0001_0001_0000_0000.gam.h5'
call ReadVars(IOVars,.false.,h5Str,gStr)
call IOArray3DFill(IOVars,"P",p2(ni2+1:ni,:,:))
call ClearIO(IOVars)
! loop to read in the parts blast3d_large8
do i=1,2
do j=1,2
do k=1,2
call AddInVar(IOVars,"P")
write(h5Str,'(A,I0,A,I0,A,I0,A)') 'blast3d_large8_0002_0002_0002_000',i-1,'_000',j-1,'_000',k-1,'.gam.h5'
call ReadVars(IOVars,.false.,h5Str,gStr)
call IOArray3DFill(IOVars,"P",p8(1+(i-1)*ni2:i*ni2,1+(j-1)*nj2:j*nj2,1+(k-1)*nk2:k*nk2))
call ClearIO(IOVars)
enddo
enddo
enddo
! check values
do i=1,ni
do j=1,nj
do k=1,nk
write(errMsg,'(A,I0,A,I0,A,I0,A)') 'Blast wave values not equal at (',i,',',j,',',k,')'
@assertEqual(p2(i,j,k),p8(i,j,k),1e-12,trim(errMsg))
enddo
enddo
enddo
end subroutine compareBlastWaves
end module testCasesMpi

View File

@@ -9,6 +9,9 @@
#PBS -j oe
#PBS -m abe
# Abort script on any error.
set -e
echo "Job $PBS_JOBID started at `date` on `hostname` in directory `pwd`."
echo 'Loading modules.'
@@ -28,6 +31,16 @@ export KMP_STACKSIZE=128M
echo 'The active environment variables are:'
printenv
# Move to the directory containing the compiled code.
cd bin
echo 'Copying input files.'
test_inputs_dir="{{ mage_test_root }}/unit_test_inputs"
cp "${test_inputs_dir}/bcwind.h5" .
cp "${test_inputs_dir}/geo_mpi.xml" .
cp "${test_inputs_dir}/lfmD.h5" .
cp "${test_inputs_dir}/raijuconfig.h5" .
echo 'Generating data for testing.'
MPICOMMAND="mpiexec $KAIJUHOME/scripts/preproc/pinCpuCores.sh"
$MPICOMMAND ./voltron_mpi.x cmiD_deep_8_genRes.xml >& cmiD_deep_8_genRes.out

View File

@@ -1,82 +0,0 @@
module testetautils
use testHelper
use kdefs, only : TINY
use rcmdefs
use conversion_module, only : almmax,almmin
use RCM_mod_subs
use torcm_mod
use xml_input
use etautils
implicit none
contains
@before
subroutine setup()
end subroutine setup
@after
subroutine teardown()
end subroutine teardown
!Helpers
subroutine initAlams()
real (rp) :: itimei,itimef
integer(iprec) :: nstep
type(XML_Input_T) :: xmlInp
INTEGER(iprec) :: ierr
itimei = 0
itimef = 60
nstep = 0
xmlInp = New_XML_Input('cmriD.xml','Kaiju/Voltron',.true.)
! Get RCM to read rcmconfig.h5 and store in alamc
call setFactors(6.371E6_rprec)
call allocate_conversion_arrays (isize,jsize,kcsize)
call RCM(itimei, itimef, nstep, 0_iprec)
call RCM(itimei, itimef, nstep, 1_iprec)
call Read_alam (kcsize, alamc, ikflavc, fudgec, almdel, almmax, almmin, iesize, ierr)
end subroutine initAlams
@test
subroutine testDPetaDP()
real (rp) :: etas(kcsize)
real (rp) :: vm
real (rp) :: Do_rc ,Po_rc
real (rp) :: Df_rc,Df_psph,Pf_rc
real(rp) :: OKerr
character(len=strLen) :: checkMessage
real(rp) :: D_err, P_err
OKerr = 2e-2_rp
! 5 keV at L=10
Do_rc = 0.5 *1E6! [1/cc -> 1/m^3]
Po_rc = 0.5 *1E-9! [nPa -> Pa]
vm = 2.262
call initAlams()
call DP2eta(Do_rc,Po_rc,vm,etas,doRescaleO=.false.,doKapO=.false.)
call eta2DP(etas,vm,Df_rc,Df_psph,Pf_rc)
D_err = (Do_rc - Df_rc)/Do_rc
P_err = (Po_rc - Pf_rc)/Po_rc
write(*,*) 'D_err=',D_err*100,'%'
write(*,*) 'P_err=',P_err*100,'%'
write (checkMessage,'(A,I0,A)') "D->eta->D' error > ",OKerr*100,"%"
@assertLessThanOrEqual(abs(D_err), OKerr, checkMessage)
write (checkMessage,'(A,I0,A)') "D->eta->D' error > ",OKerr*100,"%"
@assertLessThanOrEqual(abs(P_err), OKerr, checkMessage)
end subroutine testDPetaDP
end module testetautils

View File

@@ -9,6 +9,9 @@
#PBS -j oe
#PBS -m abe
# Abort script on any error.
set -e
echo "Job $PBS_JOBID started at `date` on `hostname` in directory `pwd`."
echo 'Loading modules.'
@@ -28,6 +31,9 @@ export KMP_STACKSIZE=128M
echo 'The active environment variables are:'
printenv
# Move to the directory containing the compiled code.
cd bin
echo 'Running non-MPI test cases.'
./caseTests >& caseTests.out
echo 'Non-MPI test cases complete.'

View File

@@ -9,6 +9,9 @@
#PBS -j oe
#PBS -m abe
# Abort script on any error.
set -e
echo "Job $PBS_JOBID started at `date` on `hostname` in directory `pwd`."
echo 'Loading modules.'
@@ -28,6 +31,9 @@ export KMP_STACKSIZE=128M
echo 'The active environment variables are:'
printenv
# Move to the directory containing the compiled code.
cd bin
echo 'Running GAMERA tests.'
date
./gamTests >& gamTests.out
@@ -42,13 +48,6 @@ date
echo 'REMIX tests complete.'
echo | tail -n 3 ./mixTests.out
echo 'Running RCM tests.'
date
./rcmTests >& rcmTests.out
date
echo 'RCM tests complete.'
echo | tail -n 3 ./rcmTests.out
echo 'Running SHELLGRID tests.'
date
./shgrTests >& shgrTests.out

View File

@@ -9,6 +9,9 @@
#PBS -j oe
#PBS -m abe
# Abort script on any error.
set -e
echo "Job $PBS_JOBID started at `date` on `hostname` in directory `pwd`."
echo 'Loading modules.'
@@ -28,6 +31,9 @@ export KMP_STACKSIZE=128M
echo 'The active environment variables are:'
printenv
# Move to the directory containing the compiled code.
cd bin
echo 'Running VOLTRON MPI tests.'
date
MPICOMMAND="mpiexec $KAIJUHOME/scripts/preproc/pinCpuCores.sh"

View File

@@ -9,6 +9,9 @@
#PBS -j oe
#PBS -m abe
# Abort script on any error.
set -e
echo "Job $PBS_JOBID started at `date` on `hostname` in directory `pwd`."
echo 'Loading modules.'
@@ -19,23 +22,28 @@ module load {{ module }}
module list
echo 'Loading python environment.'
mage_test_root='{{ mage_test_root }}'
export CONDARC="${mage_test_root}/.condarc"
export CONDA_ENVS_PATH="${mage_test_root}/.conda"
mage_miniconda3="${mage_test_root}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
mage_test_root=$HOME
if [ -d "${mage_test_root}/miniconda3" ]; then
echo 'Loading local miniconda3'
mage_miniconda3="${mage_test_root}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
export PATH="$mage_miniconda3/bin:$PATH"
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
else
export PATH="$mage_miniconda3/bin:$PATH"
fi
fi
unset __conda_setup
else
echo 'Loading conda module'
module load conda
fi
unset __conda_setup
conda activate kaiju-3.8-testing
conda activate {{ conda_environment }}
echo "The active conda environment is ${CONDA_DEFAULT_ENV}."
echo 'Setting up MAGE environment.'
source {{ kaijuhome }}/scripts/setupEnvironment.sh
@@ -47,6 +55,13 @@ export BRANCH_OR_COMMIT={{ branch_or_commit }}
echo 'The active environment variables are:'
printenv
# Move to the directory containing the compiled code.
# (set -e would abort on a bare 'cd bin' failure before the check below could
# run, so guard the cd directly.)
if ! cd bin; then
python $KAIJUHOME/testingScripts/send_slack_message.py "Unit test build failed in `pwd`!"
exit 1
fi
echo 'Generating unit test report.'
python $KAIJUHOME/testingScripts/unitTestReport.py {{ report_options }} >& unitTestReport.out

View File

@@ -7,7 +7,7 @@
<spinup doSpin="T" tSpin="60.0" tIO="0.0"/>
<output dtOut="60.0" tsOut="100" doTimer="F"/>
<restart dtRes="1800.0"/>
<coupling dtCouple="5.0" rDeep="8.0" imType="RCM"/>
<coupling dtCouple="5.0" rDeep="8.0" imType="RAIJU"/>
</VOLTRON>
<Gamera>
<sim runid="msphere" doH5g="T" H5Grid="lfmD.h5" icType="user" pdmb="1.0" pFloor="1.0e-8" dFloor="1.0e-6" rmeth="7UP"/>
@@ -29,4 +29,15 @@
<fields grType="LFM"/>
<domain dtype="SPH" rmin="2.0" rmax="25.0"/>
</CHIMP>
<RAIJU>
<output loudConsole="T" doFat="F" doLossExtras="F" doDebug="F" writeGhosts="F"/>
<grid gType="SHGRID" ThetaL="15" ThetaU="50"/>
<domain tail_buffer="15.0" sun_buffer="15.0" tail_active="12.0" sun_active="12.0"/>
<sim pdmb="0.75"/>
<config fname="raijuconfig.h5"/>
<plasmasphere doPsphere="T" doExcessMap="T"/>
<losses doLosses="T" doCX="T" doSS="T" doCC="T"/>
<cpl nFluidsIn="1" startupTscl="7200.0" vaFracThresh="0.60" bminThresh="5.0" Pstd="5.0" normAngThresh="180.0"/>
<fluidIn1 imhd="0" flav="2" excessToPsph="T"/>
</RAIJU>
</Kaiju>

View File

@@ -4,7 +4,7 @@
<time tFin="108000.2"/>
<spinup doSpin="T" tSpin="60.0" tIO="0.0" doSeq="F"/>
<output dtOut="30.0" tsOut="100"/>
<coupling dtCouple="5.0" rTrc="40.0" imType="RCM" doQkSquish="T" doDynDT="F" qkSquishStride="2"/>
<coupling dtCouple="5.0" rTrc="40.0" imType="RAIJU" doQkSquish="T" doDynDT="F" qkSquishStride="2"/>
<restart dtRes="1800.0"/>
<imag doInit="T"/>
<ebsquish epsSquish="0.05"/>
@@ -33,12 +33,15 @@
<conductance doStarlight="T" doRamp="F" doMR="F"/>
<precipitation aurora_model_type="LINMRG" alpha="0.2" beta="0.4" doAuroralSmooth="F"/>
</REMIX>
<RCM>
<ellipse xSun="12.5" yDD="15.0" xTail="-15.0" isDynamic="T"/>
<grid LowLat="30.0" HiLat="75.0" doLatStretch="F"/>
<torcm doFluidSplit="F"/>
<tomhd doAvg2MHD="T"/>
<plasmasphere isDynamic="T" initKp="3" doRefill="T" DenPP0="1.0" staticR="2" tAvg="300.0"/>
<loss doRelax="F" doNewCX="T"/>
</RCM>
<RAIJU>
<output loudConsole="T" doFat="F" doLossExtras="F" doDebug="F" writeGhosts="F"/>
<grid gType="SHGRID" ThetaL="15" ThetaU="50"/>
<domain tail_buffer="15.0" sun_buffer="15.0" tail_active="12.0" sun_active="12.0"/>
<sim pdmb="0.75"/>
<config fname="raijuconfig.h5"/>
<plasmasphere doPsphere="T" doExcessMap="T"/>
<losses doLosses="T" doCX="T" doSS="T" doCC="T"/>
<cpl nFluidsIn="1" startupTscl="7200.0" vaFracThresh="0.60" bminThresh="5.0" Pstd="5.0" normAngThresh="180.0"/>
<fluidIn1 imhd="0" flav="2" excessToPsph="T"/>
</RAIJU>
</Kaiju>

View File

@@ -4,7 +4,7 @@
<time tFin="108000.2"/>
<spinup doSpin="T" tSpin="60.0" tIO="0.0" doSeq="F"/>
<output dtOut="30.0" tsOut="100"/>
<coupling dtCouple="5.0" rTrc="40.0" imType="RCM" doQkSquish="T" doDynDT="F" qkSquishStride="2"/>
<coupling dtCouple="5.0" rTrc="40.0" imType="RAIJU" doQkSquish="T" doDynDT="F" qkSquishStride="2"/>
<restart dtRes="1800.0"/>
<imag doInit="T"/>
<ebsquish epsSquish="0.05"/>
@@ -33,12 +33,15 @@
<conductance doStarlight="T" doRamp="F" doMR="F"/>
<precipitation aurora_model_type="LINMRG" alpha="0.2" beta="0.4" doAuroralSmooth="F"/>
</REMIX>
<RCM>
<ellipse xSun="12.5" yDD="15.0" xTail="-15.0" isDynamic="T"/>
<grid LowLat="30.0" HiLat="75.0" doLatStretch="F"/>
<torcm doFluidSplit="F"/>
<tomhd doAvg2MHD="T"/>
<plasmasphere isDynamic="T" initKp="3" doRefill="T" DenPP0="1.0" staticR="2" tAvg="300.0"/>
<loss doRelax="F" doNewCX="T"/>
</RCM>
<RAIJU>
<output loudConsole="T" doFat="F" doLossExtras="F" doDebug="F" writeGhosts="F"/>
<grid gType="SHGRID" ThetaL="15" ThetaU="50"/>
<domain tail_buffer="15.0" sun_buffer="15.0" tail_active="12.0" sun_active="12.0"/>
<sim pdmb="0.75"/>
<config fname="raijuconfig.h5"/>
<plasmasphere doPsphere="T" doExcessMap="T"/>
<losses doLosses="T" doCX="T" doSS="T" doCC="T"/>
<cpl nFluidsIn="1" startupTscl="7200.0" vaFracThresh="0.60" bminThresh="5.0" Pstd="5.0" normAngThresh="180.0"/>
<fluidIn1 imhd="0" flav="2" excessToPsph="T"/>
</RAIJU>
</Kaiju>

View File

@@ -4,7 +4,7 @@
<time tFin="108000.2"/>
<spinup doSpin="T" tSpin="60.0" tIO="0.0" doSeq="F"/>
<output dtOut="30.0" tsOut="100"/>
<coupling dtCouple="5.0" rTrc="40.0" imType="RCM" doQkSquish="T" doDynDT="F" qkSquishStride="2"/>
<coupling dtCouple="5.0" rTrc="40.0" imType="RAIJU" doQkSquish="T" doDynDT="F" qkSquishStride="2"/>
<restart dtRes="1800.0"/>
<imag doInit="T"/>
<ebsquish epsSquish="0.05"/>
@@ -33,12 +33,15 @@
<conductance doStarlight="T" doRamp="F" doMR="F"/>
<precipitation aurora_model_type="LINMRG" alpha="0.2" beta="0.4" doAuroralSmooth="F"/>
</REMIX>
<RCM>
<ellipse xSun="12.5" yDD="15.0" xTail="-15.0" isDynamic="T"/>
<grid LowLat="30.0" HiLat="75.0" doLatStretch="F"/>
<torcm doFluidSplit="F"/>
<tomhd doAvg2MHD="T"/>
<plasmasphere isDynamic="T" initKp="3" doRefill="T" DenPP0="1.0" staticR="2" tAvg="300.0"/>
<loss doRelax="F" doNewCX="T"/>
</RCM>
<RAIJU>
<output loudConsole="T" doFat="F" doLossExtras="F" doDebug="F" writeGhosts="F"/>
<grid gType="SHGRID" ThetaL="15" ThetaU="50"/>
<domain tail_buffer="15.0" sun_buffer="15.0" tail_active="12.0" sun_active="12.0"/>
<sim pdmb="0.75"/>
<config fname="raijuconfig.h5"/>
<plasmasphere doPsphere="T" doExcessMap="T"/>
<losses doLosses="T" doCX="T" doSS="T" doCC="T"/>
<cpl nFluidsIn="1" startupTscl="7200.0" vaFracThresh="0.60" bminThresh="5.0" Pstd="5.0" normAngThresh="180.0"/>
<fluidIn1 imhd="0" flav="2" excessToPsph="T"/>
</RAIJU>
</Kaiju>

View File

@@ -9,6 +9,8 @@ module testVoltGridGen
implicit none
character(len=strLen) :: xmlName = 'voltGridTests.xml'
integer, parameter :: gamRes = 64
!! genVoltShellGrid expects a Gamera resolution to determine defaults for its own grid
contains
@@ -36,7 +38,7 @@ module testVoltGridGen
call xmlInp%Set_Val(Nt, "grid/Nt", -1) ! -1 so things blow up if xml isn't set properly
call xmlInp%Set_Val(Np, "grid/Np", -1) ! -1 so things blow up if xml isn't set properly
call genVoltShellGrid(vApp, xmlInp)
call genVoltShellGrid(vApp, xmlInp, gamRes)
@assertEqual(vApp%shGrid%Nt, 2*Nt, "Wrong amount of theta cells")
@assertEqual(vApp%shGrid%Np, Np , "Wrong amount of phi cells")
@@ -62,7 +64,7 @@ module testVoltGridGen
call xmlInp%Set_Val(Nt, "grid/Nt", -1) ! -1 so things blow up if xml isn't set properly
call xmlInp%Set_Val(Np, "grid/Np", -1) ! -1 so things blow up if xml isn't set properly
call genVoltShellGrid(vApp, xmlInp)
call genVoltShellGrid(vApp, xmlInp, gamRes)
@assertEqual(2*Nt, vApp%shGrid%Nt, "Wrong amount of theta cells")
@assertEqual( Np, vApp%shGrid%Np, "Wrong amount of phi cells")

View File

@@ -16,7 +16,8 @@ contains
subroutine lastSerial()
end subroutine lastSerial
@test
! no more rcm, no more rcm test
!@test
subroutine testRcmFakeTubes()
! testing that rcm produces expected output when debug tubes is enabled
type(voltApp_T) :: voltronApp

View File

@@ -4,7 +4,7 @@
<voltron>
<time tFin="677.115987461"/>
<spinup doSpin="T" tSpin="60.0" tIO="0.0"/>
<coupling dtCouple="5.0" rTrc="40.0" imType="RCM" doQkSquish="T" doDynDT="T" doAsyncCoupling="F"/>
<coupling dtCouple="5.0" rTrc="40.0" imType="RAIJU" doQkSquish="T" doDynDT="T" doAsyncCoupling="F"/>
<restart dtRes="1800.0"/>
<imag doInit="T"/>
<ebsquish epsSquish="0.05"/>
@@ -37,11 +37,15 @@
<domain dtype="MAGE"/>
<tracer epsds="0.05"/>
</CHIMP>
<RCM>
<output debug="F" toRCM="F" toMHD="F"/>
<clawpack doKaiClaw="F" doOMPClaw="T"/>
<ellipse xSun="10.0" yDD="12.5" xTail="-15.0" isDynamic="F"/>
<grid LowLat="25.0" HiLat="75.0" doLatStretch="F"/>
<plasmasphere isDynamic="T" initKp="1"/>
</RCM>
<RAIJU>
<output loudConsole="T" doFat="F" doLossExtras="F" doDebug="F" writeGhosts="F"/>
<grid gType="SHGRID" ThetaL="15" ThetaU="50"/>
<domain tail_buffer="15.0" sun_buffer="15.0" tail_active="12.0" sun_active="12.0"/>
<sim pdmb="0.75"/>
<config fname="raijuconfig.h5"/>
<plasmasphere doPsphere="T" doExcessMap="T"/>
<losses doLosses="T" doCX="T" doSS="T" doCC="T"/>
<cpl nFluidsIn="1" startupTscl="7200.0" vaFracThresh="0.60" bminThresh="5.0" Pstd="5.0" normAngThresh="180.0"/>
<fluidIn1 imhd="0" flav="2" excessToPsph="T"/>
</RAIJU>
</Kaiju>

Some files were not shown because too many files have changed in this diff