Merge branch 'development' into betterDipTest

This commit is contained in:
Jeffrey Garretson
2024-09-10 16:30:22 -06:00
35 changed files with 170 additions and 224 deletions

View File

@@ -1,74 +0,0 @@
#test_satcomp_cdasws
import datetime
import kaipy.satcomp.scutils as scutils
import pytest
import numpy as np
def test_getscIds():
    """Verify that scutils.getScIds() returns a dict of spacecraft IDs."""
    scIdDict = scutils.getScIds()
    # isinstance is the idiomatic type check (PEP 8); also accepts dict subclasses.
    assert isinstance(scIdDict, dict), \
        "Returned type is {}, but should be type dict".format(type(scIdDict))
@pytest.mark.parametrize("key", list(scutils.getScIds()))
def test_emphem(key):
    """Check that spacecraft `key` carries a complete Ephem sub-dict."""
    entry = scutils.getScIds()[key]
    assert 'Ephem' in entry, \
        '{} lacks Ephem key'.format(key)
    # Each ephemeris entry must identify its dataset, variable, and frame.
    for subkey in ('Id', 'Data', 'CoordSys'):
        assert subkey in entry['Ephem'], \
            '{} lacks Ephem {} key'.format(key, subkey)
@pytest.mark.parametrize("key", [key for key in scutils.getScIds().keys()])
def test_pullVar(key):
    """For each dataset of spacecraft `key`, pull one day of data ending at
    the dataset's latest available time and check the returned variables.

    Requires network access to CDAWeb via scutils.
    """
    scIdDict = scutils.getScIds()
    for var in scIdDict[key]:
        if var == "_testing":
            continue
        # Hoist the repeated deep lookups; previously dset_id/dset_vname were
        # assigned but never used in the pullVar call.
        dset_id = scIdDict[key][var]['Id']
        dset_vname = scIdDict[key][var]['Data']
        tStart, tEnd = scutils.getCdasDsetInterval(dset_id)
        assert tStart is not None, \
            '{} did not have valid start time'.format(key)
        # Pull the final day of the dataset's availability window.
        t1 = tEnd
        t1dt = datetime.datetime.strptime(t1, "%Y-%m-%dT%H:%M:%S.%fZ")
        t0 = (t1dt - datetime.timedelta(days=1)).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
        status, data = scutils.pullVar(dset_id, dset_vname, t0, t1, 60.0)
        assert status['http']['status_code'] == 200, \
            "pullVar failed to return for {},{}".format(key, var)
        # 'Data' may name either a single variable or a list of variables.
        if isinstance(dset_vname, str):
            assert dset_vname in data.keys(), \
                "pullVar error - data does not contain {} for {},{}".format(
                    dset_vname, key, var)
        if isinstance(dset_vname, list):
            for item in dset_vname:
                assert item in data.keys(), \
                    "pullVar error - data does not contain {} for {},{}".format(
                        item, key, var)
@pytest.fixture
def exampleObs():
    """Observed values used by the error-metric test."""
    observed = [3, -0.5, 2, 7]
    return np.array(observed)
@pytest.fixture
def examplePred():
    """Predicted values used by the error-metric test."""
    predicted = [2.5, 0.0, 2, 8]
    return np.array(predicted)
def test_computeErrors(exampleObs, examplePred):
    """Check scutils.computeErrors against hand-computed metric values."""
    MAE, MSE, RMSE, MAPE, RSE, PE = scutils.computeErrors(exampleObs,
                                                          examplePred)
    expected = (
        (MAE, 0.5),
        (MSE, 0.375),
        (RMSE, 0.6123724356957945),
        (MAPE, 0.3273809523809524),
        (RSE, 0.05139186295503212),
        (PE, 0.9486081370449679),
    )
    for actual, want in expected:
        assert actual == pytest.approx(want)

View File

@@ -1,7 +0,0 @@
# test_with_pytest.py
def test_always_passes():
    """Smoke test: confirms the pytest harness itself runs."""
    assert True


# A deliberately failing counterpart, kept for manual harness checks:
# def test_always_fails():
#     assert False

View File

@@ -11,7 +11,7 @@ $KAIJUHOME/testingScripts/mage_build_test_modules
This script reads the file initial_condition_build_test.lst from this
directory, and uses the contents as a list of module list files to use for
MAGE iinitial condition build tests.
MAGE initial condition build tests. Each build takes about 2 minutes.
NOTE: These tests are performed on a load-balance-assigned login node on
derecho. No PBS job is submitted.
@@ -122,7 +122,8 @@ def main():
# ------------------------------------------------------------------------
# Make a directory to hold all of the initial condition build tests.
print(f"Creating ${INITIAL_CONDITION_BUILD_TEST_DIRECTORY}.")
if verbose:
print(f"Creating ${INITIAL_CONDITION_BUILD_TEST_DIRECTORY}.")
os.mkdir(INITIAL_CONDITION_BUILD_TEST_DIRECTORY)
# ------------------------------------------------------------------------
@@ -296,8 +297,9 @@ def main():
if not os.path.isfile(path):
missing.append(executable)
if len(missing) > 0:
for executable in missing:
print(f"ERROR: Did not build {executable}.")
if verbose:
for executable in missing:
print(f"ERROR: Did not build {executable}.")
else:
test_passed[i_test][j_ic] = True
@@ -331,8 +333,7 @@ def main():
# Summarize the test results.
test_report_summary_string = (
'Summary of initial condition build test results from `ICtest.py`'
f" for branch or commit or tag `{BRANCH_OR_COMMIT}`: "
f"Initial condition build test results for `{BRANCH_OR_COMMIT}`: "
)
if 'FAILED' in test_report_details_string:
test_report_summary_string += '*FAILED*\n'
@@ -344,7 +345,7 @@ def main():
print(test_report_details_string)
# If a test failed, or loud mode is on, post report to Slack.
if (slack_on_fail and 'FAILED' in test_report_details_string) or be_loud:
if (slack_on_fail and 'FAILED' in test_report_summary_string) or be_loud:
slack_client = common.slack_create_client()
if debug:
print(f"slack_client = {slack_client}")

View File

@@ -9,7 +9,7 @@ $KAIJUHOME/testingScripts/mage_build_test_modules
This script reads the file build_test.lst from this directory, and
uses the contents as a list of module list files to use for MAGE build
tests.
tests. Each build takes about 10 minutes.
NOTE: These tests are performed on a load-balance-assigned login node on
derecho. No PBS job is submitted.
@@ -107,19 +107,20 @@ def main():
slack_on_fail = args.slack_on_fail
verbose = args.verbose
# -------------------------------------------------------------------------
# ------------------------------------------------------------------------
if debug:
print(f"Starting {sys.argv[0]} at {datetime.datetime.now()}")
print(f"Current directory is {os.getcwd()}")
# -------------------------------------------------------------------------
# ------------------------------------------------------------------------
# Make a directory to hold all of the build tests.
print(f"Creating ${BUILD_TEST_DIRECTORY}.")
if verbose:
print(f"Creating {BUILD_TEST_DIRECTORY}.")
os.mkdir(BUILD_TEST_DIRECTORY)
# -------------------------------------------------------------------------
# ------------------------------------------------------------------------
# Do a preliminary cmake run to generate the list of executables.
@@ -172,7 +173,8 @@ def main():
# Run make to build the list of executable targets.
if verbose:
print('Running make for executable list generation.')
cmd = f"{module_cmd}; make help | grep '\.x'"
pattern = r'\.x'
cmd = f"{module_cmd}; make help | grep '{pattern}'"
if debug:
print(f"cmd = {cmd}")
try:
@@ -209,7 +211,7 @@ def main():
if debug:
print(f"make_cmd = {make_cmd}")
# -------------------------------------------------------------------------
# ------------------------------------------------------------------------
# Make a list of module sets to build with.
@@ -220,7 +222,7 @@ def main():
if debug:
print(f"module_list_files = {module_list_files}")
# -------------------------------------------------------------------------
# ------------------------------------------------------------------------
# Initalize test results for all module sets to False (failed).
test_passed = [False]*len(module_list_files)
@@ -229,7 +231,7 @@ def main():
for (i_test, module_list_file) in enumerate(module_list_files):
if verbose:
print('Performing build test with module list file '
f"{module_list_file}")
f"{module_list_file}.")
# Extract the name of the list.
module_set_name = module_list_file.rstrip('.lst')
@@ -331,7 +333,7 @@ def main():
# End of loop over module sets.
# -------------------------------------------------------------------------
# ------------------------------------------------------------------------
# Detail the test results
test_report_details_string = ''
@@ -348,8 +350,7 @@ def main():
# Summarize the test results.
test_report_summary_string = (
'Summary of build test results from `buildTest.py`'
f" for branch or commit or tag `{BRANCH_OR_COMMIT}`: "
f"Build test results for `{BRANCH_OR_COMMIT}`: "
)
if 'FAILED' in test_report_details_string:
test_report_summary_string += '*FAILED*'
@@ -361,7 +362,7 @@ def main():
print(test_report_details_string)
# If a test failed, or loud mode is on, post report to Slack.
if (slack_on_fail and 'FAILED' in test_report_details_string) or be_loud:
if (slack_on_fail and 'FAILED' in test_report_summary_string) or be_loud:
slack_client = common.slack_create_client()
if debug:
print(f"slack_client = {slack_client}")

View File

@@ -20,5 +20,5 @@
30 00 * * 1 ssh derecho "/glade/u/home/ewinter/scratch/mage_testing/kaiju-private/ewinter-derecho_testing/kaiju-private/testingScripts/run_mage_tests.sh -v -b development 'weeklyDash.py -sv'" >> /glade/u/home/ewinter/scratch/mage_testing/logs/weeklyDash-2-development.out 2>&1
# Duplicate dashes for master
35 00 * * 1 ssh derecho "/glade/u/home/ewinter/scratch/mage_testing/kaiju-private/ewinter-derecho_testing/kaiju-private/testingScripts/run_mage_tests.sh -v -b master 'weeklyDash.py -lv'" >> /glade/u/home/ewinter/scratch/mage_testing/logs/weeklyDash-1-master.out 2>&1
40 00 * * 1 ssh derecho "/glade/u/home/ewinter/scratch/mage_testing/kaiju-private/ewinter-derecho_testing/kaiju-private/testingScripts/run_mage_tests.sh -v -b master 'weeklyDash.py -lv'" >> /glade/u/home/ewinter/scratch/mage_testing/logs/weeklyDash-2-master.out 2>&1
35 00 * * 1 ssh derecho "/glade/u/home/ewinter/scratch/mage_testing/kaiju-private/ewinter-derecho_testing/kaiju-private/testingScripts/run_mage_tests.sh -v -b master 'weeklyDash.py -sv'" >> /glade/u/home/ewinter/scratch/mage_testing/logs/weeklyDash-1-master.out 2>&1
40 00 * * 1 ssh derecho "/glade/u/home/ewinter/scratch/mage_testing/kaiju-private/ewinter-derecho_testing/kaiju-private/testingScripts/run_mage_tests.sh -v -b master 'weeklyDash.py -sv'" >> /glade/u/home/ewinter/scratch/mage_testing/logs/weeklyDash-2-master.out 2>&1

View File

@@ -29,7 +29,7 @@ export KMP_STACKSIZE=128M
echo 'The active environment variables are:'
printenv
# Run the Intel checks.
# Run the Intel memory checks checks.
MPICOMMAND="mpiexec $KAIJUHOME/scripts/preproc/pinCpuCores.sh"
${MPICOMMAND} inspxe-cl -collect=mi3 -r memResults -- ./voltron_mpi.x tinyCase.xml

View File

@@ -21,7 +21,7 @@ module list
echo 'Loading python environment.'
mage_test_root='{{ mage_test_root }}'
export CONDARC="${mage_test_root}/condarc"
export CONDA_ENVS_PATH="${mage_test_root}/conda"
export CONDA_ENVS_PATH="${mage_test_root}/.conda"
mage_miniconda3="${mage_test_root}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
@@ -39,11 +39,13 @@ conda activate kaiju-3.8-testing
echo 'Setting up MAGE environment.'
source {{ kaijuhome }}/scripts/setupEnvironment.sh
source {{ kaipyhome }}/kaipy/scripts/setupEnvironment.sh
echo 'Setting environment variables.'
export TMPDIR={{ tmpdir }}
export SLACK_BOT_TOKEN={{ slack_bot_token }}
export DERECHO_TESTING_ACCOUNT={{ account }}
export BRANCH_OR_COMMIT={{ branch_or_commit }}
echo 'The active environment variables are:'
printenv

View File

@@ -4,11 +4,11 @@
This script runs a series of tests of the MAGE software using Intel tools.
The Intel Inspector memory checks run in about an hour on two derecho nodes.
The Intel Inspector memory checks run in about 50 minutes on two derecho nodes.
The Intel Inspector thread checks run in about 45 minutes on two derecho nodes.
The report script runs in about 40 minutes on one derecho node.
The report script runs in about 2 minutes on one derecho node.
Authors
-------
@@ -90,12 +90,13 @@ REPORT_PBS_FILENAME = 'intelCheckSubmitReport.pbs'
TEST_INPUT_FILES = [
'tinyCase.xml',
'bcwind.h5',
'lfmD.h5',
'rcmconfig.h5',
'memSuppress.sup',
'threadSuppress.sup',
]
# Branch or commit (or tag) used for testing.
BRANCH_OR_COMMIT = os.environ['BRANCH_OR_COMMIT']
def main():
"""Begin main program.
@@ -293,6 +294,42 @@ def main():
to_path = os.path.join('.', filename)
shutil.copyfile(from_path, to_path)
# Generate the LFM grid file.
if verbose:
print('Creating LFM grid file.')
cmd = 'genLFM.py -gid D'
if debug:
print(f"cmd = {cmd}")
try:
_ = subprocess.run(cmd, shell=True, check=True)
except subprocess.CalledProcessError as e:
print('ERROR: Unable to create LFM grid file for module set '
f"{module_set_name}.\n"
f"e.cmd = {e.cmd}\n"
f"e.returncode = {e.returncode}\n"
'See testing log for output from genLFM.py.\n'
'Skipping remaining steps for module set'
f"{module_set_name}\n")
continue
# Generate the RCM configuration file.
if verbose:
print('Creating RCM configuration file.')
cmd = 'genRCM.py'
if debug:
print(f"cmd = {cmd}")
try:
_ = subprocess.run(cmd, shell=True, check=True)
except subprocess.CalledProcessError as e:
print('ERROR: Unable to create RCM configuration file'
f" for module set {module_set_name}.\n"
f"e.cmd = {e.cmd}\n"
f"e.returncode = {e.returncode}\n"
'See testing log for output from genRCM.py.\n'
'Skipping remaining steps for module set '
f"{module_set_name}\n")
continue
# Assemble common data to fill in the PBS templates.
pbs_options = {}
pbs_options['account'] = os.environ['DERECHO_TESTING_ACCOUNT']
@@ -300,20 +337,22 @@ def main():
pbs_options['job_priority'] = os.environ['DERECHO_TESTING_PRIORITY']
pbs_options['modules'] = module_names
pbs_options['kaijuhome'] = KAIJUHOME
pbs_options['kaipyhome'] = os.environ["KAIPYHOME"]
pbs_options['tmpdir'] = os.environ['TMPDIR']
pbs_options['slack_bot_token'] = os.environ['SLACK_BOT_TOKEN']
pbs_options['mage_test_root'] = os.environ['MAGE_TEST_ROOT']
pbs_options['branch_or_commit'] = BRANCH_OR_COMMIT
# Set options specific to the memory check, then render the template.
pbs_options['job_name'] = 'mage_intelCheckSubmitMem'
pbs_options['walltime'] = '12:00:00'
pbs_options['walltime'] = '02:00:00'
pbs_content = mem_check_pbs_template.render(pbs_options)
with open(MEM_CHECK_PBS_FILENAME, 'w', encoding='utf-8') as f:
f.write(pbs_content)
# Set options specific to the thread check, then render the template.
pbs_options['job_name'] = 'mage_intelCheckSubmitThread'
pbs_options['walltime'] = '12:00:00'
pbs_options['walltime'] = '02:00:00'
pbs_content = thread_check_pbs_template.render(pbs_options)
with open(THREAD_CHECK_PBS_FILENAME, 'w', encoding='utf-8') as f:
f.write(pbs_content)
@@ -421,36 +460,30 @@ def main():
f"Test results are on `derecho` in `{INTEL_CHECKS_DIRECTORY}`.\n"
)
for (i_module_set, module_list_file) in enumerate(module_list_files):
if not submit_ok[i_module_set]:
test_report_details_string += (
f"Module set `{module_list_file}`: *FAILED*"
)
continue
test_report_details_string += (
f"`{MEM_CHECK_PBS_FILENAME}` for module set `{module_list_file}` "
f"submitted as PBS job {job_ids[i_module_set][0]}.\n"
)
test_report_details_string += (
f"`{THREAD_CHECK_PBS_FILENAME}` for module set "
f"`{module_list_file}` submitted as PBS job "
f"{job_ids[i_module_set][1]}.\n"
)
test_report_details_string += (
f"`{REPORT_PBS_FILENAME}` for module set `{module_list_file}` "
f"submitted as PBS job {job_ids[i_module_set][2]}.\n"
test_report_details_string = (
"Submit Intel Inspector tests for module set "
f"`{module_list_file}`: "
)
if submit_ok[i_module_set]:
test_report_details_string += "*PASSED*"
else:
test_report_details_string += "*FAILED*"
# Summarize the test results
test_report_summary_string = (
'Intel Inspector tests submitted (`intelChecks.py`).'
f"Intel Inspector test submission for `{BRANCH_OR_COMMIT}`: "
)
if 'FAILED' in test_report_details_string:
test_report_summary_string += '*FAILED*'
else:
test_report_summary_string += '*PASSED*'
# Print the test results summary and details.
print(test_report_summary_string)
print(test_report_details_string)
# If a test failed, or loud mode is on, post report to Slack.
if (slack_on_fail and 'FAILED' in test_report_details_string) or be_loud:
if (slack_on_fail and 'FAILED' in test_report_summary_string) or be_loud:
slack_client = common.slack_create_client()
if debug:
print(f"slack_client = {slack_client}")

View File

@@ -32,6 +32,9 @@ import common
# Program description.
DESCRIPTION = 'Create report for Intel Inspector tests.'
# Branch or commit (or tag) used for testing.
BRANCH_OR_COMMIT = os.environ['BRANCH_OR_COMMIT']
def main():
"""Begin main program.
@@ -205,27 +208,38 @@ def main():
# Detail the test results
test_report_details_string = ''
test_report_details_string += (
f"Test results are in `{os.getcwd()}`.\n"
f"Intel Inspector test results are in `{os.getcwd()}`.\n"
)
test_report_details_string += 'Results of memory tests:\n'
test_report_details_string += 'Results of memory tests: '
with open(memory_errors_file, 'r', encoding='utf-8') as f:
test_report_details_string += f.read()
if 'Error' in f.read():
test_report_details_string += '*FAILED*'
else:
test_report_details_string += '*PASSED*'
test_report_details_string += '\n'
test_report_details_string += 'Results of thread tests:\n'
test_report_details_string += 'Results of thread tests: '
with open(thread_errors_file, 'r', encoding='utf-8') as f:
test_report_details_string += f.read()
if 'Error' in f.read():
test_report_details_string += '*FAILED*'
else:
test_report_details_string += '*PASSED*'
test_report_details_string += '\n'
# Summarize the test results
test_report_summary_string = (
'Intel Inspector tests complete (`intelChecksReport.py`).'
f"Intel Inspector test results for `{BRANCH_OR_COMMIT}`: "
)
if 'FAILED' in test_report_details_string:
test_report_summary_string += '*FAILED*'
else:
test_report_summary_string += '*PASSED*'
# Print the test results summary and details.
print(test_report_summary_string)
print(test_report_details_string)
# If a test failed, or loud mode is on, post report to Slack.
if (slack_on_fail and 'FAILED' in test_report_details_string) or be_loud:
if (slack_on_fail and 'FAILED' in test_report_summary_string) or be_loud:
slack_client = common.slack_create_client()
if debug:
print(f"slack_client = {slack_client}")

View File

@@ -1,3 +1,4 @@
# This is the standard build module set: Intel Fortran + MKL + MPI
CMAKE_ENV=FC=`which ifort` FFLAGS=-qmkl
CMAKE_OPTIONS=-DENABLE_MPI=ON -DENABLE_MKL=ON
ncarenv/23.06

View File

@@ -1,3 +1,4 @@
# THIS MODULE SET NO LONGER WORKS.
CMAKE_ENV=FC=`which ifort` FFLAGS=-qmkl
CMAKE_OPTIONS=-DENABLE_MPI=ON -DENABLE_MKL=ON
ncarenv/23.06

View File

@@ -1,3 +1,4 @@
# This module set uses gfortran and MPI.
CMAKE_OPTIONS=-DENABLE_MPI=ON
ncarenv/23.06
cmake/3.26.3

View File

@@ -1,3 +1,4 @@
# This module set uses Intel Fortran + MPI, but does not use MKL.
CMAKE_ENV=
CMAKE_OPTIONS=-DENABLE_MPI=ON -DENABLE_MKL=OFF
ncarenv/23.06

View File

@@ -1,3 +1,5 @@
# This module set uses a set of modules developed by Kevin Pham.
# It uses Intel Fortran, MKL, MPI, and the ESMF.
CMAKE_ENV=FC=`which ifort` FFLAGS=-qmkl
CMAKE_OPTIONS=-DENABLE_MPI=ON -DENABLE_MKL=ON -DALLOW_INVALID_COMPILERS=ON
ncarenv/23.09

View File

@@ -1,3 +1,4 @@
# This module set is used for running Intel Inspector checks.
CMAKE_ENV=
CMAKE_OPTIONS=-DENABLE_MPI=ON -DALLOW_INVALID_COMPILERS=ON
ncarenv/23.09

View File

@@ -1 +1 @@
01.lst
04.lst

View File

@@ -10,17 +10,28 @@ for unit testing, then a job for the test report.
There are 5 PBS job scripts used per module set. Each is generated from a
jinja2 template.
1. genTestData.pbs - Data generation. Runs in about 17 minutes on 5 derecho
nodes. Output in PBS job file genTestData.o*, and geo_mpi.out.
1. genTestData.pbs - Data generation. Runs in about 4-5 minutes on 5 derecho
nodes. Output in PBS job file genTestData.o*, and cmiD_deep_8_genRes.out.
2. runCaseTests.pbs - Runs in about 17 minutes on 1 derecho node. Only runs if
2. runCaseTests.pbs - Runs in about 35 minutes on 1 derecho node. Only runs if
genTestData.pbs completes successfully. Output in PBS log file
runCaseTests.o*, caseTests.out, and caseMpiTests.out.
3. runNonCaseTests1.pbs - Runs in about 2 minutes on 1 derecho node. Only runs
if genTestData.pbs completes successfully. Output in PBS log file
runNonCaseTests1.o*, gamTests.out, mixTests.out, voltTests.out,
baseMpiTests.out, gamMpiTests.out.
baseMpiTests.out, gamMpiTests.out. shgrTests.out.
NOTE: As of 2024-08-22, voltTests.out will contain errors like this when
run on the development branch:
...
[testebsquish.pf:151]
Squish Fake Projection Latitude value is wrong. Check Squish Processing and Output.
AssertEqual failure:
Expected: <147591.2572518899>
Actual: <143412.6753716097>
Difference: <-4178.581880280253> (greater than tolerance of .1000000000000000E-06)
...
4. runNonCaseTests2.pbs - Runs in about XX minutes on 2 derecho nodes. Only
runs if genTestData.pbs completes successfully. Output in PBS log file
@@ -144,6 +155,9 @@ RUN_NON_CASE_TESTS_1_PBS_SCRIPT = 'runNonCaseTests1.pbs'
RUN_NON_CASE_TESTS_2_PBS_SCRIPT = 'runNonCaseTests2.pbs'
UNIT_TEST_REPORT_PBS_SCRIPT = 'unitTestReport.pbs'
# Branch or commit (or tag) used for testing.
BRANCH_OR_COMMIT = os.environ['BRANCH_OR_COMMIT']
# Name of file to hold job list.
JOB_LIST_FILE = 'jobs.txt'
@@ -388,6 +402,7 @@ def main():
pbs_options['job_priority'] = os.environ['DERECHO_TESTING_PRIORITY']
pbs_options['modules'] = module_names
pbs_options['kaijuhome'] = KAIJUHOME
pbs_options['branch_or_commit'] = BRANCH_OR_COMMIT
# Go to the bin directory for testing.
os.chdir(BUILD_BIN_DIR)
@@ -612,7 +627,7 @@ def main():
# Detail the test results
test_report_details_string = ''
test_report_details_string += (
f"Test results are in `{UNIT_TEST_DIRECTORY}`.\n"
f"Test results are on `derecho` in `{UNIT_TEST_DIRECTORY}`.\n"
)
for (i_module_set, module_list_file) in enumerate(module_list_files):
if not submit_ok[i_module_set]:
@@ -647,14 +662,13 @@ def main():
)
# Summarize the test results
test_report_summary_string = (
f"Unit test submission for `{os.environ['BRANCH_OR_COMMIT']}`: "
)
if 'FAILED' in test_report_details_string:
test_report_summary_string = (
'Fortran unit test submission: *FAILED*'
)
test_report_summary_string += '*FAILED*'
else:
test_report_summary_string = (
'Fortran unit test submission: *PASSED*'
)
test_report_summary_string += '*PASSED*'
# Print the test results summary and details.
print(test_report_summary_string)

View File

@@ -190,7 +190,7 @@ def main():
# Detail the test results
test_report_details_string = ''
test_report_details_string += (
f"Test results are in {os.getcwd()}.\n"
f"Test results are on `derecho` in {os.getcwd()}.\n"
)
if myError:
test_report_details_string += 'Errors occurred during testing.\n'
@@ -203,12 +203,12 @@ def main():
# Summarize the test results.
test_report_summary_string = (
'Summary of Fortran unit test results from `unitTestReport.py`: '
f"Unit test results for `{os.environ['BRANCH_OR_COMMIT']}`: "
)
if myError or jobKilled or okFailure:
test_report_summary_string += '*FAILED*\n'
test_report_summary_string += '*FAILED*'
else:
test_report_summary_string += '*PASSED*\n'
test_report_summary_string += '*PASSED*'
# Print the test results summary and details.
print(test_report_summary_string)

View File

@@ -413,11 +413,15 @@ def main():
f"job {job_ids[i_module_set]}.\n"
)
# Summarize the test results.
test_report_summary_string = (
f"Weekly dash submission for `{os.environ['BRANCH_OR_COMMIT']}`: "
)
if 'FAILED' in test_report_details_string:
test_report_summary_string = 'Weekly dash submission: *FAILED*\n'
test_report_summary_string += '*FAILED*'
else:
test_report_summary_string = 'Weekly dash submission: *PASSED*\n'
test_report_summary_string += '*PASSED*'
# Print the test results summary and details.
print(test_report_summary_string)

View File

@@ -2,7 +2,8 @@
"""Create the MAGE weekly dash test report.
This script creates the MAGE weekly dash test report.
This script creates the MAGE weekly dash test report. This script assumes the
result files are in the current directory.
Authors
-------
@@ -75,9 +76,6 @@ REFERENCE_LOG_DEVELOPMENT = os.path.join(
REFERENCE_RESULTS_DIRECTORY_DEVELOPMENT, 'voltron_mpi.out'
)
# Name of subdirectory containing binaries and test results.
BIN_DIR = 'bin'
# Name of file containg PBS job IDs.
JOB_LIST_FILE = 'jobs.txt'
@@ -183,23 +181,6 @@ def main():
# ------------------------------------------------------------------------
# Move to the top-level weekly dash directory.
os.chdir(WEEKLY_DASH_DIRECTORY)
# ------------------------------------------------------------------------
# Get list of weekly dash directories.
weekly_dash_directories = glob.glob(WEEKLY_DASH_DIRECTORY_GLOB_PATTERN)
if debug:
print(f"weekly_dash_directories = {weekly_dash_directories}")
# <HACK>
# Use only the first mdirectory for now.
weekly_dash_directory = weekly_dash_directories[0]
# </HACK>
# ------------------------------------------------------------------------
# Read reference results for the master branch.
if verbose:
print('Reading reference results for real-time performance for master '
@@ -411,13 +392,7 @@ def main():
# Read results from the latest run.
if verbose:
print(f"Reading results for latest run in {weekly_dash_directory}.")
# Go to weekly dash folder
os.chdir(weekly_dash_directory)
# Move down to the directory containing the dash results.
os.chdir(BIN_DIR)
print(f"Reading results for latest run in {os.getcwd()}.")
# Read in the jobs.txt file to get the job number.
try:
@@ -805,7 +780,7 @@ def main():
# Read the CPCP values from the voltron output file.
CPCP_north_development = kh5.getTs(REMIX_OUTPUT_FILE_DEVELOPMENT,
step_IDs_development, 'nCPCP')
CPCP_south_development = kh5.getTs(remix_OUTPUT_FILE_DEVELOPMENT,
CPCP_south_development = kh5.getTs(REMIX_OUTPUT_FILE_DEVELOPMENT,
step_IDs_development, 'sCPCP')
if debug:
print(f"CPCP_north_development = {CPCP_north_development}")
@@ -894,7 +869,7 @@ def main():
# Make the magnetosphere quick-look plot.
if verbose:
print('Creating magnetosphere quicklook plot for '
f"{weekly_dash_directory}.")
f"{os.getcwd()}.")
# Create the plot.
cmd = 'msphpic.py'
@@ -915,7 +890,7 @@ def main():
# Make the REMIX quick-look plots.
if verbose:
print(f"Creating REMIX quicklook plots for {weekly_dash_directory}.")
print(f"Creating REMIX quicklook plots for {os.getcwd()}.")
# Create the plot.
cmd = 'mixpic.py'
@@ -936,7 +911,7 @@ def main():
# Make the RCM quick-look plot.
if verbose:
print(f"Creating RCM quicklook plot for {weekly_dash_directory}.")
print(f"Creating RCM quicklook plot for {os.getcwd()}.")
# Create the plot.
cmd = 'rcmpic.py'
@@ -1069,19 +1044,14 @@ def main():
if debug:
print(f"slack_client = {slack_client}")
message = (
'Weekly dash result plots complete on branch '
f"{BRANCH_OR_COMMIT}.\n"
' Latest comparative results attached as replies to this '
'message.\n'
)
message += (
f"Test results are in {os.getcwd()}.\n"
f"Weekly dash result plots complete for `{BRANCH_OR_COMMIT}`.\n"
)
slack_response = common.slack_send_message(
slack_client, message, is_test=is_test)
if slack_response['ok']:
parent_ts = slack_response['ts']
message = (
message = f"Test results are in {os.getcwd()}.\n"
message += (
'This was a 4x4x1 (IxJxK) decomposed Quad Resolution Run using'
' 8 nodes for Gamera, 1 for Voltron, and 2 Squish Helper nodes'
' (11 nodes total).'

View File

@@ -5,7 +5,7 @@
#PBS -q {{ queue }}
#PBS -l job_priority={{ job_priority }}
#PBS -l walltime={{ walltime }}
#PBS -l select=1:ncpus=128:mpiprocs=9:ompthreads=14
#PBS -l select=2:ncpus=128:mpiprocs=9:ompthreads=36
#PBS -j oe
#PBS -m abe

View File

@@ -43,6 +43,7 @@ source {{ kaijuhome }}/scripts/setupEnvironment.sh
echo 'Setting environment variables.'
export MAGE_TEST_SET_ROOT={{ mage_test_set_root }}
export SLACK_BOT_TOKEN={{ slack_bot_token }}
export BRANCH_OR_COMMIT={{ branch_or_commit }}
echo 'The active environment variables are:'
printenv

View File

@@ -23,7 +23,7 @@
<ringknobs doVClean="T"/>
<wind tsfile="bcwind.h5"/>
<source doSource="T" doBounceDT="T"/>
<threading NumTh="18"/>
<threading NumTh="14"/>
</Gamera>
<!-- Remix -->
<REMIX>

View File

@@ -23,7 +23,7 @@
<ringknobs doVClean="T"/>
<wind tsfile="bcwind.h5"/>
<source doSource="T" doBounceDT="T"/>
<threading NumTh="18"/>
<threading NumTh="14"/>
<coupling blockHalo="T"/>
</Gamera>
<!-- Remix -->

View File

@@ -23,7 +23,7 @@
<ringknobs doVClean="T"/>
<wind tsfile="bcwind.h5"/>
<source doSource="T" doBounceDT="T"/>
<threading NumTh="18"/>
<threading NumTh="14"/>
<coupling blockHalo="T"/>
</Gamera>
<!-- Remix -->

View File

@@ -23,7 +23,7 @@
<ringknobs doVClean="T"/>
<wind tsfile="bcwind.h5"/>
<source doSource="T" doBounceDT="T"/>
<threading NumTh="18"/>
<threading NumTh="14"/>
<coupling blockHalo="T"/>
</Gamera>
<!-- Remix -->

View File

@@ -23,7 +23,7 @@
<ringknobs doVClean="T"/>
<wind tsfile="bcwind.h5"/>
<source doSource="T" doBounceDT="T"/>
<threading NumTh="18"/>
<threading NumTh="14"/>
<coupling blockHalo="T"/>
</Gamera>
<!-- Remix -->

View File

@@ -23,7 +23,7 @@
<ringknobs doVClean="T"/>
<wind tsfile="bcwind.h5"/>
<source doSource="T" doBounceDT="T"/>
<threading NumTh="18"/>
<threading NumTh="14"/>
<coupling blockHalo="T"/>
</Gamera>
<!-- Remix -->

View File

@@ -23,7 +23,7 @@
<ringknobs doVClean="T"/>
<wind tsfile="bcwind.h5"/>
<source doSource="T" doBounceDT="T"/>
<threading NumTh="18"/>
<threading NumTh="14"/>
<coupling blockHalo="T"/>
</Gamera>
<!-- Remix -->

View File

@@ -139,7 +139,7 @@ contains
end subroutine testConcCoupling
@test(npes=[9])
! @test(npes=[9])
subroutine testAsyncCoupling(this)
class (MpiTestMethod), intent(inout) :: this
@@ -227,16 +227,6 @@ contains
else
write (*,'(a,I0)') 'Testing Quick Squish ',this%getNumProcesses()
!adjust coupling parameters, must be MPI Gamera Coupler
SELECT type(cpl=>voltAppMpi%gApp)
TYPE IS (gamCouplerMpi_volt_T)
call endVoltronWaits(voltAppMpi)
voltAppMpi%doSerialMHD = .false.
cpl%doAsyncCoupling = .true.
CLASS DEFAULT
@assertTrue(.false., "Voltron Allocated non-mpi Gamera coupler for MPI Voltron Coupling Test. Failure")
ENDSELECT
! run case for 10 seconds past spinup
call runApplication(11.0_rp)
@@ -283,16 +273,6 @@ contains
else
write (*,'(a,I0)') 'Testing Quick Squish Storm ',this%getNumProcesses()
!adjust coupling parameters, must be MPI Gamera Coupler
SELECT type(cpl=>voltAppMpi%gApp)
TYPE IS (gamCouplerMpi_volt_T)
call endVoltronWaits(voltAppMpi)
voltAppMpi%doSerialMHD = .false.
cpl%doAsyncCoupling = .true.
CLASS DEFAULT
@assertTrue(.false., "Voltron Allocated non-mpi Gamera coupler for MPI Voltron Coupling Test. Failure")
ENDSELECT
! run case for 10 seconds past restart
call runApplication(611.0_rp)