diff --git a/testingScripts/build_mage-template.pbs b/testingScripts/build_mage-template.pbs
index 118aeb9c..5ae38498 100644
--- a/testingScripts/build_mage-template.pbs
+++ b/testingScripts/build_mage-template.pbs
@@ -9,8 +9,6 @@
#PBS -j oe
#PBS -m abe
-# This script just builds the MAGE software.
-
echo "Job $PBS_JOBID started at `date` on `hostname` in directory `pwd`."
echo 'Loading modules.'
diff --git a/testingScripts/send_slack_message.py b/testingScripts/send_slack_message.py
new file mode 100644
index 00000000..1c480bd6
--- /dev/null
+++ b/testingScripts/send_slack_message.py
@@ -0,0 +1,152 @@
+#!/usr/bin/env python
+
+"""Send a message to Slack.
+
+Send a message to Slack.
+
+Authors
+-------
+Eric Winter
+"""
+
+
+# Import standard modules.
+import datetime
+# import glob
+import os
+import sys
+
+# # Import 3rd-party modules.
+
+# Import project modules.
+import common
+
+
+# Program constants
+
+# Program description.
+DESCRIPTION = "Send a message to Slack."
+
+# # Root of directory tree for this set of tests.
+# MAGE_TEST_SET_ROOT = os.environ['MAGE_TEST_SET_ROOT']
+
+# # Directory for unit tests
+# UNIT_TEST_DIRECTORY = os.path.join(MAGE_TEST_SET_ROOT, 'unitTest')
+
+# # glob pattern for naming unit test directories
+# UNIT_TEST_DIRECTORY_GLOB_PATTERN = 'unitTest_*'
+
+# # Name of build subdirectory containing binaries
+# BUILD_BIN_DIR = 'bin'
+
+# # Name of file containing job IDs for each unit test directory.
+# JOB_ID_LIST_FILE = 'jobs.txt'
+
+
+def create_command_line_parser():
+ """Create the command-line argument parser.
+
+ Create the parser for command-line arguments.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ parser : argparse.ArgumentParser
+ Command-line argument parser for this script.
+
+ Raises
+ ------
+ None
+ """
+ parser = common.create_command_line_parser(DESCRIPTION)
+ parser.add_argument(
+ "message",
+        nargs="?", default="",
+ help="Message to send to Slack (default: %(default)s)"
+ )
+ return parser
+
+
+def send_slack_message(args: dict = None):
+ """Send a message to Slack.
+
+ Send a message to Slack.
+
+ Parameters
+ ----------
+ args : dict
+ Dictionary of program options.
+
+ Returns
+ -------
+ None
+
+ Raises
+ ------
+ None
+ """
+ # Local convenience variables.
+ debug = args["debug"]
+ be_loud = args["loud"]
+ slack_on_fail = args["slack_on_fail"]
+ is_test = args["test"]
+ verbose = args["verbose"]
+ message = args["message"]
+
+ # ------------------------------------------------------------------------
+
+ if debug:
+ print(f"Starting {sys.argv[0]} at {datetime.datetime.now()}")
+ print(f"Current directory is {os.getcwd()}")
+
+ # ------------------------------------------------------------------------
+
+ # Create the Slack client.
+ slack_client = common.slack_create_client()
+ slack_response_summary = common.slack_send_message(
+ slack_client, message, is_test=is_test
+ )
+
+ # ------------------------------------------------------------------------
+
+ if debug:
+ print(f"Ending {sys.argv[0]} at {datetime.datetime.now()}")
+
+
+def main():
+ """Begin main program.
+
+ This is the main program code.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ None
+
+ Raises
+ ------
+ None
+ """
+ # Set up the command-line parser.
+ parser = create_command_line_parser()
+
+ # Parse the command-line arguments.
+ args = parser.parse_args()
+ if args.debug:
+ print(f"args = {args}")
+
+ # ------------------------------------------------------------------------
+
+ # Call the main program logic. Note that the Namespace object (args)
+ # returned from the option parser is converted to a dict using vars().
+ send_slack_message(vars(args))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/testingScripts/unitTest-build-template.pbs b/testingScripts/unitTest-build-template.pbs
new file mode 100644
index 00000000..dd1d5085
--- /dev/null
+++ b/testingScripts/unitTest-build-template.pbs
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+#PBS -N {{ job_name }}
+#PBS -A {{ account }}
+#PBS -q {{ queue }}
+#PBS -l job_priority={{ job_priority }}
+#PBS -l select=1:ncpus=128
+#PBS -l walltime={{ walltime }}
+#PBS -j oe
+#PBS -m abe
+
+# Abort script on any error.
+set -e
+
+echo "Job $PBS_JOBID started at `date` on `hostname` in directory `pwd`."
+
+echo 'Loading modules.'
+module --force purge
+{%- for module in modules %}
+module load {{ module }}
+{%- endfor %}
+echo 'The currently loaded modules are:'
+module list
+
+echo 'The active environment variables are:'
+printenv
+
+echo 'Copying pFUnit binaries.'
+pfunit_dir="{{ mage_test_root }}/pfunit/pFUnit-4.2.0/ifort-23-mpich-derecho"
+kaiju_external_dir="{{ kaijuhome }}/external"
+cp -rp "${pfunit_dir}/FARGPARSE-1.1" "${kaiju_external_dir}/"
+cp -rp "${pfunit_dir}/GFTL-1.3" "${kaiju_external_dir}/"
+cp -rp "${pfunit_dir}/GFTL_SHARED-1.2" "${kaiju_external_dir}/"
+cp -rp "${pfunit_dir}/PFUNIT-4.2" "${kaiju_external_dir}/"
+
+# Build the code.
+cmd="{{ cmake_cmd }} >& cmake.out"
+echo $cmd
+eval $cmd
+cmd="{{ make_cmd }} >& make.out"
+echo $cmd
+eval $cmd
+
+echo "Job $PBS_JOBID ended at `date` on `hostname` in directory `pwd`."
diff --git a/testingScripts/unitTest.py b/testingScripts/unitTest.py
index 3b7f0558..d927d62b 100644
--- a/testingScripts/unitTest.py
+++ b/testingScripts/unitTest.py
@@ -3,43 +3,37 @@
"""Run MAGE Fortran unit tests.
This script runs a series of unit tests of the MAGE Fortran software. These
-tests are run as PBS jobs on derecho. There will be one job which generates
-the data for testing, then 3 dependent jobs that use the newly-generated data
-for unit testing, then a job for the test report.
+tests are run as PBS jobs on derecho. There will be one job which builds the
+code, one job which generates the data for testing, 3 jobs that use the
+newly-generated data for unit testing, and a job for the test report.
-There are 5 PBS job scripts used per module set. Each is generated from a
+There are 6 PBS job scripts used per module set. Each is generated from a
jinja2 template.
-1. genTestData.pbs - Data generation. Runs in about 4-5 minutes on 5 derecho
- nodes. Output in PBS job file genTestData.o*, and cmiD_deep_8_genRes.out.
+1. unitTest-build.pbs - Build the kaiju code for unit testing. Runs in about
+ 21 minutes on a single derecho node. Output in PBS job file
+ unitTest-build.o*, cmake.out, and make.out.
-2. runCaseTests.pbs - Runs in about 35 minutes on 1 derecho node. Only runs if
- genTestData.pbs completes successfully. Output in PBS log file
- runCaseTests.o*, caseTests.out, and caseMpiTests.out.
+2. genTestData.pbs - Data generation. Runs after the build job in about 4-5
+ minutes on 5 derecho nodes. Output in PBS job file genTestData.o*, and
+ cmiD_deep_8_genRes.out.
-3. runNonCaseTests1.pbs - Runs in about 2 minutes on 1 derecho node. Only runs
- if genTestData.pbs completes successfully. Output in PBS log file
- runNonCaseTests1.o*, gamTests.out, mixTests.out, voltTests.out,
- baseMpiTests.out, gamMpiTests.out. shgrTests.out.
- NOTE: As of 2024-08-22, voltTests.out will contain errors like this when
- run on the development branch:
+3. runCaseTests.pbs - Runs after the data generation job in about 35 minutes
+ on 1 derecho node. Output in PBS log file runCaseTests.o*, caseTests.out,
+ and caseMpiTests.out.
- ...
- [testebsquish.pf:151]
- Squish Fake Projection Latitude value is wrong. Check Squish Processing and Output.
- AssertEqual failure:
- Expected: <147591.2572518899>
- Actual: <143412.6753716097>
- Difference: <-4178.581880280253> (greater than tolerance of .1000000000000000E-06)
- ...
+4. runNonCaseTests1.pbs - Runs after the data generation job in about 6
+ minutes on 1 derecho node. Output in PBS log file runNonCaseTests1.o*,
+ gamTests.out, mixTests.out, voltTests.out, baseMpiTests.out,
+   gamMpiTests.out, shgrTests.out.
-4. runNonCaseTests2.pbs - Runs in about XX minutes on 2 derecho nodes. Only
- runs if genTestData.pbs completes successfully. Output in PBS log file
- runNonCaseTests2.o*, and voltMpiTests.out.
+5. runNonCaseTests2.pbs - Runs after the data generation job in about XX
+ minutes on 2 derecho nodes. Output in PBS log file runNonCaseTests2.o*, and
+ voltMpiTests.out.
-5. unitTestReport.pbs - Report generation. Runs in about XX minutes on 1
- derecho node. Only runs if jobs 2-4 complete successfully. Output in PBS
- log file unitTestReport.o*, and unitTestReport.out.
+6. unitTestReport.pbs - Report generation. Runs after all test jobs in about
+ XX minutes on 1 derecho node. Output in PBS log file unitTestReport.o*, and
+ unitTestReport.out.
NOTE: If this script is run as part of a set of tests for run_mage_tests.sh,
this script must be listed *last*, since it makes changes to the kaiju source
@@ -56,7 +50,6 @@ Eric Winter
# Import standard modules.
import datetime
import os
-import shutil
import subprocess
import sys
@@ -70,100 +63,669 @@ import common
# Program constants
# Program description.
-DESCRIPTION = 'Script for MAGE Fortran unit testing'
+DESCRIPTION = "Script for MAGE Fortran unit testing"
# Home directory of kaiju installation
-KAIJUHOME = os.environ['KAIJUHOME']
+KAIJUHOME = os.environ["KAIJUHOME"]
# Root of directory tree for this set of tests.
-MAGE_TEST_SET_ROOT = os.environ['MAGE_TEST_SET_ROOT']
+MAGE_TEST_SET_ROOT = os.environ["MAGE_TEST_SET_ROOT"]
# Directory for unit tests
-UNIT_TEST_DIRECTORY = os.path.join(MAGE_TEST_SET_ROOT, 'unitTest')
+UNIT_TEST_DIRECTORY = os.path.join(MAGE_TEST_SET_ROOT, "unitTest")
# Top-level directory for testing on derecho.
-MAGE_TEST_ROOT = os.environ['MAGE_TEST_ROOT']
-
-# Home directory for pFUnit compiled code
-PFUNIT_HOME = os.path.join(
- MAGE_TEST_ROOT, 'pfunit', 'pFUnit-4.2.0', 'ifort-23-mpich-derecho'
-)
-
-# List of pFUnit directories to copy from PFUNIT_HOME into
-# kaiju_private/external
-PFUNIT_BINARY_DIRECTORIES = [
- 'FARGPARSE-1.1',
- 'GFTL-1.3',
- 'GFTL_SHARED-1.2',
- 'PFUNIT-4.2',
-]
-
-# Path to kaiju subdirectory for external code
-KAIJU_EXTERNAL_DIRECTORY = os.path.join(KAIJUHOME, 'external')
+MAGE_TEST_ROOT = os.environ["MAGE_TEST_ROOT"]
# Path to directory containing the test scripts
-TEST_SCRIPTS_DIRECTORY = os.path.join(KAIJUHOME, 'testingScripts')
+TEST_SCRIPTS_DIRECTORY = os.path.join(KAIJUHOME, "testingScripts")
# Path to directory containing module lists
MODULE_LIST_DIRECTORY = os.path.join(TEST_SCRIPTS_DIRECTORY,
- 'mage_build_test_modules')
+ "mage_build_test_modules")
# Name of file containing names of modules lists to use for unit tests
-UNIT_TEST_LIST_FILE = os.path.join(MODULE_LIST_DIRECTORY, 'unit_test.lst')
+UNIT_TEST_LIST_FILE = os.path.join(MODULE_LIST_DIRECTORY, "unit_test.lst")
-# Path to directory containing the unit test scripts
-UNIT_TEST_SCRIPTS_DIRECTORY = os.path.join(KAIJUHOME, 'tests')
+# Path to directory containing the unit test CODE.
+UNIT_TEST_CODE_DIRECTORY = os.path.join(KAIJUHOME, "tests")
# Paths to jinja2 template files for PBS scripts.
+BUILD_PBS_TEMPLATE = os.path.join(
+ TEST_SCRIPTS_DIRECTORY, "unitTest-build-template.pbs"
+)
DATA_GENERATION_PBS_TEMPLATE = os.path.join(
- UNIT_TEST_SCRIPTS_DIRECTORY, 'genTestData-template.pbs'
+ UNIT_TEST_CODE_DIRECTORY, "genTestData-template.pbs"
)
RUN_CASE_TESTS_PBS_TEMPLATE = os.path.join(
- UNIT_TEST_SCRIPTS_DIRECTORY, 'runCaseTests-template.pbs'
+ UNIT_TEST_CODE_DIRECTORY, "runCaseTests-template.pbs"
)
RUN_NON_CASE_TESTS_1_PBS_TEMPLATE = os.path.join(
- UNIT_TEST_SCRIPTS_DIRECTORY, 'runNonCaseTests1-template.pbs'
+ UNIT_TEST_CODE_DIRECTORY, "runNonCaseTests1-template.pbs"
)
RUN_NON_CASE_TESTS_2_PBS_TEMPLATE = os.path.join(
- UNIT_TEST_SCRIPTS_DIRECTORY, 'runNonCaseTests2-template.pbs'
+ UNIT_TEST_CODE_DIRECTORY, "runNonCaseTests2-template.pbs"
)
UNIT_TEST_REPORT_PBS_TEMPLATE = os.path.join(
- UNIT_TEST_SCRIPTS_DIRECTORY, 'unitTestReport-template.pbs'
+ UNIT_TEST_CODE_DIRECTORY, "unitTestReport-template.pbs"
)
# Prefix for naming unit test directories
-UNIT_TEST_DIRECTORY_PREFIX = 'unitTest_'
-
-# Name of build subdirectory containing binaries
-BUILD_BIN_DIR = 'bin'
-
-# Input files for unit tests
-UNIT_TEST_DATA_INPUT_DIRECTORY = os.path.join(
- os.environ['MAGE_TEST_ROOT'], 'unit_test_inputs'
-)
-UNIT_TEST_DATA_INPUT_FILES = [
- 'bcwind.h5',
- 'geo_mpi.xml',
- 'lfmD.h5',
- 'raijuconfig.h5',
-]
+UNIT_TEST_DIRECTORY_PREFIX = "unitTest_"
# Names of PBS scripts to create from templates.
-DATA_GENERATION_PBS_SCRIPT = 'genTestData.pbs'
-RUN_CASE_TESTS_PBS_SCRIPT = 'runCaseTests.pbs'
-RUN_NON_CASE_TESTS_1_PBS_SCRIPT = 'runNonCaseTests1.pbs'
-RUN_NON_CASE_TESTS_2_PBS_SCRIPT = 'runNonCaseTests2.pbs'
-UNIT_TEST_REPORT_PBS_SCRIPT = 'unitTestReport.pbs'
-
-# Branch or commit (or tag) used for testing.
-BRANCH_OR_COMMIT = os.environ['BRANCH_OR_COMMIT']
+BUILD_PBS_SCRIPT = "unitTest-build.pbs"
+DATA_GENERATION_PBS_SCRIPT = "genTestData.pbs"
+RUN_CASE_TESTS_PBS_SCRIPT = "runCaseTests.pbs"
+RUN_NON_CASE_TESTS_1_PBS_SCRIPT = "runNonCaseTests1.pbs"
+RUN_NON_CASE_TESTS_2_PBS_SCRIPT = "runNonCaseTests2.pbs"
+UNIT_TEST_REPORT_PBS_SCRIPT = "unitTestReport.pbs"
# Name of file to hold job list.
-JOB_LIST_FILE = 'jobs.txt'
+JOB_LIST_FILE = "jobs.txt"
+
+
+def create_build_pbs_script(module_list_file: str):
+ """Create the PBS script to build the code.
+
+ Create the PBS script to build the code.
+
+ Parameters
+ ----------
+ module_list_file : str
+ Path to module list file for the build.
+
+ Returns
+ -------
+ pbs_script_name : str
+ Name of PBS script file.
+
+ Raises
+ ------
+ None
+ """
+ # Read this module list file, extracting cmake environment and
+ # options, if any.
+ path = os.path.join(MODULE_LIST_DIRECTORY, module_list_file)
+ module_names, cmake_environment, cmake_options = (
+ common.read_build_module_list_file(path)
+ )
+
+ #
+ # Extra argument needed for unit test build.
+ cmake_options += " -DCMAKE_BUILD_TYPE=RELWITHDEBINFO"
+ #
+
+ # Read the template for the PBS script.
+ with open(BUILD_PBS_TEMPLATE, "r", encoding="utf-8") as f:
+ template_content = f.read()
+ pbs_template = Template(template_content)
+
+ # Create the options dictionary for the template.
+ options = {
+ "job_name": "unitTest-build",
+ "account": os.environ["DERECHO_TESTING_ACCOUNT"],
+ "queue": os.environ["DERECHO_TESTING_QUEUE"],
+ "job_priority": os.environ["DERECHO_TESTING_PRIORITY"],
+ "walltime": "00:30:00",
+ "modules": module_names,
+ "mage_test_root": MAGE_TEST_ROOT,
+ "kaijuhome": KAIJUHOME,
+ "cmake_cmd": f"{cmake_environment} cmake {cmake_options} {KAIJUHOME}",
+ "make_cmd": "make gamera_mpi voltron_mpi allTests",
+ }
+
+ # Render the template.
+ pbs_content = pbs_template.render(options)
+
+ # Write the rendered file.
+ pbs_script_name = BUILD_PBS_SCRIPT
+ with open(pbs_script_name, "w", encoding="utf-8") as f:
+ f.write(pbs_content)
+
+ # Return the name of the script.
+ return pbs_script_name
+
+
+def create_data_generation_pbs_script(module_list_file: str):
+ """Create the PBS script to generate the test data.
+
+ Create the PBS script to generate the test data.
+
+ Parameters
+ ----------
+ module_list_file : str
+ Path to module list file for the build.
+
+ Returns
+ -------
+ pbs_script_name : str
+ Name of PBS script file.
+
+ Raises
+ ------
+ None
+ """
+ # Read this module list file, extracting cmake environment and
+ # options, if any.
+ path = os.path.join(MODULE_LIST_DIRECTORY, module_list_file)
+ module_names, cmake_environment, cmake_options = (
+ common.read_build_module_list_file(path)
+ )
+
+ # Read the template for the PBS script.
+ with open(DATA_GENERATION_PBS_TEMPLATE, "r", encoding="utf-8") as f:
+ template_content = f.read()
+ pbs_template = Template(template_content)
+
+ # Create the options dictionary for the template.
+ options = {
+ "job_name": "unitTest-genTestData",
+ "account": os.environ["DERECHO_TESTING_ACCOUNT"],
+ "queue": os.environ["DERECHO_TESTING_QUEUE"],
+ "job_priority": os.environ["DERECHO_TESTING_PRIORITY"],
+ "walltime": "00:30:00",
+ "modules": module_names,
+ "kaijuhome": KAIJUHOME,
+ "mage_test_root": MAGE_TEST_ROOT,
+ }
+
+ # Render the template.
+ pbs_content = pbs_template.render(options)
+
+ # Write the rendered file.
+ pbs_script_name = DATA_GENERATION_PBS_SCRIPT
+ with open(pbs_script_name, "w", encoding="utf-8") as f:
+ f.write(pbs_content)
+
+ # Return the name of the script.
+ return pbs_script_name
+
+
+def create_case_tests_pbs_script(module_list_file: str):
+ """Create the PBS script to run the case tests.
+
+ Create the PBS script to run the case tests.
+
+ Parameters
+ ----------
+ module_list_file : str
+ Path to module list file for the build.
+
+ Returns
+ -------
+ pbs_script_name : str
+ Name of PBS script file.
+
+ Raises
+ ------
+ None
+ """
+ # Read this module list file, extracting cmake environment and
+ # options, if any.
+ path = os.path.join(MODULE_LIST_DIRECTORY, module_list_file)
+ module_names, cmake_environment, cmake_options = (
+ common.read_build_module_list_file(path)
+ )
+
+ # Read the template for the PBS script.
+ with open(RUN_CASE_TESTS_PBS_TEMPLATE, "r", encoding="utf-8") as f:
+ template_content = f.read()
+ pbs_template = Template(template_content)
+
+ # Create the options dictionary for the template.
+ options = {
+ "job_name": "unitTest-caseTests",
+ "account": os.environ["DERECHO_TESTING_ACCOUNT"],
+ "queue": os.environ["DERECHO_TESTING_QUEUE"],
+ "job_priority": os.environ["DERECHO_TESTING_PRIORITY"],
+ "walltime": "00:40:00",
+ "modules": module_names,
+ "kaijuhome": KAIJUHOME,
+ "mage_test_root": MAGE_TEST_ROOT,
+ }
+
+ # Render the template.
+ pbs_content = pbs_template.render(options)
+
+ # Write the rendered file.
+ pbs_script_name = RUN_CASE_TESTS_PBS_SCRIPT
+ with open(pbs_script_name, "w", encoding="utf-8") as f:
+ f.write(pbs_content)
+
+ # Return the name of the script.
+ return pbs_script_name
+
+
+def create_noncase_tests1_pbs_script(module_list_file: str):
+ """Create the PBS script to run the first noncase tests.
+
+ Create the PBS script to run the first noncase tests.
+
+ Parameters
+ ----------
+ module_list_file : str
+ Path to module list file for the build.
+
+ Returns
+ -------
+ pbs_script_name : str
+ Name of PBS script file.
+
+ Raises
+ ------
+ None
+ """
+ # Read this module list file, extracting cmake environment and
+ # options, if any.
+ path = os.path.join(MODULE_LIST_DIRECTORY, module_list_file)
+ module_names, cmake_environment, cmake_options = (
+ common.read_build_module_list_file(path)
+ )
+
+ # Read the template for the PBS script.
+ with open(RUN_NON_CASE_TESTS_1_PBS_TEMPLATE, "r", encoding="utf-8") as f:
+ template_content = f.read()
+ pbs_template = Template(template_content)
+
+ # Create the options dictionary for the template.
+ options = {
+ "job_name": "unitTest-noncaseTests1",
+ "account": os.environ["DERECHO_TESTING_ACCOUNT"],
+ "queue": os.environ["DERECHO_TESTING_QUEUE"],
+ "job_priority": os.environ["DERECHO_TESTING_PRIORITY"],
+ "walltime": "01:00:00",
+ "modules": module_names,
+ "kaijuhome": KAIJUHOME,
+ }
+
+ # Render the template.
+ pbs_content = pbs_template.render(options)
+
+ # Write the rendered file.
+ pbs_script_name = RUN_NON_CASE_TESTS_1_PBS_SCRIPT
+ with open(pbs_script_name, "w", encoding="utf-8") as f:
+ f.write(pbs_content)
+
+ # Return the name of the script.
+ return pbs_script_name
+
+
+def create_noncase_tests2_pbs_script(module_list_file: str):
+ """Create the PBS script to run the second noncase tests.
+
+ Create the PBS script to run the second noncase tests.
+
+ Parameters
+ ----------
+ module_list_file : str
+ Path to module list file for the build.
+
+ Returns
+ -------
+ pbs_script_name : str
+ Name of PBS script file.
+
+ Raises
+ ------
+ None
+ """
+ # Read this module list file, extracting cmake environment and
+ # options, if any.
+ path = os.path.join(MODULE_LIST_DIRECTORY, module_list_file)
+ module_names, cmake_environment, cmake_options = (
+ common.read_build_module_list_file(path)
+ )
+
+ # Read the template for the PBS script.
+ with open(RUN_NON_CASE_TESTS_2_PBS_TEMPLATE, "r", encoding="utf-8") as f:
+ template_content = f.read()
+ pbs_template = Template(template_content)
+
+ # Create the options dictionary for the template.
+ options = {
+ "job_name": "unitTest-noncaseTests2",
+ "account": os.environ["DERECHO_TESTING_ACCOUNT"],
+ "queue": os.environ["DERECHO_TESTING_QUEUE"],
+ "job_priority": os.environ["DERECHO_TESTING_PRIORITY"],
+ "walltime": "12:00:00",
+ "modules": module_names,
+ "kaijuhome": KAIJUHOME,
+ }
+
+ # Render the template.
+ pbs_content = pbs_template.render(options)
+
+ # Write the rendered file.
+ pbs_script_name = RUN_NON_CASE_TESTS_2_PBS_SCRIPT
+ with open(pbs_script_name, "w", encoding="utf-8") as f:
+ f.write(pbs_content)
+
+ # Return the name of the script.
+ return pbs_script_name
+
+
+def create_report_pbs_script(module_list_file: str, args: dict):
+ """Create the PBS script to run the test report.
+
+ Create the PBS script to run the test report.
+
+ Parameters
+ ----------
+ module_list_file : str
+ Path to module list file for the build.
+ args : dict
+ Command-line options and values.
+
+ Returns
+ -------
+ pbs_script_name : str
+ Name of PBS script file.
+
+ Raises
+ ------
+ None
+ """
+ # Read this module list file, extracting cmake environment and
+ # options, if any.
+ path = os.path.join(MODULE_LIST_DIRECTORY, module_list_file)
+ module_names, cmake_environment, cmake_options = (
+ common.read_build_module_list_file(path)
+ )
+
+ # Read the template for the PBS script.
+ with open(UNIT_TEST_REPORT_PBS_TEMPLATE, "r", encoding="utf-8") as f:
+ template_content = f.read()
+ pbs_template = Template(template_content)
+
+ # Assemble the report options string.
+ report_options = ""
+ if args["debug"]:
+ report_options += " -d"
+ if args["loud"]:
+ report_options += " -l"
+ if args["slack_on_fail"]:
+ report_options += " -s"
+ if args["test"]:
+ report_options += " -t"
+ if args["verbose"]:
+ report_options += " -v"
+
+ # Create the options dictionary for the template.
+ options = {
+ "job_name": "unitTest-report",
+ "account": os.environ["DERECHO_TESTING_ACCOUNT"],
+ "queue": os.environ["DERECHO_TESTING_QUEUE"],
+ "job_priority": os.environ["DERECHO_TESTING_PRIORITY"],
+ "walltime": "00:10:00",
+ "modules": module_names,
+ "conda_environment": os.environ["CONDA_ENVIRONMENT"],
+ "kaijuhome": KAIJUHOME,
+ "mage_test_set_root": MAGE_TEST_SET_ROOT,
+ "slack_bot_token": os.environ["SLACK_BOT_TOKEN"],
+ "branch_or_commit": os.environ["BRANCH_OR_COMMIT"],
+ "report_options": report_options,
+ }
+
+ # Render the template.
+ pbs_content = pbs_template.render(options)
+
+ # Write the rendered file.
+ pbs_script_name = UNIT_TEST_REPORT_PBS_SCRIPT
+ with open(pbs_script_name, "w", encoding="utf-8") as f:
+ f.write(pbs_content)
+
+ # Return the name of the script.
+ return pbs_script_name
+
+
+def qsub(pbs_script: str, qsub_options: str = ""):
+ """Submit a PBS script.
+
+ Submit a PBS script.
+
+ Parameters
+ ----------
+ pbs_script : str
+ Path to script to submit.
+ qsub_options : str
+ Options for qsub command.
+
+ Returns
+ -------
+ job_id : int
+ PBS job ID
+
+ Raises
+ ------
+ subprocess.CalledProcessError
+        If an error occurs when running the qsub command.
+ """
+ # Assemble the command.
+ cmd = f"qsub {qsub_options} {pbs_script}"
+ print(f"{cmd=}")
+
+ # Submit the job.
+ try:
+ cproc = subprocess.run(cmd, shell=True, check=True,
+ text=True, capture_output=True)
+ except subprocess.CalledProcessError as e:
+ print(f"qsub failed with return code {e.returncode} for script "
+ f"{pbs_script}.\n", file=sys.stderr)
+ print(e.stderr)
+ raise
+ job_id = cproc.stdout.split(".")[0]
+
+ # Return the job ID.
+ return job_id
+
+
+def unitTest(args: dict = None):
+ """Run the unit tests.
+
+ Run the unit tests.
+
+ Parameters
+ ----------
+ args : dict
+ Dictionary of program options.
+
+ Returns
+ -------
+ None
+
+ Raises
+ ------
+ None
+ """
+ # Local convenience variables.
+ debug = args["debug"]
+ be_loud = args["loud"]
+ slack_on_fail = args["slack_on_fail"]
+ is_test = args["test"]
+ verbose = args["verbose"]
+
+ # ------------------------------------------------------------------------
+
+ if debug:
+ print(f"Starting {sys.argv[0]} at {datetime.datetime.now()}")
+ print(f"Current directory is {os.getcwd()}")
+
+ # ------------------------------------------------------------------------
+
+ # Make a directory to hold all of the Fortran unit tests.
+ if verbose:
+ print(f"Creating {UNIT_TEST_DIRECTORY}.")
+ os.mkdir(UNIT_TEST_DIRECTORY)
+
+ # ------------------------------------------------------------------------
+
+ # Make a list of module sets to build with.
+ if verbose:
+ print(f"Reading module set list from {UNIT_TEST_LIST_FILE}.")
+
+ # Read the list of module sets to use for unit tests.
+ with open(UNIT_TEST_LIST_FILE, encoding="utf-8") as f:
+ lines = f.readlines()
+ module_list_files = [_.rstrip() for _ in lines]
+ if debug:
+ print(f"module_list_files = {module_list_files}")
+
+ # ------------------------------------------------------------------------
+
+ # Create a list for submission status.
+ submit_ok = [False]*len(module_list_files)
+
+ # Run the unit tests with each set of modules.
+ for (i_module_set, module_list_file) in enumerate(module_list_files):
+ if verbose:
+ print(f"Running unit tests with module set {module_list_file}.")
+
+ # Extract the name of the list.
+        module_set_name = module_list_file.removesuffix(".lst")
+ if debug:
+ print(f"{module_set_name=}")
+
+ # Make a directory for this test, and go there.
+ dir_name = f"{UNIT_TEST_DIRECTORY_PREFIX}{module_set_name}"
+ build_directory = os.path.join(UNIT_TEST_DIRECTORY, dir_name)
+ if debug:
+ print(f"{build_directory=}")
+ os.mkdir(build_directory)
+ os.chdir(build_directory)
+
+ # Create the PBS script for the build job.
+ build_pbs_script = create_build_pbs_script(module_list_file)
+
+ # Create the PBS script for data generation.
+ data_generation_pbs_script = create_data_generation_pbs_script(
+ module_list_file
+ )
+
+ # Create the PBS script for the case tests.
+ run_case_tests_pbs_script = create_case_tests_pbs_script(
+ module_list_file
+ )
+
+ # Create the PBS script for the first non-case tests.
+ run_noncase_tests1_pbs_script = create_noncase_tests1_pbs_script(
+ module_list_file
+ )
+
+ # Create the PBS script for the second non-case tests.
+ run_noncase_tests2_pbs_script = create_noncase_tests2_pbs_script(
+ module_list_file
+ )
+
+ # Create the PBS script for the unit test report.
+ run_report_pbs_script = create_report_pbs_script(
+ module_list_file, args
+ )
+
+ # Submit the build job.
+ qsub_options = ""
+ build_job_id = qsub(
+ build_pbs_script, qsub_options
+ )
+
+ # Submit the data generation job, to run after the build job is done.
+ qsub_options = f"-W depend=afterany:{build_job_id}"
+ data_generation_job_id = qsub(
+ data_generation_pbs_script, qsub_options
+ )
+
+ # Submit the case tests job, after the data generation job finishes.
+ qsub_options = f"-W depend=afterany:{data_generation_job_id}"
+ case_tests_job_id = qsub(
+ run_case_tests_pbs_script, qsub_options
+ )
+
+ # Submit the first noncase tests job, after the data generation job
+ # finishes.
+ qsub_options = f"-W depend=afterany:{data_generation_job_id}"
+ noncase_tests1_job_id = qsub(
+ run_noncase_tests1_pbs_script, qsub_options
+ )
+
+ # Submit the second noncase tests job, after the data generation job
+ # finishes.
+ qsub_options = f"-W depend=afterany:{data_generation_job_id}"
+ noncase_tests2_job_id = qsub(
+ run_noncase_tests2_pbs_script, qsub_options
+ )
+
+        # Submit the test report job, which runs after all test jobs.
+ qsub_options = (
+ "-W depend=afterany"
+ + f":{case_tests_job_id}"
+ + f":{noncase_tests1_job_id}"
+ + f":{noncase_tests2_job_id}"
+ )
+ report_job_id = qsub(
+ run_report_pbs_script, qsub_options
+ )
+
+ # Record the job IDs for this module set in a file.
+ with open(JOB_LIST_FILE, "w", encoding="utf-8") as f:
+ f.write(f"{build_job_id}\n")
+ f.write(f"{data_generation_job_id}\n")
+ f.write(f"{case_tests_job_id}\n")
+ f.write(f"{noncase_tests1_job_id}\n")
+ f.write(f"{noncase_tests2_job_id}\n")
+ f.write(f"{report_job_id}\n")
+
+ # Record the submit status for this module set.
+ submit_ok[i_module_set] = True
+
+ # End of loop over module sets
+
+ # ------------------------------------------------------------------------
+
+ # Detail the test results
+ test_report_details_string = ""
+ test_report_details_string += (
+ f"Unit test results are on `derecho` in `{UNIT_TEST_DIRECTORY}`.\n"
+ )
+ for (i_module_set, module_list_file) in enumerate(module_list_files):
+ if not submit_ok[i_module_set]:
+ test_report_details_string += (
+ f"Unit test submit for module set `{module_list_file}`: "
+ "*FAILED*"
+ )
+
+ # Summarize the test results
+ test_report_summary_string = (
+        f"Unit test submission for `{os.environ['BRANCH_OR_COMMIT']}`: "
+ )
+ if False in submit_ok:
+ test_report_summary_string += "*FAILED*"
+ else:
+ test_report_summary_string += "*PASSED*"
+
+ # Print the test results summary and details.
+ print(test_report_summary_string)
+ print(test_report_details_string)
+
+ # If a test failed, or loud mode is on, post report to Slack.
+ if (slack_on_fail and "FAILED" in test_report_details_string) or be_loud:
+ slack_client = common.slack_create_client()
+ slack_response_summary = common.slack_send_message(
+ slack_client, test_report_summary_string, is_test=is_test
+ )
+ thread_ts = slack_response_summary["ts"]
+ slack_response_summary = common.slack_send_message(
+ slack_client, test_report_details_string, thread_ts=thread_ts,
+ is_test=is_test
+ )
+
+ # ------------------------------------------------------------------------
+
+ if debug:
+ print(f"Ending {sys.argv[0]} at {datetime.datetime.now()}")
def main():
- """Begin main program.
+ """Main program code.
This is the main program code.
@@ -177,8 +739,7 @@ def main():
Raises
------
- subprocess.CalledProcessError
- If an exception occurs in subprocess.run()
+ None
"""
# Set up the command-line parser.
parser = common.create_command_line_parser(DESCRIPTION)
@@ -186,540 +747,15 @@ def main():
# Parse the command-line arguments.
args = parser.parse_args()
if args.debug:
- print(f"args = {args}")
- debug = args.debug
- be_loud = args.loud
- slack_on_fail = args.slack_on_fail
- is_test = args.test
- verbose = args.verbose
+ print(f"{args=}")
# ------------------------------------------------------------------------
- if debug:
- print(f"Starting {sys.argv[0]} at {datetime.datetime.now()}")
- print(f"Current directory is {os.getcwd()}")
-
- # ------------------------------------------------------------------------
-
- # Make a directory to hold all of the Fortran unit tests.
- if verbose:
- print(f"Creating ${UNIT_TEST_DIRECTORY}.")
- os.mkdir(UNIT_TEST_DIRECTORY)
-
- # ------------------------------------------------------------------------
-
- # Make a copy of the pFUnit code under kaiju/external.
- if verbose:
- print('Copying compiled pFUnit binaries.')
- for directory in PFUNIT_BINARY_DIRECTORIES:
- if not os.path.exists(os.path.join(KAIJU_EXTERNAL_DIRECTORY, directory)):
- from_path = os.path.join(PFUNIT_HOME, directory)
- to_path = os.path.join(KAIJU_EXTERNAL_DIRECTORY, directory)
- if debug:
- print(f"Copying {from_path} to {to_path}.")
- shutil.copytree(from_path, to_path)
- else:
- if debug:
- print(f"pFUnit directory {directory} already exists.")
-
- # ------------------------------------------------------------------------
-
- # Make a list of module sets to build with.
- if verbose:
- print(f"Reading module set list from {UNIT_TEST_LIST_FILE}.")
-
- # Read the list of module sets to use for unit tests.
- with open(UNIT_TEST_LIST_FILE, encoding='utf-8') as f:
- lines = f.readlines()
- module_list_files = [_.rstrip() for _ in lines]
- if debug:
- print(f"module_list_files = {module_list_files}")
-
- # ------------------------------------------------------------------------
-
- if verbose:
- print('Reading templates for PBS scripts.')
-
- # Read the template for the PBS script used for the test data generation.
- with open(DATA_GENERATION_PBS_TEMPLATE, 'r', encoding='utf-8') as f:
- template_content = f.read()
- data_generation_pbs_template = Template(template_content)
- if debug:
- print(f"data_generation_pbs_template = {data_generation_pbs_template}")
-
- # Read the template for the PBS script used for the case tests.
- with open(RUN_CASE_TESTS_PBS_TEMPLATE, 'r', encoding='utf-8') as f:
- template_content = f.read()
- run_case_tests_pbs_template = Template(template_content)
- if debug:
- print(f"run_case_tests_pbs_template = {run_case_tests_pbs_template}")
-
- # Read the template for the PBS script used for the 1st non-case tests.
- with open(RUN_NON_CASE_TESTS_1_PBS_TEMPLATE, 'r', encoding='utf-8') as f:
- template_content = f.read()
- run_non_case_tests_1_pbs_template = Template(template_content)
- if debug:
- print('run_non_case_tests_1_pbs_template = '
- f"{run_non_case_tests_1_pbs_template}")
-
- # Read the template for the PBS script used for the 2nd non-case tests.
- with open(RUN_NON_CASE_TESTS_2_PBS_TEMPLATE, 'r', encoding='utf-8') as f:
- template_content = f.read()
- run_non_case_tests_2_pbs_template = Template(template_content)
- if debug:
- print('run_non_case_tests_2_pbs_template = '
- f"{run_non_case_tests_2_pbs_template}")
-
- # Read the template for the PBS script used for report generation.
- with open(UNIT_TEST_REPORT_PBS_TEMPLATE, 'r', encoding='utf-8') as f:
- template_content = f.read()
- unit_test_report_pbs_template = Template(template_content)
- if debug:
- print('unit_test_report_pbs_template = '
- f"{unit_test_report_pbs_template}")
-
- # ------------------------------------------------------------------------
-
- # Create the common make command for all module sets.
- make_cmd = 'make gamera_mpi voltron_mpi allTests'
- if debug:
- print(f"make_cmd = {make_cmd}")
-
- # Create the list for submit results. Only set to True if all qsub commands
- # for a set are OK.
- submit_ok = [False]*len(module_list_files)
- if debug:
- print(f"submit_ok = {submit_ok}")
-
- # Create a list of lists for job IDs. There are 5 job IDs per set - one for
- # data generration, case tests, non-case tests 1, non-case tests 2, and the
- # test report.
- job_ids = [[None, None, None, None, None]]*len(module_list_files)
- if debug:
- print(f"job_ids = {job_ids}")
-
- # Run the unit tests with each set of modules.
- for (i_module_set, module_list_file) in enumerate(module_list_files):
- if verbose:
- print('Performing unit tests tests with module set '
- f"{module_list_file}.")
-
- # Extract the name of the list.
- module_set_name = module_list_file.rstrip('.lst')
- if debug:
- print(f"module_set_name = {module_set_name}.")
-
- # --------------------------------------------------------------------
-
- # Read this module list file, extracting cmake environment and
- # options, if any.
- path = os.path.join(MODULE_LIST_DIRECTORY, module_list_file)
- if debug:
- print(f"path = {path}")
- module_names, cmake_environment, cmake_options = (
- common.read_build_module_list_file(path)
- )
- if debug:
- print(f"module_names = {module_names}")
- print(f"cmake_environment = {cmake_environment}")
- print(f"cmake_options = {cmake_options}")
-
- #
- # Extra argument needed for unit test build.
- cmake_options += ' -DCMAKE_BUILD_TYPE=RELWITHDEBINFO'
- if debug:
- print(f"cmake_options = {cmake_options}")
- #
-
- # Assemble the command to load the listed modules.
- module_cmd = (
- f"module --force purge; module load {' '.join(module_names)}"
- )
- if debug:
- print(f"module_cmd = {module_cmd}")
-
- # Make a directory for this test, and go there.
- dir_name = f"{UNIT_TEST_DIRECTORY_PREFIX}{module_set_name}"
- build_directory = os.path.join(UNIT_TEST_DIRECTORY, dir_name)
- if debug:
- print(f"build_directory = {build_directory}")
- os.mkdir(build_directory)
- os.chdir(build_directory)
-
- # Run cmake to build the Makefile.
- if verbose:
- print(
- 'Running cmake to create Makefile for module set'
- f" {module_set_name}."
- )
- cmd = (
- f"{module_cmd}; {cmake_environment} cmake {cmake_options}"
- f" {KAIJUHOME} >& cmake.out"
- )
- if debug:
- print(f"cmd = {cmd}")
- try:
- # NOTE: stdout and stderr goes cmake.out.
- cproc = subprocess.run(cmd, shell=True, check=True)
- except subprocess.CalledProcessError as e:
- print(
- f"ERROR: cmake for module set {module_set_name} failed.\n"
- f"e.cmd = {e.cmd}\n"
- f"e.returncode = {e.returncode}\n"
- f"See {os.path.join(build_directory, 'cmake.out')}"
- ' for output from cmake.\n'
- f"Skipping remaining steps for module set {module_set_name}",
- file=sys.stderr
- )
- continue
-
- # Run the build.
- if verbose:
- print(
- 'Running make to build kaiju for module set'
- f" {module_set_name}."
- )
- cmd = f"{module_cmd}; {make_cmd} >& make.out"
- if debug:
- print(f"cmd = {cmd}")
- try:
- # NOTE: stdout and stderr go into make.out.
- _ = subprocess.run(cmd, shell=True, check=True)
- except subprocess.CalledProcessError as e:
- print(
- f"ERROR: make for module set {module_set_name} failed.\n"
- f"e.cmd = {e.cmd}\n"
- f"e.returncode = {e.returncode}\n"
- f"See {os.path.join(build_directory, 'make.out')}"
- ' for output from make.\n'
- f"Skipping remaining steps for module set {module_set_name}",
- file=sys.stderr
- )
- continue
-
- # --------------------------------------------------------------------
-
- # Assemble common data to fill in the PBS templates.
- pbs_options = {}
- pbs_options['account'] = os.environ['DERECHO_TESTING_ACCOUNT']
- pbs_options['queue'] = os.environ['DERECHO_TESTING_QUEUE']
- pbs_options['job_priority'] = os.environ['DERECHO_TESTING_PRIORITY']
- pbs_options['modules'] = module_names
- pbs_options['kaijuhome'] = KAIJUHOME
- pbs_options['branch_or_commit'] = BRANCH_OR_COMMIT
- pbs_options["conda_environment"] = os.environ["CONDA_ENVIRONMENT"]
-
- # Go to the bin directory for testing.
- os.chdir(BUILD_BIN_DIR)
-
- # --------------------------------------------------------------------
-
- # Copy in inputs for unit test data generation.
- if os.path.exists(UNIT_TEST_DATA_INPUT_DIRECTORY):
- for filename in UNIT_TEST_DATA_INPUT_FILES:
- from_path = os.path.join(
- UNIT_TEST_DATA_INPUT_DIRECTORY, filename
- )
- to_path = os.path.join('.', filename)
- if debug:
- print(f"Copying {from_path} to {to_path}.")
- shutil.copyfile(from_path, to_path)
- else:
- cmd = "cda2wind -t0 2016-08-09T09:00:00 -t1 2016-08-09T11:00:00"
- if debug:
- print(f"cmd = {cmd}")
- cproc = subprocess.run(cmd, shell=True, check=True,
- text=True, capture_output=True)
- cmd = "genLFM"
- if debug:
- print(f"cmd = {cmd}")
- cproc = subprocess.run(cmd, shell=True, check=True,
- text=True, capture_output=True)
- cmd = "genRAIJU"
- if debug:
- print(f"cmd = {cmd}")
- cproc = subprocess.run(cmd, shell=True, check=True,
- text=True, capture_output=True)
-
- # Set options specific to the data generation job, then render the
- # template.
- pbs_options['job_name'] = 'genTestData'
- pbs_options['walltime'] = '00:30:00'
- pbs_content = data_generation_pbs_template.render(pbs_options)
- if verbose:
- print(f"Creating {DATA_GENERATION_PBS_SCRIPT}.")
- with open(DATA_GENERATION_PBS_SCRIPT, 'w', encoding='utf-8') as f:
- f.write(pbs_content)
-
- # Run the data generation job.
- cmd = f"qsub {DATA_GENERATION_PBS_SCRIPT}"
- if debug:
- print(f"cmd = {cmd}")
- try:
- cproc = subprocess.run(cmd, shell=True, check=True,
- text=True, capture_output=True)
- except subprocess.CalledProcessError as e:
- print('ERROR: qsub failed.\n'
- f"e.cmd = {e.cmd}\n"
- f"e.returncode = {e.returncode}\n"
- 'See test log for output.\n'
- 'Skipping remaining steps for module set '
- f"{module_set_name}.",
- file=sys.stderr)
- continue
- job_id = cproc.stdout.split('.')[0]
- job_ids[i_module_set][0] = job_id
- if debug:
- print(f"job_id = {job_id}")
- print(f"job_ids = {job_ids}")
-
- # --------------------------------------------------------------------
-
- # Set options specific to the case tests job, then render the
- # template.
- pbs_options['job_name'] = 'runCaseTests'
- pbs_options['walltime'] = '00:40:00'
- pbs_content = run_case_tests_pbs_template.render(pbs_options)
- if verbose:
- print(f"Creating {RUN_CASE_TESTS_PBS_SCRIPT}.")
- with open(RUN_CASE_TESTS_PBS_SCRIPT, 'w', encoding='utf-8') as f:
- f.write(pbs_content)
-
- # Run the case tests job if data was generated.
- cmd = (
- f"qsub -W depend=afterok:{job_ids[i_module_set][0]} "
- f"{RUN_CASE_TESTS_PBS_SCRIPT}"
- )
- if debug:
- print(f"cmd = {cmd}")
- try:
- cproc = subprocess.run(cmd, shell=True, check=True,
- text=True, capture_output=True)
- except subprocess.CalledProcessError as e:
- print('ERROR: qsub failed.\n'
- f"e.cmd = {e.cmd}\n"
- f"e.returncode = {e.returncode}\n"
- 'See test log for output.\n'
- 'Skipping remaining steps for module set '
- f"{module_set_name}.",
- file=sys.stderr)
- continue
- job_id = cproc.stdout.split('.')[0]
- if debug:
- print(f"job_id = {job_id}")
- job_ids[i_module_set][1] = job_id
-
- # --------------------------------------------------------------------
-
- # Set options specific to the 1st non-case tests job, then render the
- # template.
- pbs_options['job_name'] = 'runNonCaseTests1'
- pbs_options['walltime'] = '01:00:00'
- if verbose:
- print(f"Creating {RUN_NON_CASE_TESTS_1_PBS_SCRIPT}.")
- pbs_content = run_non_case_tests_1_pbs_template.render(pbs_options)
- with open(RUN_NON_CASE_TESTS_1_PBS_SCRIPT, 'w', encoding='utf-8') as f:
- f.write(pbs_content)
-
- # Run the 1st non-case tests job if data was generated.
- cmd = (
- f"qsub -W depend=afterok:{job_ids[i_module_set][0]} "
- f"{RUN_NON_CASE_TESTS_1_PBS_SCRIPT}"
- )
- if debug:
- print(f"cmd = {cmd}")
- try:
- cproc = subprocess.run(cmd, shell=True, check=True,
- text=True, capture_output=True)
- except subprocess.CalledProcessError as e:
- print('ERROR: qsub failed.\n'
- f"e.cmd = {e.cmd}\n"
- f"e.returncode = {e.returncode}\n"
- 'See test log for output.\n'
- 'Skipping remaining steps for module set '
- f"{module_set_name}.",
- file=sys.stderr)
- continue
- job_id = cproc.stdout.split('.')[0]
- if debug:
- print(f"job_id = {job_id}")
- job_ids[i_module_set][2] = job_id
-
- # --------------------------------------------------------------------
-
- # Set options specific to the 2nd non-case tests job, then render the
- # template.
- pbs_options['job_name'] = 'runNonCaseTests2'
- pbs_options['walltime'] = '12:00:00'
- pbs_content = run_non_case_tests_2_pbs_template.render(pbs_options)
- with open(RUN_NON_CASE_TESTS_2_PBS_SCRIPT, 'w', encoding='utf-8') as f:
- f.write(pbs_content)
-
- # Run the 2nd non-case tests job if data was generated.
- cmd = (
- f"qsub -W depend=afterok:{job_ids[i_module_set][0]} "
- f"{RUN_NON_CASE_TESTS_2_PBS_SCRIPT}"
- )
- if debug:
- print(f"cmd = {cmd}")
- try:
- cproc = subprocess.run(cmd, shell=True, check=True,
- text=True, capture_output=True)
- except subprocess.CalledProcessError as e:
- print('ERROR: qsub failed.\n'
- f"e.cmd = {e.cmd}\n"
- f"e.returncode = {e.returncode}\n"
- 'See test log for output.\n'
- 'Skipping remaining steps for module set '
- f"{module_set_name}.",
- file=sys.stderr)
- continue
- job_id = cproc.stdout.split('.')[0]
- if debug:
- print(f"job_id = {job_id}")
- job_ids[i_module_set][3] = job_id
-
- # --------------------------------------------------------------------
-
- # Set options specific to the report generation job, then render the
- # template.
- pbs_options['job_name'] = 'unitTestReport'
- pbs_options['walltime'] = '00:10:00'
- pbs_options['slack_bot_token'] = os.environ['SLACK_BOT_TOKEN']
- pbs_options['mage_test_root'] = os.environ['MAGE_TEST_ROOT']
- pbs_options['mage_test_set_root'] = os.environ['MAGE_TEST_SET_ROOT']
- pbs_options['report_options'] = ''
- if debug:
- pbs_options['report_options'] += ' -d'
- if be_loud:
- pbs_options['report_options'] += ' -l'
- if slack_on_fail:
- pbs_options['report_options'] += ' -s'
- if is_test:
- pbs_options['report_options'] += ' -t'
- if verbose:
- pbs_options['report_options'] += ' -v'
- pbs_content = unit_test_report_pbs_template.render(pbs_options)
- with open(UNIT_TEST_REPORT_PBS_SCRIPT, 'w', encoding='utf-8') as f:
- f.write(pbs_content)
-
- # Run the report generation job if all others ran OK.
- cmd = (
- f"qsub -W depend=afterok:{':'.join(job_ids[i_module_set][1:4])} "
- f"{UNIT_TEST_REPORT_PBS_SCRIPT}"
- )
- if debug:
- print(f"cmd = {cmd}")
- try:
- cproc = subprocess.run(cmd, shell=True, check=True,
- text=True, capture_output=True)
- except subprocess.CalledProcessError as e:
- print('ERROR: qsub failed.\n'
- f"e.cmd = {e.cmd}\n"
- f"e.returncode = {e.returncode}\n"
- 'See test log for output.\n'
- 'Skipping remaining steps for module set '
- f"{module_set_name}.",
- file=sys.stderr)
- continue
- job_id = cproc.stdout.split('.')[0]
- if debug:
- print(f"job_id = {job_id}")
- job_ids[i_module_set][4] = job_id
-
- # --------------------------------------------------------------------
-
- # Record the job IDs for this module set in a file.
- if verbose:
- print(f"Saving job IDs for module set {module_set_name} "
- f"in {JOB_LIST_FILE}.")
- with open(JOB_LIST_FILE, 'w', encoding='utf-8') as f:
- for job_id in job_ids[i_module_set]:
- f.write(f"{job_id}\n")
-
- # This module set worked.
- submit_ok[i_module_set] = True
-
- # End of loop over module sets
- if debug:
- print(f"submit_ok = {submit_ok}")
- print(f"job_ids = {job_ids}")
-
- # ------------------------------------------------------------------------
-
- # Detail the test results
- test_report_details_string = ''
- test_report_details_string += (
- f"Test results are on `derecho` in `{UNIT_TEST_DIRECTORY}`.\n"
- )
- for (i_module_set, module_list_file) in enumerate(module_list_files):
- if not submit_ok[i_module_set]:
- test_report_details_string += (
- f"Module set `{module_list_file}`: *FAILED*"
- )
- continue
- test_report_details_string += (
- f"`{DATA_GENERATION_PBS_SCRIPT}` for module set "
- f"`{module_list_file}` submitted as PBS job "
- f"{job_ids[i_module_set][0]}.\n"
- )
- test_report_details_string += (
- f"`{RUN_CASE_TESTS_PBS_SCRIPT}` for module set "
- f"`{module_list_file}` submitted as PBS job "
- f"{job_ids[i_module_set][1]}.\n"
- )
- test_report_details_string += (
- f"`{RUN_NON_CASE_TESTS_1_PBS_SCRIPT}` for module set "
- f"`{module_list_file}` submitted as PBS job "
- f"{job_ids[i_module_set][2]}.\n"
- )
- test_report_details_string += (
- f"`{RUN_NON_CASE_TESTS_2_PBS_SCRIPT}` for module set "
- f"`{module_list_file}` submitted as PBS job "
- f"{job_ids[i_module_set][3]}.\n"
- )
- test_report_details_string += (
- f"`{UNIT_TEST_REPORT_PBS_SCRIPT}` for module set "
- f"`{module_list_file}` submitted as PBS job "
- f"{job_ids[i_module_set][4]}.\n"
- )
-
- # Summarize the test results
- test_report_summary_string = (
- f"Unit test submission for `{os.environ['BRANCH_OR_COMMIT']}`: "
- )
- if 'FAILED' in test_report_details_string:
- test_report_summary_string += '*FAILED*'
- else:
- test_report_summary_string += '*PASSED*'
-
- # Print the test results summary and details.
- print(test_report_summary_string)
- print(test_report_details_string)
-
- # If a test failed, or loud mode is on, post report to Slack.
- if (slack_on_fail and 'FAILED' in test_report_details_string) or be_loud:
- slack_client = common.slack_create_client()
- if debug:
- print(f"slack_client = {slack_client}")
- slack_response_summary = common.slack_send_message(
- slack_client, test_report_summary_string, is_test=is_test
- )
- if debug:
- print(f"slack_response_summary = {slack_response_summary}")
- thread_ts = slack_response_summary['ts']
- slack_response_summary = common.slack_send_message(
- slack_client, test_report_details_string, thread_ts=thread_ts,
- is_test=is_test
- )
- if debug:
- print(f"slack_response_summary = {slack_response_summary}")
-
- # ------------------------------------------------------------------------
-
- if debug:
- print(f"Ending {sys.argv[0]} at {datetime.datetime.now()}")
+ # Call the main program logic. Note that the Namespace object (args)
+ # returned from the option parser is converted to a dict using vars().
+ unitTest(vars(args))
-if __name__ == '__main__':
+if __name__ == "__main__":
+    # Begin main program.
main()
diff --git a/testingScripts/unitTestReport.py b/testingScripts/unitTestReport.py
index 2fdc61e4..944a5518 100644
--- a/testingScripts/unitTestReport.py
+++ b/testingScripts/unitTestReport.py
@@ -111,8 +111,7 @@ def main():
print(f"Checking unit test results in {unit_test_directory}.")
# Move to the directory containing the unit test results.
- path = os.path.join(UNIT_TEST_DIRECTORY, unit_test_directory,
- BUILD_BIN_DIR)
+ path = os.path.join(UNIT_TEST_DIRECTORY, unit_test_directory)
if debug:
print(f"path = {path}")
os.chdir(path)
@@ -136,19 +135,27 @@ def main():
# NOTE: This needs to be reorganized.
# Compute the names of the job log files.
- job_file_0 = f"genTestData.o{job_ids[0]}" # 0 OKs
- job_file_1 = f"runCaseTests.o{job_ids[1]}" # 2 OKs
- job_file_2 = f"runNonCaseTests1.o{job_ids[2]}" # 6 OKs
- job_file_3 = f"runNonCaseTests2.o{job_ids[3]}" # 1 OK
- if debug:
- print(f"job_file_0 = {job_file_0}")
- print(f"job_file_1 = {job_file_1}")
- print(f"job_file_2 = {job_file_2}")
- print(f"job_file_3 = {job_file_3}")
+ # 0 OKs
+ job_file_build = f"../unitTest-build.o{job_ids[0]}"
+ # 0 OKs
+ job_file_genTestData = f"../unitTest-genTestData.o{job_ids[1]}"
+ # 2 OKs
+ job_file_caseTests = f"../unitTest-caseTests.o{job_ids[2]}"
+ # 6 OKs
+ job_file_noncaseTests1 = f"../unitTest-noncaseTests1.o{job_ids[3]}"
+ # 1 OK
+ job_file_noncaseTests2 = f"../unitTest-noncaseTests2.o{job_ids[4]}"
# Combine the results of each test log file.
+ os.chdir("bin")
bigFile = []
- job_files = [job_file_0, job_file_1, job_file_2, job_file_3]
+ job_files = [
+ job_file_build,
+ job_file_genTestData,
+ job_file_caseTests,
+ job_file_noncaseTests1,
+ job_file_noncaseTests2,
+ ]
for job_file in job_files:
with open(job_file, 'r', encoding='utf-8') as f:
bigFile += f.readlines()
@@ -234,12 +241,14 @@ def main():
)
if debug:
print(f"slack_response_summary = {slack_response_summary}")
-
+
# Also write a summary file to the root folder of this test
- with open(os.path.join(MAGE_TEST_SET_ROOT,'testSummary.out'), 'w', encoding='utf-8') as f:
+ with open(os.path.join(
+ MAGE_TEST_SET_ROOT, 'testSummary.out'), 'w', encoding='utf-8'
+ ) as f:
f.write(test_report_details_string)
f.write('\n')
-
+
# ------------------------------------------------------------------------
if debug:
diff --git a/tests/genTestData-template.pbs b/tests/genTestData-template.pbs
index 3a956b54..81073100 100644
--- a/tests/genTestData-template.pbs
+++ b/tests/genTestData-template.pbs
@@ -9,6 +9,9 @@
#PBS -j oe
#PBS -m abe
+# Abort script on any error.
+set -e
+
echo "Job $PBS_JOBID started at `date` on `hostname` in directory `pwd`."
echo 'Loading modules.'
@@ -28,6 +31,16 @@ export KMP_STACKSIZE=128M
echo 'The active environment variables are:'
printenv
+# Move to the directory containing the compiled code.
+cd bin
+
+echo 'Copying input files.'
+test_inputs_dir="{{ mage_test_root }}/unit_test_inputs"
+cp "${test_inputs_dir}/bcwind.h5" .
+cp "${test_inputs_dir}/geo_mpi.xml" .
+cp "${test_inputs_dir}/lfmD.h5" .
+cp "${test_inputs_dir}/raijuconfig.h5" .
+
echo 'Generating data for testing.'
MPICOMMAND="mpiexec $KAIJUHOME/scripts/preproc/pinCpuCores.sh"
$MPICOMMAND ./voltron_mpi.x cmiD_deep_8_genRes.xml >& cmiD_deep_8_genRes.out
diff --git a/tests/runCaseTests-template.pbs b/tests/runCaseTests-template.pbs
index ffa4f010..0495317f 100644
--- a/tests/runCaseTests-template.pbs
+++ b/tests/runCaseTests-template.pbs
@@ -9,6 +9,9 @@
#PBS -j oe
#PBS -m abe
+# Abort script on any error.
+set -e
+
echo "Job $PBS_JOBID started at `date` on `hostname` in directory `pwd`."
echo 'Loading modules.'
@@ -28,6 +31,9 @@ export KMP_STACKSIZE=128M
echo 'The active environment variables are:'
printenv
+# Move to the directory containing the compiled code.
+cd bin
+
echo 'Running non-MPI test cases.'
./caseTests >& caseTests.out
echo 'Non-MPI test cases complete.'
diff --git a/tests/runNonCaseTests1-template.pbs b/tests/runNonCaseTests1-template.pbs
index e3787eab..31a91e4d 100644
--- a/tests/runNonCaseTests1-template.pbs
+++ b/tests/runNonCaseTests1-template.pbs
@@ -9,6 +9,9 @@
#PBS -j oe
#PBS -m abe
+# Abort script on any error.
+set -e
+
echo "Job $PBS_JOBID started at `date` on `hostname` in directory `pwd`."
echo 'Loading modules.'
@@ -28,6 +31,9 @@ export KMP_STACKSIZE=128M
echo 'The active environment variables are:'
printenv
+# Move to the directory containing the compiled code.
+cd bin
+
echo 'Running GAMERA tests.'
date
./gamTests >& gamTests.out
diff --git a/tests/runNonCaseTests2-template.pbs b/tests/runNonCaseTests2-template.pbs
index 24125fa8..9bf8bd6c 100644
--- a/tests/runNonCaseTests2-template.pbs
+++ b/tests/runNonCaseTests2-template.pbs
@@ -9,6 +9,9 @@
#PBS -j oe
#PBS -m abe
+# Abort script on any error.
+set -e
+
echo "Job $PBS_JOBID started at `date` on `hostname` in directory `pwd`."
echo 'Loading modules.'
@@ -28,6 +31,9 @@ export KMP_STACKSIZE=128M
echo 'The active environment variables are:'
printenv
+# Move to the directory containing the compiled code.
+cd bin
+
echo 'Running VOLTRON MPI tests.'
date
MPICOMMAND="mpiexec $KAIJUHOME/scripts/preproc/pinCpuCores.sh"
diff --git a/tests/unitTestReport-template.pbs b/tests/unitTestReport-template.pbs
index 7c5a13fd..dba33629 100644
--- a/tests/unitTestReport-template.pbs
+++ b/tests/unitTestReport-template.pbs
@@ -9,6 +9,9 @@
#PBS -j oe
#PBS -m abe
+# Abort script on any error.
+set -e
+
echo "Job $PBS_JOBID started at `date` on `hostname` in directory `pwd`."
echo 'Loading modules.'
@@ -40,6 +43,7 @@ else
module load conda
fi
conda activate {{ conda_environment }}
+echo "The active conda environment is ${CONDA_DEFAULT_ENV}."
echo 'Setting up MAGE environment.'
source {{ kaijuhome }}/scripts/setupEnvironment.sh
@@ -51,6 +55,13 @@ export BRANCH_OR_COMMIT={{ branch_or_commit }}
echo 'The active environment variables are:'
printenv
+# Move to the directory containing the compiled code.
+# Check cd explicitly: with 'set -e', a bare 'cd bin' would exit before the alert.
+if ! cd bin; then
+ python $KAIJUHOME/testingScripts/send_slack_message.py "Unit test build failed in `pwd`!"
+ exit 1
+fi
+
echo 'Generating unit test report.'
python $KAIJUHOME/testingScripts/unitTestReport.py {{ report_options }} >& unitTestReport.out