Using versions from Jeff.

This commit is contained in:
Eric Winter
2025-08-20 12:32:13 -06:00
parent a9909aa6da
commit d1a459ed80
11 changed files with 369 additions and 308 deletions

View File

@@ -5,31 +5,39 @@
This script runs a series of unit tests of the MAGE Fortran software. These
tests are run as PBS jobs on derecho. There will be one job which generates
the data for testing, then 3 dependent jobs that use the newly-generated data
for unit testing, then a job for the test report. The test jobs build the
kaiju software using precompiled pFUnit binaries:
https://github.com/Goddard-Fortran-Ecosystem/pFUnit
for unit testing, then a job for the test report.
There are 5 PBS job scripts used per module set, each generated from a jinja2
template. Together they form the dependency chain sketched just after this list.
1. genTestData.pbs - Data generation. Runs in about 10 minutes on 5 derecho
1. genTestData.pbs - Data generation. Runs in about 4-5 minutes on 5 derecho
nodes. Output in PBS job file genTestData.o*, and cmiD_deep_8_genRes.out.
2. runCaseTests.pbs - Runs in about 20 minutes on 1 derecho node. Only runs if
2. runCaseTests.pbs - Runs in about 35 minutes on 1 derecho node. Only runs if
genTestData.pbs completes successfully. Output in PBS log file
runCaseTests.o*, caseTests.out, and caseMpiTests.out.
3. runNonCaseTests1.pbs - Runs in about 3 minutes on 1 derecho node. Only runs
3. runNonCaseTests1.pbs - Runs in about 2 minutes on 1 derecho node. Only runs
if genTestData.pbs completes successfully. Output in PBS log file
runNonCaseTests1.o*, gamTests.out, mixTests.out, voltTests.out,
baseMpiTests.out, gamMpiTests.out, shgrTests.out.
NOTE: As of 2024-08-22, voltTests.out will contain errors like this when
run on the development branch:
...
[testebsquish.pf:151]
Squish Fake Projection Latitude value is wrong. Check Squish Processing and Output.
AssertEqual failure:
Expected: <147591.2572518899>
Actual: <143412.6753716097>
Difference: <-4178.581880280253> (greater than tolerance of .1000000000000000E-06)
...
4. runNonCaseTests2.pbs - Runs in about 30 minutes on 2 derecho nodes. Only
4. runNonCaseTests2.pbs - Runs in about XX minutes on 2 derecho nodes. Only
runs if genTestData.pbs completes successfully. Output in PBS log file
runNonCaseTests2.o*, and voltMpiTests.out.
5. unitTestReport.pbs - Report generation. Runs in a few seconds on 1
5. unitTestReport.pbs - Report generation. Runs in about XX minutes on 1
derecho node. Only runs if jobs 2-4 complete successfully. Output in PBS
log file unitTestReport.o*, and unitTestReport.out.
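The five jobs above form a PBS dependency chain: jobs 2-4 wait on job 1 via
afterok, and job 5 waits on jobs 2-4. A minimal sketch of that submission
pattern (the qsub helper here is hypothetical; the script itself assembles the
equivalent qsub commands with subprocess, as shown further down):

import subprocess

def qsub(script, depends=None):
    """Submit a PBS script, optionally held until the listed jobs succeed."""
    cmd = ['qsub']
    if depends:
        cmd += ['-W', 'depend=afterok:' + ':'.join(depends)]
    cmd.append(script)
    out = subprocess.run(cmd, check=True, text=True, capture_output=True).stdout
    return out.split('.')[0]  # qsub prints an ID like '1234567.server'

gen = qsub('genTestData.pbs')
case = qsub('runCaseTests.pbs', depends=[gen])
nc1 = qsub('runNonCaseTests1.pbs', depends=[gen])
nc2 = qsub('runNonCaseTests2.pbs', depends=[gen])
report = qsub('unitTestReport.pbs', depends=[case, nc1, nc2])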
@@ -41,6 +49,7 @@ Authors
-------
Jeff Garretson
Eric Winter
"""
@@ -61,157 +70,128 @@ import common
# Program constants
# Program description.
DESCRIPTION = "Script to perform kaiju Fortran unit testing."
# Default values for command-line arguments when none are supplied (such as
# when this code is called by external code).
args_default = {
"debug": False,
"loud": False,
"slack_on_fail": False,
"test": False,
"verbose": False,
}
DESCRIPTION = 'Script for MAGE Fortran unit testing'
# Home directory of kaiju installation
KAIJUHOME = os.environ["KAIJUHOME"]
KAIJUHOME = os.environ['KAIJUHOME']
# Root of directory tree for this set of tests.
KAIJU_TEST_SET_ROOT = os.environ["KAIJU_TEST_SET_ROOT"]
MAGE_TEST_SET_ROOT = os.environ['MAGE_TEST_SET_ROOT']
# Directory for unit tests
UNIT_TEST_DIRECTORY = os.path.join(KAIJU_TEST_SET_ROOT, "unitTest")
UNIT_TEST_DIRECTORY = os.path.join(MAGE_TEST_SET_ROOT, 'unitTest')
# Top-level directory for testing on derecho.
KAIJU_TEST_ROOT = os.environ["KAIJU_TEST_ROOT"]
MAGE_TEST_ROOT = os.environ['MAGE_TEST_ROOT']
# Home directory for pFUnit compiled code
PFUNIT_HOME = os.path.join(
KAIJU_TEST_ROOT, "pfunit", "pFUnit-4.2.0", "ifort-23-mpich-derecho"
MAGE_TEST_ROOT, 'pfunit', 'pFUnit-4.2.0', 'ifort-23-mpich-derecho'
)
# List of pFUnit directories to copy from PFUNIT_HOME into
# kaiju_private/external
PFUNIT_BINARY_DIRECTORIES = [
"FARGPARSE-1.1",
"GFTL-1.3",
"GFTL_SHARED-1.2",
"PFUNIT-4.2",
'FARGPARSE-1.1',
'GFTL-1.3',
'GFTL_SHARED-1.2',
'PFUNIT-4.2',
]
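Each of these prebuilt directories is copied from PFUNIT_HOME into kaiju's
external/ tree before the build (the copy loop appears later in this diff,
truncated). A minimal sketch of that step, assuming a plain recursive copy:

import os
import shutil

for directory in PFUNIT_BINARY_DIRECTORIES:
    to_path = os.path.join(KAIJU_EXTERNAL_DIRECTORY, directory)
    if not os.path.exists(to_path):
        from_path = os.path.join(PFUNIT_HOME, directory)
        shutil.copytree(from_path, to_path)  # copy the prebuilt pFUnit tree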
# Path to kaiju subdirectory for external code
KAIJU_EXTERNAL_DIRECTORY = os.path.join(KAIJUHOME, "external")
KAIJU_EXTERNAL_DIRECTORY = os.path.join(KAIJUHOME, 'external')
# Path to directory containing the test scripts
TEST_SCRIPTS_DIRECTORY = os.path.join(KAIJUHOME, "testingScripts")
TEST_SCRIPTS_DIRECTORY = os.path.join(KAIJUHOME, 'testingScripts')
# Path to directory containing module lists
MODULE_LIST_DIRECTORY = os.path.join(TEST_SCRIPTS_DIRECTORY,
"mage_build_test_modules")
'mage_build_test_modules')
# Name of file containing names of modules lists to use for unit tests
UNIT_TEST_LIST_FILE = os.path.join(MODULE_LIST_DIRECTORY, "unit_test.lst")
UNIT_TEST_LIST_FILE = os.path.join(MODULE_LIST_DIRECTORY, 'unit_test.lst')
# Path to directory containing the unit test scripts
UNIT_TEST_SCRIPTS_DIRECTORY = os.path.join(KAIJUHOME, "tests")
UNIT_TEST_SCRIPTS_DIRECTORY = os.path.join(KAIJUHOME, 'tests')
# Paths to jinja2 template files for PBS scripts.
DATA_GENERATION_PBS_TEMPLATE = os.path.join(
UNIT_TEST_SCRIPTS_DIRECTORY, "genTestData-template.pbs"
UNIT_TEST_SCRIPTS_DIRECTORY, 'genTestData-template.pbs'
)
RUN_CASE_TESTS_PBS_TEMPLATE = os.path.join(
UNIT_TEST_SCRIPTS_DIRECTORY, "runCaseTests-template.pbs"
UNIT_TEST_SCRIPTS_DIRECTORY, 'runCaseTests-template.pbs'
)
RUN_NON_CASE_TESTS_1_PBS_TEMPLATE = os.path.join(
UNIT_TEST_SCRIPTS_DIRECTORY, "runNonCaseTests1-template.pbs"
UNIT_TEST_SCRIPTS_DIRECTORY, 'runNonCaseTests1-template.pbs'
)
RUN_NON_CASE_TESTS_2_PBS_TEMPLATE = os.path.join(
UNIT_TEST_SCRIPTS_DIRECTORY, "runNonCaseTests2-template.pbs"
UNIT_TEST_SCRIPTS_DIRECTORY, 'runNonCaseTests2-template.pbs'
)
UNIT_TEST_REPORT_PBS_TEMPLATE = os.path.join(
UNIT_TEST_SCRIPTS_DIRECTORY, "unitTestReport-template.pbs"
UNIT_TEST_SCRIPTS_DIRECTORY, 'unitTestReport-template.pbs'
)
# Prefix for naming unit test directories
UNIT_TEST_DIRECTORY_PREFIX = "unitTest_"
UNIT_TEST_DIRECTORY_PREFIX = 'unitTest_'
# Name of build subdirectory containing binaries
BUILD_BIN_DIR = "bin"
BUILD_BIN_DIR = 'bin'
# Input files for unit tests
UNIT_TEST_DATA_INPUT_DIRECTORY = os.path.join(
KAIJU_TEST_ROOT, "unit_test_inputs"
os.environ['MAGE_TEST_ROOT'], 'unit_test_inputs'
)
UNIT_TEST_DATA_INPUT_FILES = [
"bcwind.h5",
# "geo_mpi.xml",
"lfmD.h5",
'bcwind.h5',
'geo_mpi.xml',
'lfmD.h5',
'raijuconfig.h5',
]
# Names of PBS scripts to create from templates.
DATA_GENERATION_PBS_SCRIPT = "genTestData.pbs"
RUN_CASE_TESTS_PBS_SCRIPT = "runCaseTests.pbs"
RUN_NON_CASE_TESTS_1_PBS_SCRIPT = "runNonCaseTests1.pbs"
RUN_NON_CASE_TESTS_2_PBS_SCRIPT = "runNonCaseTests2.pbs"
UNIT_TEST_REPORT_PBS_SCRIPT = "unitTestReport.pbs"
DATA_GENERATION_PBS_SCRIPT = 'genTestData.pbs'
RUN_CASE_TESTS_PBS_SCRIPT = 'runCaseTests.pbs'
RUN_NON_CASE_TESTS_1_PBS_SCRIPT = 'runNonCaseTests1.pbs'
RUN_NON_CASE_TESTS_2_PBS_SCRIPT = 'runNonCaseTests2.pbs'
UNIT_TEST_REPORT_PBS_SCRIPT = 'unitTestReport.pbs'
# Branch or commit (or tag) used for testing.
BRANCH_OR_COMMIT = os.environ["BRANCH_OR_COMMIT"]
BRANCH_OR_COMMIT = os.environ['BRANCH_OR_COMMIT']
# Name of file to hold job list.
JOB_LIST_FILE = "jobs.txt"
JOB_LIST_FILE = 'jobs.txt'
def create_command_line_parser():
"""Create the command-line argument parser.
Create the parser for command-line arguments.
Parameters
----------
None
Returns
-------
parser : argparse.ArgumentParser
Command-line argument parser for this script.
Raises
------
None
"""
parser = common.create_command_line_parser(DESCRIPTION)
return parser
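common.create_command_line_parser is defined elsewhere in the repository;
judging from the flags consumed below and the -d/-l/-s/-t/-v report options, it
plausibly resembles this hypothetical reconstruction:

import argparse

def create_command_line_parser(description):
    """Hypothetical sketch of the shared parser factory in common.py."""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-d', '--debug', action='store_true',
                        help='Print debugging output.')
    parser.add_argument('-l', '--loud', action='store_true',
                        help='Post the report to Slack even on success.')
    parser.add_argument('-s', '--slack_on_fail', action='store_true',
                        help='Post the report to Slack on failure.')
    parser.add_argument('-t', '--test', action='store_true',
                        help='Run in test mode.')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Print verbose output.')
    return parser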
def unitTest(args: dict = None):
def main():
"""Begin main program.
This is the main program code. This function can be called from other
python code.
This is the main program code.
Parameters
----------
args : dict
Dictionary of command-line options and equivalent options passed from
the calling function, and variables set by engage for makeitso.
None
Returns
-------
int 0 if OK, else 1
None
Raises
------
subprocess.CalledProcessError
If an exception occurs in subprocess.run()
"""
# Set missing arguments to defaults.
args = args_default | args
debug = args["debug"]
loud = args["loud"]
slack_on_fail = args["slack_on_fail"]
test = args["test"]
verbose = args["verbose"]
# Set up the command-line parser.
parser = common.create_command_line_parser(DESCRIPTION)
# Parse the command-line arguments.
args = parser.parse_args()
if args.debug:
print(f"args = {args}")
debug = args.debug
be_loud = args.loud
slack_on_fail = args.slack_on_fail
is_test = args.test
verbose = args.verbose
# ------------------------------------------------------------------------
@@ -228,9 +208,9 @@ def unitTest(args: dict = None):
# ------------------------------------------------------------------------
# Make a copy of the pFUnit code under external.
# Make a copy of the pFUnit code under kaiju/external.
if verbose:
print("Copying compiled pFUnit binaries.")
print('Copying compiled pFUnit binaries.')
for directory in PFUNIT_BINARY_DIRECTORIES:
if not os.path.exists(os.path.join(KAIJU_EXTERNAL_DIRECTORY, directory)):
from_path = os.path.join(PFUNIT_HOME, directory)
@@ -249,82 +229,85 @@ def unitTest(args: dict = None):
print(f"Reading module set list from {UNIT_TEST_LIST_FILE}.")
# Read the list of module sets to use for unit tests.
with open(UNIT_TEST_LIST_FILE, encoding="utf-8") as f:
with open(UNIT_TEST_LIST_FILE, encoding='utf-8') as f:
lines = f.readlines()
module_list_files = [_.rstrip() for _ in lines]
if debug:
print(f"{module_list_files=}")
print(f"module_list_files = {module_list_files}")
# ------------------------------------------------------------------------
if verbose:
print("Reading templates for PBS scripts.")
print('Reading templates for PBS scripts.')
# Read the template for the PBS script used for the test data generation.
with open(DATA_GENERATION_PBS_TEMPLATE, "r", encoding="utf-8") as f:
with open(DATA_GENERATION_PBS_TEMPLATE, 'r', encoding='utf-8') as f:
template_content = f.read()
data_generation_pbs_template = Template(template_content)
if debug:
print(f"{data_generation_pbs_template=}")
print(f"data_generation_pbs_template = {data_generation_pbs_template}")
# Read the template for the PBS script used for the case tests.
with open(RUN_CASE_TESTS_PBS_TEMPLATE, "r", encoding="utf-8") as f:
with open(RUN_CASE_TESTS_PBS_TEMPLATE, 'r', encoding='utf-8') as f:
template_content = f.read()
run_case_tests_pbs_template = Template(template_content)
if debug:
print(f"{run_case_tests_pbs_template=}")
print(f"run_case_tests_pbs_template = {run_case_tests_pbs_template}")
# Read the template for the PBS script used for the 1st non-case tests.
with open(RUN_NON_CASE_TESTS_1_PBS_TEMPLATE, "r", encoding="utf-8") as f:
with open(RUN_NON_CASE_TESTS_1_PBS_TEMPLATE, 'r', encoding='utf-8') as f:
template_content = f.read()
run_non_case_tests_1_pbs_template = Template(template_content)
if debug:
print(f"{run_non_case_tests_1_pbs_template=}")
print('run_non_case_tests_1_pbs_template = '
f"{run_non_case_tests_1_pbs_template}")
# Read the template for the PBS script used for the 2nd non-case tests.
with open(RUN_NON_CASE_TESTS_2_PBS_TEMPLATE, "r", encoding="utf-8") as f:
with open(RUN_NON_CASE_TESTS_2_PBS_TEMPLATE, 'r', encoding='utf-8') as f:
template_content = f.read()
run_non_case_tests_2_pbs_template = Template(template_content)
if debug:
print(f"{run_non_case_tests_2_pbs_template=}")
print('run_non_case_tests_2_pbs_template = '
f"{run_non_case_tests_2_pbs_template}")
# Read the template for the PBS script used for report generation.
with open(UNIT_TEST_REPORT_PBS_TEMPLATE, "r", encoding="utf-8") as f:
with open(UNIT_TEST_REPORT_PBS_TEMPLATE, 'r', encoding='utf-8') as f:
template_content = f.read()
unit_test_report_pbs_template = Template(template_content)
if debug:
print(f"{unit_test_report_pbs_template=}")
print('unit_test_report_pbs_template = '
f"{unit_test_report_pbs_template}")
# ------------------------------------------------------------------------
# Create the common make command for all module sets.
make_cmd = "make gamera_mpi voltron_mpi allTests"
make_cmd = 'make gamera_mpi voltron_mpi allTests'
if debug:
print(f"{make_cmd=}")
print(f"make_cmd = {make_cmd}")
# Create the list for submit results. Only set to True if all qsub commands
# for a set are OK.
submit_ok = [False]*len(module_list_files)
if debug:
print(f"{submit_ok=}")
print(f"submit_ok = {submit_ok}")
# Create a list of lists for job IDs. There are 5 job IDs per set - one for
# data generation, case tests, non-case tests 1, non-case tests 2, and the
# test report.
job_ids = [[None]*5 for _ in module_list_files]  # fresh inner list per set (no aliasing)
if debug:
print(f"{job_ids=}")
print(f"job_ids = {job_ids}")
# Run the unit tests with each set of modules.
for (i_module_set, module_list_file) in enumerate(module_list_files):
if verbose:
print("Performing unit tests tests with module set "
print('Performing unit tests tests with module set '
f"{module_list_file}.")
# Extract the name of the list.
module_set_name = module_list_file.rstrip(".lst")
module_set_name = module_list_file.rstrip('.lst')
if debug:
print(f"{module_set_name=}.")
print(f"module_set_name = {module_set_name}.")
# --------------------------------------------------------------------
@@ -332,41 +315,41 @@ def unitTest(args: dict = None):
# options, if any.
path = os.path.join(MODULE_LIST_DIRECTORY, module_list_file)
if debug:
print(f"{path=}")
print(f"path = {path}")
module_names, cmake_environment, cmake_options = (
common.read_build_module_list_file(path)
)
if debug:
print(f"{module_names=}")
print(f"{cmake_environment=}")
print(f"{cmake_options=}")
print(f"module_names = {module_names}")
print(f"cmake_environment = {cmake_environment}")
print(f"cmake_options = {cmake_options}")
# <HACK>
# Extra argument needed for unit test build.
cmake_options += " -DCMAKE_BUILD_TYPE=RELWITHDEBINFO"
cmake_options += ' -DCMAKE_BUILD_TYPE=RELWITHDEBINFO'
if debug:
print(f"{cmake_options=}")
print(f"cmake_options = {cmake_options}")
# </HACK>
# Assemble the command to load the listed modules.
module_cmd = (
f"module --force purge; module load {" ".join(module_names)}"
f"module --force purge; module load {' '.join(module_names)}"
)
if debug:
print(f"{module_cmd=}")
print(f"module_cmd = {module_cmd}")
# Make a directory for this test, and go there.
dir_name = f"{UNIT_TEST_DIRECTORY_PREFIX}{module_set_name}"
build_directory = os.path.join(UNIT_TEST_DIRECTORY, dir_name)
if debug:
print(f"{build_directory=}")
print(f"build_directory = {build_directory}")
os.mkdir(build_directory)
os.chdir(build_directory)
# Run cmake to build the Makefile.
if verbose:
print(
"Running cmake to create Makefile for module set"
'Running cmake to create Makefile for module set'
f" {module_set_name}."
)
cmd = (
@@ -374,17 +357,17 @@ def unitTest(args: dict = None):
f" {KAIJUHOME} >& cmake.out"
)
if debug:
print(f"{cmd=}")
print(f"cmd = {cmd}")
try:
# NOTE: stdout and stderr go to cmake.out.
_ = subprocess.run(cmd, shell=True, check=True)
# NOTE: stdout and stderr go to cmake.out.
cproc = subprocess.run(cmd, shell=True, check=True)
except subprocess.CalledProcessError as e:
print(
f"ERROR: cmake for module set {module_set_name} failed.\n"
f"e.cmd = {e.cmd}\n"
f"e.returncode = {e.returncode}\n"
f"See {os.path.join(build_directory, 'cmake.out')}"
" for output from cmake.\n"
' for output from cmake.\n'
f"Skipping remaining steps for module set {module_set_name}",
file=sys.stderr
)
@@ -393,12 +376,12 @@ def unitTest(args: dict = None):
# Run the build.
if verbose:
print(
"Running make to build kaiju for module set"
'Running make to build kaiju for module set'
f" {module_set_name}."
)
cmd = f"{module_cmd}; {make_cmd} >& make.out"
if debug:
print(f"{cmd=}")
print(f"cmd = {cmd}")
try:
# NOTE: stdout and stderr go into make.out.
_ = subprocess.run(cmd, shell=True, check=True)
@@ -408,7 +391,7 @@ def unitTest(args: dict = None):
f"e.cmd = {e.cmd}\n"
f"e.returncode = {e.returncode}\n"
f"See {os.path.join(build_directory, 'make.out')}"
" for output from make.\n"
' for output from make.\n'
f"Skipping remaining steps for module set {module_set_name}",
file=sys.stderr
)
@@ -427,8 +410,6 @@ def unitTest(args: dict = None):
pbs_options["conda_environment"] = os.environ["CONDA_ENVIRONMENT"]
# Go to the bin directory for testing.
if verbose:
print(f"Moving to {BUILD_BIN_DIR}.")
os.chdir(BUILD_BIN_DIR)
# --------------------------------------------------------------------
@@ -462,46 +443,46 @@ def unitTest(args: dict = None):
# Set options specific to the data generation job, then render the
# template.
pbs_options["job_name"] = "genTestData"
pbs_options["walltime"] = "00:20:00"
pbs_options['job_name'] = 'genTestData'
pbs_options['walltime'] = '02:00:00'
pbs_content = data_generation_pbs_template.render(pbs_options)
if verbose:
print(f"Creating {DATA_GENERATION_PBS_SCRIPT}.")
with open(DATA_GENERATION_PBS_SCRIPT, "w", encoding="utf-8") as f:
with open(DATA_GENERATION_PBS_SCRIPT, 'w', encoding='utf-8') as f:
f.write(pbs_content)
# Run the data generation job.
cmd = f"qsub {DATA_GENERATION_PBS_SCRIPT}"
if debug:
print(f"{cmd=}")
print(f"cmd = {cmd}")
try:
cproc = subprocess.run(cmd, shell=True, check=True,
text=True, capture_output=True)
except subprocess.CalledProcessError as e:
print("ERROR: qsub failed.\n"
print('ERROR: qsub failed.\n'
f"e.cmd = {e.cmd}\n"
f"e.returncode = {e.returncode}\n"
"See test log for output.\n"
"Skipping remaining steps for module set "
'See test log for output.\n'
'Skipping remaining steps for module set '
f"{module_set_name}.",
file=sys.stderr)
continue
job_id = cproc.stdout.split(".")[0]
job_id = cproc.stdout.split('.')[0]
job_ids[i_module_set][0] = job_id
if debug:
print(f"{job_id=}")
print(f"{job_ids=}")
print(f"job_id = {job_id}")
print(f"job_ids = {job_ids}")
# --------------------------------------------------------------------
# Set options specific to the case tests job, then render the
# template.
pbs_options["job_name"] = "runCaseTests"
pbs_options["walltime"] = "00:40:00"
pbs_options['job_name'] = 'runCaseTests'
pbs_options['walltime'] = '00:40:00'
pbs_content = run_case_tests_pbs_template.render(pbs_options)
if verbose:
print(f"Creating {RUN_CASE_TESTS_PBS_SCRIPT}.")
with open(RUN_CASE_TESTS_PBS_SCRIPT, "w", encoding="utf-8") as f:
with open(RUN_CASE_TESTS_PBS_SCRIPT, 'w', encoding='utf-8') as f:
f.write(pbs_content)
# Run the case tests job if data was generated.
@@ -510,22 +491,22 @@ def unitTest(args: dict = None):
f"{RUN_CASE_TESTS_PBS_SCRIPT}"
)
if debug:
print(f"{cmd=}")
print(f"cmd = {cmd}")
try:
cproc = subprocess.run(cmd, shell=True, check=True,
text=True, capture_output=True)
except subprocess.CalledProcessError as e:
print("ERROR: qsub failed.\n"
print('ERROR: qsub failed.\n'
f"e.cmd = {e.cmd}\n"
f"e.returncode = {e.returncode}\n"
"See test log for output.\n"
"Skipping remaining steps for module set "
'See test log for output.\n'
'Skipping remaining steps for module set '
f"{module_set_name}.",
file=sys.stderr)
continue
job_id = cproc.stdout.split(".")[0]
job_id = cproc.stdout.split('.')[0]
if debug:
print(f"{job_id=}")
print(f"job_id = {job_id}")
job_ids[i_module_set][1] = job_id
# --------------------------------------------------------------------
@@ -537,7 +518,7 @@ def unitTest(args: dict = None):
if verbose:
print(f"Creating {RUN_NON_CASE_TESTS_1_PBS_SCRIPT}.")
pbs_content = run_non_case_tests_1_pbs_template.render(pbs_options)
with open(RUN_NON_CASE_TESTS_1_PBS_SCRIPT, "w", encoding="utf-8") as f:
with open(RUN_NON_CASE_TESTS_1_PBS_SCRIPT, 'w', encoding='utf-8') as f:
f.write(pbs_content)
# Run the 1st non-case tests job if data was generated.
@@ -546,32 +527,32 @@ def unitTest(args: dict = None):
f"{RUN_NON_CASE_TESTS_1_PBS_SCRIPT}"
)
if debug:
print(f"{cmd=}")
print(f"cmd = {cmd}")
try:
cproc = subprocess.run(cmd, shell=True, check=True,
text=True, capture_output=True)
except subprocess.CalledProcessError as e:
print("ERROR: qsub failed.\n"
print('ERROR: qsub failed.\n'
f"e.cmd = {e.cmd}\n"
f"e.returncode = {e.returncode}\n"
"See test log for output.\n"
"Skipping remaining steps for module set "
'See test log for output.\n'
'Skipping remaining steps for module set '
f"{module_set_name}.",
file=sys.stderr)
continue
job_id = cproc.stdout.split(".")[0]
job_id = cproc.stdout.split('.')[0]
if debug:
print(f"{job_id=}")
print(f"job_id = {job_id}")
job_ids[i_module_set][2] = job_id
# --------------------------------------------------------------------
# Set options specific to the 2nd non-case tests job, then render the
# template.
pbs_options["job_name"] = "runNonCaseTests2"
pbs_options["walltime"] = "12:00:00"
pbs_options['job_name'] = 'runNonCaseTests2'
pbs_options['walltime'] = '12:00:00'
pbs_content = run_non_case_tests_2_pbs_template.render(pbs_options)
with open(RUN_NON_CASE_TESTS_2_PBS_SCRIPT, "w", encoding="utf-8") as f:
with open(RUN_NON_CASE_TESTS_2_PBS_SCRIPT, 'w', encoding='utf-8') as f:
f.write(pbs_content)
# Run the 2nd non-case tests job if data was generated.
@@ -580,46 +561,46 @@ def unitTest(args: dict = None):
f"{RUN_NON_CASE_TESTS_2_PBS_SCRIPT}"
)
if debug:
print(f"{cmd=}")
print(f"cmd = {cmd}")
try:
cproc = subprocess.run(cmd, shell=True, check=True,
text=True, capture_output=True)
except subprocess.CalledProcessError as e:
print("ERROR: qsub failed.\n"
print('ERROR: qsub failed.\n'
f"e.cmd = {e.cmd}\n"
f"e.returncode = {e.returncode}\n"
"See test log for output.\n"
"Skipping remaining steps for module set "
'See test log for output.\n'
'Skipping remaining steps for module set '
f"{module_set_name}.",
file=sys.stderr)
continue
job_id = cproc.stdout.split(".")[0]
job_id = cproc.stdout.split('.')[0]
if debug:
print(f"{job_id=}")
print(f"job_id = {job_id}")
job_ids[i_module_set][3] = job_id
# --------------------------------------------------------------------
# Set options specific to the report generation job, then render the
# template.
pbs_options["job_name"] = "unitTestReport"
pbs_options["walltime"] = "00:10:00"
pbs_options["slack_bot_token"] = os.environ["SLACK_BOT_TOKEN"]
pbs_options["kaiju_test_root"] = KAIJU_TEST_ROOT
pbs_options["kaiju_test_set_root"] = os.environ["KAIJU_TEST_SET_ROOT"]
pbs_options["report_options"] = ""
pbs_options['job_name'] = 'unitTestReport'
pbs_options['walltime'] = '00:10:00'
pbs_options['slack_bot_token'] = os.environ['SLACK_BOT_TOKEN']
pbs_options['mage_test_root'] = os.environ['MAGE_TEST_ROOT']
pbs_options['mage_test_set_root'] = os.environ['MAGE_TEST_SET_ROOT']
pbs_options['report_options'] = ''
if debug:
pbs_options["report_options"] += " -d"
if loud:
pbs_options["report_options"] += " -l"
pbs_options['report_options'] += ' -d'
if be_loud:
pbs_options['report_options'] += ' -l'
if slack_on_fail:
pbs_options["report_options"] += " -s"
if test:
pbs_options["report_options"] += " -t"
pbs_options['report_options'] += ' -s'
if is_test:
pbs_options['report_options'] += ' -t'
if verbose:
pbs_options["report_options"] += " -v"
pbs_options['report_options'] += ' -v'
pbs_content = unit_test_report_pbs_template.render(pbs_options)
with open(UNIT_TEST_REPORT_PBS_SCRIPT, "w", encoding="utf-8") as f:
with open(UNIT_TEST_REPORT_PBS_SCRIPT, 'w', encoding='utf-8') as f:
f.write(pbs_content)
# Run the report generation job if all others ran OK.
@@ -628,22 +609,22 @@ def unitTest(args: dict = None):
f"{UNIT_TEST_REPORT_PBS_SCRIPT}"
)
if debug:
print(f"{cmd=}")
print(f"cmd = {cmd}")
try:
cproc = subprocess.run(cmd, shell=True, check=True,
text=True, capture_output=True)
except subprocess.CalledProcessError as e:
print("ERROR: qsub failed.\n"
print('ERROR: qsub failed.\n'
f"e.cmd = {e.cmd}\n"
f"e.returncode = {e.returncode}\n"
"See test log for output.\n"
"Skipping remaining steps for module set "
'See test log for output.\n'
'Skipping remaining steps for module set '
f"{module_set_name}.",
file=sys.stderr)
continue
job_id = cproc.stdout.split(".")[0]
job_id = cproc.stdout.split('.')[0]
if debug:
print(f"{job_id=}")
print(f"job_id = {job_id}")
job_ids[i_module_set][4] = job_id
# --------------------------------------------------------------------
@@ -652,7 +633,7 @@ def unitTest(args: dict = None):
if verbose:
print(f"Saving job IDs for module set {module_set_name} "
f"in {JOB_LIST_FILE}.")
with open(JOB_LIST_FILE, "w", encoding="utf-8") as f:
with open(JOB_LIST_FILE, 'w', encoding='utf-8') as f:
for job_id in job_ids[i_module_set]:
f.write(f"{job_id}\n")
@@ -661,13 +642,13 @@ def unitTest(args: dict = None):
# End of loop over module sets
if debug:
print(f"{submit_ok=}")
print(f"{job_ids=}")
print(f"submit_ok = {submit_ok}")
print(f"job_ids = {job_ids}")
# ------------------------------------------------------------------------
# Detail the test results
test_report_details_string = ""
test_report_details_string = ''
test_report_details_string += (
f"Test results are on `derecho` in `{UNIT_TEST_DIRECTORY}`.\n"
)
@@ -707,77 +688,38 @@ def unitTest(args: dict = None):
test_report_summary_string = (
f"Unit test submission for `{os.environ['BRANCH_OR_COMMIT']}`: "
)
if "FAILED" in test_report_details_string:
test_report_summary_string += "*FAILED*"
if 'FAILED' in test_report_details_string:
test_report_summary_string += '*FAILED*'
else:
test_report_summary_string += "*PASSED*"
test_report_summary_string += '*PASSED*'
# Print the test results summary and details.
print(test_report_summary_string)
print(test_report_details_string)
# If a test failed, or loud mode is on, post report to Slack.
if (slack_on_fail and "FAILED" in test_report_details_string) or loud:
if (slack_on_fail and 'FAILED' in test_report_details_string) or be_loud:
slack_client = common.slack_create_client()
if debug:
print(f"{slack_client=}")
print(f"slack_client = {slack_client}")
slack_response_summary = common.slack_send_message(
slack_client, test_report_summary_string, is_test=test
slack_client, test_report_summary_string, is_test=is_test
)
if debug:
print(f"slack_{slack_response_summary=}")
thread_ts = slack_response_summary["ts"]
print(f"slack_response_summary = {slack_response_summary}")
thread_ts = slack_response_summary['ts']
slack_response_summary = common.slack_send_message(
slack_client, test_report_details_string, thread_ts=thread_ts,
is_test=test
is_test=is_test
)
if debug:
print(f"{slack_response_summary=}")
print(f"slack_response_summary = {slack_response_summary}")
# ------------------------------------------------------------------------
if debug:
print(f"Ending {sys.argv[0]} at {datetime.datetime.now()}")
# Return nominal status.
return 0
def main():
"""Main program code for the command-line version of this file.
This is the main program code for the command-line version of this file.
It processes command-line options, then calls the primary code.
Parameters
----------
None
Returns
-------
None
Raises
------
None
"""
# Set up the command-line parser.
parser = create_command_line_parser()
# Parse the command-line arguments.
args = parser.parse_args()
if args.debug:
print(f"{args=}")
# ------------------------------------------------------------------------
# Call the main program logic. Note that the Namespace object (args)
# returned from the option parser is converted to a dict using vars().
unitTest(vars(args))
# Exit normally.
sys.exit(0)
if __name__ == "__main__":
if __name__ == '__main__':
main()

View File

@@ -34,10 +34,10 @@ import common
DESCRIPTION = 'Report on the MAGE Fortran unit test results.'
# Root of directory tree for this set of tests.
KAIJU_TEST_SET_ROOT = os.environ['KAIJU_TEST_SET_ROOT']
MAGE_TEST_SET_ROOT = os.environ['MAGE_TEST_SET_ROOT']
# Directory for unit tests
UNIT_TEST_DIRECTORY = os.path.join(KAIJU_TEST_SET_ROOT, 'unitTest')
UNIT_TEST_DIRECTORY = os.path.join(MAGE_TEST_SET_ROOT, 'unitTest')
# glob pattern for naming unit test directories
UNIT_TEST_DIRECTORY_GLOB_PATTERN = 'unitTest_*'
@@ -137,8 +137,8 @@ def main():
# Compute the names of the job log files.
job_file_0 = f"genTestData.o{job_ids[0]}" # 0 OKs
job_file_1 = f"runCaseTests.o{job_ids[1]}" # 2 OKs
job_file_2 = f"runNonCaseTests1.o{job_ids[2]}" # 6 OKs
job_file_1 = f"runCaseTests.o{job_ids[1]}" # 2 OKs
job_file_2 = f"runNonCaseTests1.o{job_ids[2]}" # 7 OKs
job_file_3 = f"runNonCaseTests2.o{job_ids[3]}" # 1 OK
if debug:
print(f"job_file_0 = {job_file_0}")
@@ -164,8 +164,8 @@ def main():
elif 'job killed' in line:
jobKilled = True
# There should be exactly 9 OKs.
OK_COUNT_EXPECTED = 9
# There should be exactly 10 OKs.
OK_COUNT_EXPECTED = 10
if verbose:
print(f"Found {okCount} OKs, expected {OK_COUNT_EXPECTED}.")
if okCount != OK_COUNT_EXPECTED:
@@ -234,7 +234,12 @@ def main():
)
if debug:
print(f"slack_response_summary = {slack_response_summary}")
# Also write a summary file to the root folder of this test
with open(os.path.join(MAGE_TEST_SET_ROOT, 'testSummary.out'), 'w', encoding='utf-8') as f:
f.write(test_report_details_string)
f.write('\n')
# ------------------------------------------------------------------------
if debug:

View File

@@ -20,28 +20,19 @@ echo 'The currently loaded modules are:'
module list
echo 'Loading python environment.'
mage_test_root=$HOME
if [ -d "${mage_test_root}/miniconda3" ]; then
echo 'Loading local miniconda3'
export CONDARC="${mage_test_root}/.condarc"
export CONDA_ENVS_PATH="${mage_test_root}/.conda"
mage_miniconda3="${mage_test_root}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
else
export PATH="$mage_miniconda3/bin:$PATH"
fi
fi
unset __conda_setup
mage_miniconda3="${HOME}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
echo 'Loading conda module'
module load conda
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
else
export PATH="$mage_miniconda3/bin:$PATH"
fi
fi
unset __conda_setup
conda activate {{ conda_environment }}
echo "The current conda environment is ${CONDA_PREFIX}."
@@ -55,8 +46,8 @@ export SLACK_BOT_TOKEN={{ slack_bot_token }}
export OMP_NUM_THREADS=128
export MPI_TYPE_DEPTH=32
export KMP_STACKSIZE=128M
export KAIJU_TEST_ROOT={{ kaiju_test_root }}
export KAIJU_TEST_SET_ROOT={{ kaiju_test_set_root }}
export MAGE_TEST_ROOT="{{ mage_test_root }}"
export MAGE_TEST_SET_ROOT={{ mage_test_set_root }}
export BRANCH_OR_COMMIT={{ branch_or_commit }}
echo 'The active environment variables are:'
@@ -86,9 +77,9 @@ echo 'Generating the solar wind boundary condition file.'
{{ cda2wind_cmd }}
echo "The solar wind boundary condition file is `ls bcwind.h5`."
# Generate tjhe raiju configuration file.
echo 'Generating the RAIJU configuration file.'
genRAIJU
# Generate the raiju configuration file.
echo 'Generating the raiju configuration file.'
{{ genRAIJU_cmd }}
echo "The RAIJU configuration file is `ls raijuconfig.h5`."
# Run the model.

View File

@@ -40,10 +40,10 @@ DEFAULT_MODULE_SET_FILE = os.path.join(
)
# Root of directory tree for this set of tests.
KAIJU_TEST_SET_ROOT = os.environ["KAIJU_TEST_SET_ROOT"]
MAGE_TEST_SET_ROOT = os.environ["MAGE_TEST_SET_ROOT"]
# Directory for weekly dash results
WEEKLY_DASH_DIRECTORY = os.path.join(KAIJU_TEST_SET_ROOT, "weeklyDash")
WEEKLY_DASH_DIRECTORY = os.path.join(MAGE_TEST_SET_ROOT, "weeklyDash")
# Path to directory containing the test scripts
TEST_SCRIPTS_DIRECTORY = os.path.join(KAIJUHOME, "testingScripts")
@@ -149,6 +149,9 @@ def weekly_dash(args: dict):
"cda2wind -t0 2016-08-09T02:00:00 -t1 2016-08-09T12:00:00"
)
# Create the command to generate the raiju configuration.
genRAIJU_cmd = "genRAIJU"
# Create the command for launching an MPI program.
mpiexec_cmd = f"mpiexec {KAIJUHOME}/scripts/preproc/pinCpuCores.sh"
@@ -176,8 +179,8 @@ def weekly_dash(args: dict):
pbs_options["walltime"] = "08:00:00"
pbs_options["modules"] = module_names
pbs_options["conda_environment"] = os.environ["CONDA_ENVIRONMENT"]
pbs_options["kaiju_test_root"] = os.environ["KAIJU_TEST_ROOT"]
pbs_options["kaiju_test_set_root"] = os.environ["KAIJU_TEST_SET_ROOT"]
pbs_options["mage_test_root"] = os.environ["MAGE_TEST_ROOT"]
pbs_options["mage_test_set_root"] = os.environ["MAGE_TEST_SET_ROOT"]
pbs_options["kaijuhome"] = KAIJUHOME
pbs_options["tmpdir"] = os.environ["TMPDIR"]
pbs_options["slack_bot_token"] = os.environ["SLACK_BOT_TOKEN"]
@@ -186,6 +189,7 @@ def weekly_dash(args: dict):
pbs_options["make_cmd"] = make_cmd
pbs_options["genLFM_cmd"] = genLFM_cmd
pbs_options["cda2wind_cmd"] = cda2wind_cmd
pbs_options["genRAIJU_cmd"] = genRAIJU_cmd
pbs_options["mpiexec_cmd"] = mpiexec_cmd
pbs_options["voltron_cmd"] = voltron_cmd

View File

@@ -0,0 +1,17 @@
<?xml version="1.0"?>
<Kaiju>
<Gamera>
<sim runid="blast3d_large2" doH5init="F" icType="BW" pdmb="4.0"/>
<idir min="-0.5" max="0.5" N="128"/>
<jdir min="-0.5" max="0.5" N="128"/>
<kdir min="-0.5" max="0.5" N="128"/>
<iPdir N="2" bcPeriodic="T"/>
<jPdir N="1" bcPeriodic="T"/>
<kPdir N="1" bcPeriodic="T"/>
<time tFin="1.0"/>
<output dtOut="0.01" tsOut="10"/>
<physics doMHD="F" do25D="T"/>
<coupling blockHalo="T"/>
<prob B0="0.1"/>>
</Gamera>
</Kaiju>

View File

@@ -0,0 +1,17 @@
<?xml version="1.0"?>
<Kaiju>
<Gamera>
<sim runid="blast3d_large8" doH5init="F" icType="BW" pdmb="4.0"/>
<idir min="-0.5" max="0.5" N="128"/>
<jdir min="-0.5" max="0.5" N="128"/>
<kdir min="-0.5" max="0.5" N="128"/>
<iPdir N="2" bcPeriodic="T"/>
<jPdir N="2" bcPeriodic="T"/>
<kPdir N="2" bcPeriodic="T"/>
<time tFin="1.0"/>
<output dtOut="0.01" tsOut="10"/>
<physics doMHD="F" do25D="T"/>
<coupling blockHalo="T"/>
<prob B0="0.1"/>>
</Gamera>
</Kaiju>

View File

@@ -21,7 +21,7 @@ contains
end subroutine lastSerial
@test(npes=[8])
subroutine testBlast3D(this)
subroutine testBlast3D_8(this)
class (MpiTestMethod), intent(inout) :: this
type(gamAppMpi_T) :: gameraAppMpi
@@ -31,7 +31,7 @@ contains
gameraAppMpi%gOptions%userInitFunc => initUser
gameraAppMpi%gOptionsMpi%gamComm = getMpiF08Communicator(this)
xmlInp = New_XML_Input('blast3d_large.xml','Kaiju',.true.)
xmlInp = New_XML_Input('blast3d_large8.xml','Kaiju',.true.)
call gameraAppMpi%InitModel(xmlInp)
do while ((gameraAppMpi%Model%tFin - gameraAppMpi%Model%t) > 1e-15)
@@ -48,7 +48,98 @@ contains
end do
write(*,*) 'End time = ', gameraAppMpi%Model%t
end subroutine testBlast3D
end subroutine testBlast3D_8
@test(npes=[2])
subroutine testBlast3D_2(this)
class (MpiTestMethod), intent(inout) :: this
type(gamAppMpi_T) :: gameraAppMpi
type(XML_Input_T) :: xmlInp
call setMpiReal()
gameraAppMpi%gOptions%userInitFunc => initUser
gameraAppMpi%gOptionsMpi%gamComm = getMpiF08Communicator(this)
xmlInp = New_XML_Input('blast3d_large2.xml','Kaiju',.true.)
call gameraAppMpi%InitModel(xmlInp)
do while ((gameraAppMpi%Model%tFin - gameraAppMpi%Model%t) > 1e-15)
call stepGamera_mpi(gameraAppMpi)
if (gameraAppMpi%Model%IO%doConsole(gameraAppMpi%Model%t)) then
call consoleOutput(gameraAppMpi%Model,gameraAppMpi%Grid,gameraAppMpi%State)
endif
if (gameraAppMpi%Model%IO%doOutput(gameraAppMpi%Model%t)) then
call fOutput(gameraAppMpi%Model,gameraAppMpi%Grid,gameraAppMpi%State)
endif
end do
write(*,*) 'End time = ', gameraAppMpi%Model%t
end subroutine testBlast3D_2
! This test must be last so that the data has already been generated by the two tests above.
@test(npes=[1])
subroutine compareBlastWaves(this)
class (MpiTestMethod), intent(inout) :: this
type(IOVAR_T), dimension(25) :: IOVars
real(rp), allocatable :: p8(:,:,:), p2(:,:,:)
integer :: i,j,k,ni,nj,nk,ni2,nj2,nk2
character(len=strLen) :: h5Str, gStr, errMsg
call setMpiReal()
h5Str = trim('blast')
gStr = '/Step#99'
call ClearIO(IOVars)
call AddInVar(IOVars,"P")
! manually read in the 2 parts of blast3d_large2 and also determine the size of the data
h5Str = 'blast3d_large2_0002_0001_0001_0000_0000_0000.gam.h5'
call ReadVars(IOVars,.false.,h5Str,gStr)
ni = 2*IOVars(1)%dims(1)
nj = IOVars(1)%dims(2)
nk = IOVars(1)%dims(3)
ni2 = ni/2
nj2 = nj/2
nk2 = nk/2
allocate(p2(ni,nj,nk))
allocate(p8(ni,nj,nk))
call IOArray3DFill(IOVars,"P",p2(1:ni2,:,:))
call ClearIO(IOVars)
call AddInVar(IOVars,"P")
h5Str = 'blast3d_large2_0002_0001_0001_0001_0000_0000.gam.h5'
call ReadVars(IOVars,.false.,h5Str,gStr)
call IOArray3DFill(IOVars,"P",p2(ni2+1:ni,:,:))
call ClearIO(IOVars)
! loop to read in the parts blast3d_large8
do i=1,2
do j=1,2
do k=1,2
call AddInVar(IOVars,"P")
! The file name must encode all three rank indices (i,j,k).
write(h5Str,'(A,I0,A,I0,A,I0,A)') 'blast3d_large8_0002_0002_0002_000',i-1,'_000',j-1,'_000',k-1,'.gam.h5'
call ReadVars(IOVars,.false.,h5Str,gStr)
call IOArray3DFill(IOVars,"P",p8(1+(i-1)*ni2:i*ni2,1+(j-1)*nj2:j*nj2,1+(k-1)*nk2:k*nk2))
call ClearIO(IOVars)
enddo
enddo
enddo
! check values
do i=1,ni
do j=1,nj
do k=1,nk
write(errMsg,'(A,I0,A,I0,A,I0,A)') 'Blast wave values not equal at (',i,',',j,',',k,')'
@assertEqual(p2(i,j,k),p8(i,j,k),1e-12,trim(errMsg))
enddo
enddo
enddo
end subroutine compareBlastWaves
end module testCasesMpi
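The stitching done by compareBlastWaves can also be checked offline with h5py;
a sketch under the file-naming convention above (2x2x2 decomposition, dataset P
in group /Step#99). Note that h5py returns C-order arrays, so the axis order
may be the reverse of the Fortran (i,j,k) view; swap block indices if needed:

import h5py
import numpy as np

def read_p(fname):
    # Read pressure from one MPI rank's output file.
    with h5py.File(fname, 'r') as f:
        return f['/Step#99/P'][...]

blocks = {(i, j, k): read_p(
              f'blast3d_large8_0002_0002_0002_000{i}_000{j}_000{k}.gam.h5')
          for i in range(2) for j in range(2) for k in range(2)}
bi, bj, bk = blocks[0, 0, 0].shape
p8 = np.empty((2*bi, 2*bj, 2*bk))
for (i, j, k), b in blocks.items():
    p8[i*bi:(i+1)*bi, j*bj:(j+1)*bj, k*bk:(k+1)*bk] = b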

View File

@@ -18,22 +18,6 @@ module load {{ module }}
{%- endfor %}
module list
echo 'Configuring python environment.'
mage_miniconda3="${HOME}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
else
export PATH="$mage_miniconda3/bin:$PATH"
fi
fi
unset __conda_setup
conda activate {{ conda_environment }}
echo 'Setting up MAGE environment.'
source {{ kaijuhome }}/scripts/setupEnvironment.sh
@@ -44,9 +28,6 @@ export KMP_STACKSIZE=128M
echo 'The active environment variables are:'
printenv
echo 'Generating RAIJU configuration file.'
genRAIJU
echo 'Generating data for testing.'
MPICOMMAND="mpiexec $KAIJUHOME/scripts/preproc/pinCpuCores.sh"
$MPICOMMAND ./voltron_mpi.x cmiD_deep_8_genRes.xml >& cmiD_deep_8_genRes.out

View File

@@ -42,6 +42,13 @@ date
echo 'REMIX tests complete.'
echo | tail -n 3 ./mixTests.out
echo 'Running RCM tests.'
date
./rcmTests >& rcmTests.out
date
echo 'RCM tests complete.'
echo | tail -n 3 ./rcmTests.out
echo 'Running SHELLGRID tests.'
date
./shgrTests >& shgrTests.out

View File

@@ -22,7 +22,7 @@ echo 'Setting up MAGE environment.'
source {{ kaijuhome }}/scripts/setupEnvironment.sh
echo 'Setting environment variables.'
export OMP_NUM_THREADS=128
# export OMP_NUM_THREADS=128
export MPI_TYPE_DEPTH=32
export KMP_STACKSIZE=128M
echo 'The active environment variables are:'

View File

@@ -20,26 +20,32 @@ module list
echo 'Loading python environment.'
mage_test_root=$HOME
mage_miniconda3="${mage_test_root}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
if [ -d "${mage_test_root}/miniconda3" ]; then
echo 'Loading local miniconda3'
mage_miniconda3="${mage_test_root}/miniconda3"
mage_conda="${mage_miniconda3}/bin/conda"
__conda_setup="$($mage_conda 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
export PATH="$mage_miniconda3/bin:$PATH"
if [ -f "$mage_miniconda3/etc/profile.d/conda.sh" ]; then
. "$mage_miniconda3/etc/profile.d/conda.sh"
else
export PATH="$mage_miniconda3/bin:$PATH"
fi
fi
unset __conda_setup
else
echo 'Loading conda module'
module load conda
fi
unset __conda_setup
conda activate {{ conda_environment }}
echo 'Setting up MAGE environment.'
source {{ kaijuhome }}/scripts/setupEnvironment.sh
echo 'Setting environment variables.'
export KAIJU_TEST_SET_ROOT={{ kaiju_test_set_root }}
export MAGE_TEST_SET_ROOT={{ mage_test_set_root }}
export SLACK_BOT_TOKEN={{ slack_bot_token }}
export BRANCH_OR_COMMIT={{ branch_or_commit }}
echo 'The active environment variables are:'