diff --git a/.github/workflows/ghci-snl-testing.yml b/.github/workflows/ghci-snl-testing.yml new file mode 100644 index 00000000..06f12856 --- /dev/null +++ b/.github/workflows/ghci-snl-testing.yml @@ -0,0 +1,186 @@ +name: ghci-snl-testing + +on: + # Runs on PRs against master + pull_request: + branches: [ master ] + types: [opened, synchronize, ready_for_review, reopened] + + # Manual run is used to bless + workflow_dispatch: + inputs: + job_to_run: + description: 'Job to run' + required: true + type: choice + options: + - gcc-openmp + - gcc-cuda + - all + + # Add schedule trigger for nightly runs at midnight MT (Standard Time) + schedule: + - cron: '0 7 * * *' # Runs at 7 AM UTC, which is midnight MT during Standard Time + +concurrency: + # Two runs are in the same group if they are testing the same git ref + # - if trigger=pull_request, the ref is refs/pull//merge + # - for other triggers, the ref is the branch tested + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +# WARNING: if you change machines where this workflow can run, you may have to adjust the +# location of the certificate file (possibly on a per-job basis). 
For now, since +# all ghci-snl-XYZ machines have the same certificates, this can be set at +# the workflow level +env: + NODE_EXTRA_CA_CERTS: /etc/pki/tls/certs/ca-bundle.crt + +jobs: + gcc-openmp: + if: | + ${{ + github.event_name != 'workflow_dispatch' || + ( + github.event.inputs.job_to_run == 'gcc-openmp' || + github.event.inputs.job_to_run == 'all' + ) + }} + runs-on: [self-hosted, ghci-snl-cpu, gcc] + strategy: + fail-fast: false + matrix: + build_type: [debug, release] + name: gcc-openmp / ${{ matrix.build_type }} + steps: + - name: Show action trigger + uses: actions/github-script@v7 + with: + script: | + const eventName = context.eventName; + const actor = context.actor || 'unknown'; // Default to 'unknown' if actor is not defined + let eventAction = 'N/A'; + + // Determine the event action based on the event type + if (eventName === 'pull_request') { + eventAction = context.payload.action || 'N/A'; + } else if (eventName === 'pull_request_review') { + eventAction = context.payload.review.state || 'N/A'; + } else if (eventName === 'workflow_dispatch') { + eventAction = 'manual trigger'; + } else if (eventName === 'schedule') { + eventAction = 'scheduled trigger'; + } + console.log(`The job was triggered by a ${eventName} event.`); + console.log(` - Event action: ${eventAction}`); + console.log(` - Triggered by: ${actor}`); + - name: Check out the repository + uses: actions/checkout@v4 + with: + persist-credentials: false + show-progress: false + submodules: recursive + - name: Ensure CACTS is installed + run: | + python3 -m pip install --user --upgrade --trusted-host pypi.org cacts + - name: Run tests + run: | + cmd="cacts -m ghci-snl-cpu -t ${{ matrix.build_type }} -r ./" + echo "CACTS call: $cmd" + $cmd + - name: Upload files + if: always() + uses: actions/upload-artifact@v4 + with: + name: log-files-${{ matrix.build_type }}-ghci-snl-cpu + path: | + ctest-build/*/Testing/Temporary/Last*.log + ctest-build/*/ctest_resource_file.json + 
ctest-build/*/ctest_script.cmake + ctest-build/*/CMakeCache.txt + gcc-cuda: + if: | + ${{ + github.event_name != 'workflow_dispatch' || + ( + github.event.inputs.job_to_run == 'gcc-cuda' || + github.event.inputs.job_to_run == 'all' + ) + }} + runs-on: [self-hosted, ghci-snl-cuda, cuda, gcc] + strategy: + fail-fast: false + matrix: + build_type: [debug, release] + name: gcc-cuda / ${{ matrix.build_type }} + steps: + - name: Show action trigger + uses: actions/github-script@v7 + with: + script: | + const eventName = context.eventName; + const actor = context.actor || 'unknown'; // Default to 'unknown' if actor is not defined + let eventAction = 'N/A'; + + // Determine the event action based on the event type + if (eventName === 'pull_request') { + eventAction = context.payload.action || 'N/A'; + } else if (eventName === 'pull_request_review') { + eventAction = context.payload.review.state || 'N/A'; + } else if (eventName === 'workflow_dispatch') { + eventAction = 'manual trigger'; + } else if (eventName === 'schedule') { + eventAction = 'scheduled trigger'; + } + console.log(`The job was triggered by a ${eventName} event.`); + console.log(` - Event action: ${eventAction}`); + console.log(` - Triggered by: ${actor}`); + - name: Check out the repository + uses: actions/checkout@v4 + with: + persist-credentials: false + show-progress: false + submodules: recursive + - name: Get CUDA Arch + run: | + # Ensure nvidia-smi is available + if ! command -v nvidia-smi &> /dev/null; then + echo "nvidia-smi could not be found. Please ensure you have Nvidia drivers installed." 
+ exit 1 + fi + + # Get the GPU model from nvidia-smi, and set env for next step + gpu_model=$(nvidia-smi --query-gpu=name --format=csv,noheader | head -n 1) + case "$gpu_model" in + *"H100"*) + echo "ARCH=HOPPER90" >> $GITHUB_ENV + ;; + *"A100"*) + echo "ARCH=AMPERE80" >> $GITHUB_ENV + ;; + *"V100"*) + echo "ARCH=VOLTA70" >> $GITHUB_ENV + ;; + *) + echo "Unsupported GPU model: $gpu_model" + exit 1 + ;; + esac + - name: Ensure CACTS is installed + run: | + python3 -m pip install --user --upgrade --trusted-host pypi.org cacts + - name: Run tests + run: | + cmd="cacts -m ghci-snl-cuda -t ${{ matrix.build_type }} -r ./ -c Kokkos_ARCH_${{ env.ARCH }}=ON" + echo "CACTS call: $cmd" + $cmd + - name: Upload files + if: always() + uses: actions/upload-artifact@v4 + with: + name: log-files-${{ matrix.build_type }}-ghci-snl-cuda + path: | + ctest-build/*/Testing/Temporary/Last*.log + ctest-build/*/ctest_resource_file.json + ctest-build/*/ctest_script.cmake + ctest-build/*/CMakeCache.txt diff --git a/cacts.yaml b/cacts.yaml new file mode 100644 index 00000000..99c5caed --- /dev/null +++ b/cacts.yaml @@ -0,0 +1,114 @@ +# Configuration file for CACTS +# +# There are three main sections: project, machines build_types +# - project: contains basic info on the project +# - machines: contains a list of machines on which testing is allowed +# - configurations: contains a list of build types that can be built +# +# The machines and configurations sections CAN contain an entry "default", which +# defines some defaults for all machines/build_types. Other entries will OVERWRITE anything +# that is also set in the default entry. It is recommended to keep the default +# entry, since it can be used to list ALL possible settings, for documentation purposes. +# +# Upon parsing the yaml file, CACTS will create one Project, one Machine, and one or +# more BuildType objects. These objects will contain members with *the same* name as the +# configs in the yaml file. 
Notice the settings names are hard-coded, so you can't add
+# a new setting and hope that it gets set in the object.
+#
+# The objects settings CAN be used in the yaml file to programmatically set other options.
+# For instance, a build type can use properties of the project/machine to set a cmake var.
+# The syntax is ${<obj>.<prop>}, where <obj> is 'project', 'machine', or 'build', and
+# <obj> and <prop> must be a valid attribute of the corresponding object (see the
+# corresponding py files for valid options). If you use the ${..} syntax,
+# we recommend that you wrap the entry in quotes, to avoid any surprise with YAML parsers.
+# The ${..} syntax is actually more powerful than that, and can perform any python operation,
+# with some restriction (e.g., imports or tinkering with global vars is prohibited,
+# for security purposes).
+#
+# In addition to the ${..} syntax, CACTS also supports bash commands evaluation,
+# with the syntax $(cmd). This can be used in conjunction with ${}. E.g., one can do
+# NetCDF_Fortran_ROOT: $(${machine.env_setup} && nf-config --prefix)
+# Python expressions ${..} are always evaluated first, bash expressions $(..) are
+# evaluated afterwards.
+ +########################################################################################## +# PROJECT SETTINGS # +########################################################################################## + +project: + name: EKAT + # NOTE: CACTS will also set project.root_dir at runtime, so you can actually use + # ${project.root_dir} in the machines/configurations sections + +########################################################################################## +# MACHINES # +########################################################################################## + +machines: + # CACTS will also set an entry machine.name, where the value of name matches the yaml map section name + default: + cxx_compiler: mpicxx + c_compiler: mpicc + ftn_compiler: mpifort + mach_file: "${str(project.root_dir) + '/cmake/machine-files/' + machine.name + '.cmake'}" + gpu_arch: null + batch: null + num_bld_res: null + num_run_res: null + valg_supp_file: null + node_regex: null + + mappy: + env_setup: + - 'module purge' + - 'module load sems-cmake/3.27.9 sems-git/2.42.0 sems-gcc/11.4.0 sems-openmpi-no-cuda/4.1.6' + valg_supp_file: "${project.root_dir}/scripts/jenkins/valgrind/mappy.supp" + + weaver: + env_setup: + - "source /etc/profile.d/modules.sh" + - "module purge" + - "module load cmake/3.25.1 git/2.39.1 python/3.10.8 gcc/11.3.0 cuda/11.8.0 openmpi" + + batch: "bsub -I -q rhel8 -n 4 -gpu num=4" + num_run_res: 4 # four gpus + gpu_arch: "cuda" + + ghci-snl-cpu: {} + + ghci-snl-cuda: + gpu_arch: "cuda" + num_run_res: 2 + +######################################################################################### +# BUILDS CONFIGURATIONS # +######################################################################################### + +configurations: + # CACTS will also set an entry build.name, where the value of name matches the yaml map section name + default: + longname: null # If not set, will default to build.name + description: null + uses_baselines: False + on_by_default: 
True + cmake_args: + EKAT_ENABLE_ALL_PACKAGES: True + EKAT_TEST_THREAD_INC: ${2 if machine.gpu_arch is None else 1} + EKAT_TEST_MAX_THREADS: ${machine.num_run_res if machine.gpu_arch is None else 1} + EKAT_DISABLE_TPL_WARNINGS: True + EKAT_DEFAULT_BFB: True + EKAT_TEST_DOUBLE_PRECISION: True + EKAT_TEST_SINGLE_PRECISION: True + EKAT_SKIP_FIND_YAML_CPP: True + + debug: + longname: debug + description: "debug build with both double and single precision testing" + cmake_args: + CMAKE_BUILD_TYPE: Debug + Kokkos_ENABLE_DEBUG_BOUNDS_CHECK: True + release: + longname: release + description: "release build with both double and single precision testing" + cmake_args: + CMAKE_BUILD_TYPE: Release diff --git a/cmake/machine-files/ghci-snl-cpu.cmake b/cmake/machine-files/ghci-snl-cpu.cmake new file mode 100644 index 00000000..07ad6ff2 --- /dev/null +++ b/cmake/machine-files/ghci-snl-cpu.cmake @@ -0,0 +1,5 @@ +# Common settings for our ghci images +include(${CMAKE_CURRENT_LIST_DIR}/ghci-snl.cmake) + +# Currently, we have 32 cores for each ghci-snl-cpu instance, but 4 ranks is enough +set(EKAT_TEST_MAX_RANKS 4 CACHE STRING "Upper limit on ranks for mpi tests") diff --git a/cmake/machine-files/ghci-snl-cuda.cmake b/cmake/machine-files/ghci-snl-cuda.cmake new file mode 100644 index 00000000..96a23941 --- /dev/null +++ b/cmake/machine-files/ghci-snl-cuda.cmake @@ -0,0 +1,10 @@ +# Common settings for our ghci images +include(${CMAKE_CURRENT_LIST_DIR}/ghci-snl.cmake) + +# Enable CUDA in kokkos +include (${CMAKE_CURRENT_LIST_DIR}/kokkos/cuda.cmake) + +set(EKAT_MPI_NP_FLAG "-n" CACHE STRING "The mpirun flag for designating the total number of ranks") + +# Currently, we have 2 GPUs/node on Blake, and we run a SINGLE build per node, so we can fit 2 ranks there +set(EKAT_TEST_MAX_RANKS 2 CACHE STRING "Upper limit on ranks for mpi tests") diff --git a/cmake/machine-files/ghci-snl.cmake b/cmake/machine-files/ghci-snl.cmake new file mode 100644 index 00000000..85daf8c9 --- /dev/null +++ 
b/cmake/machine-files/ghci-snl.cmake @@ -0,0 +1,9 @@ +# Let's catch usage of code deprecated in Kokkos 4 +option (Kokkos_ENABLE_DEPRECATED_CODE_4 "" OFF) + +# We need to manage resources to spread across available cores/gpus +option (EKAT_TEST_LAUNCHER_MANAGE_RESOURCES "" ON) + +# Needed by EkatCreateUnitTest +set (EKAT_MPIRUN_EXE "mpirun" CACHE STRING "") +set (EKAT_MPI_NP_FLAG "-n" CACHE STRING "") diff --git a/scripts/jenkins/jenkins.sh b/scripts/jenkins/jenkins.sh deleted file mode 100755 index 7820eba7..00000000 --- a/scripts/jenkins/jenkins.sh +++ /dev/null @@ -1,31 +0,0 @@ -#! /bin/bash -xe - -EKAT_JENKINS_SCRIPT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) -DATE_STAMP=$(date "+%Y-%m-%d_%H%M%S") - -if [ -z "$WORKSPACE" ]; then - echo "Must run from Jenkins job" - exit 1 -fi - -cd $WORKSPACE/${BUILD_ID} - -export WORK_DIR=$(pwd) - -# setup env, use SCREAM env -export SCREAM_SCRIPTS=${WORK_DIR}/scream/components/eamxx/scripts -export JENKINS_SCRIPT_DIR=${SCREAM_SCRIPTS}/jenkins # some scream env setups depend on this -source ${SCREAM_SCRIPTS}/jenkins/${NODE_NAME}_setup -source ${SCREAM_SCRIPTS}/source_to_load_eamxx_env.sh - -if [ -z "$SCREAM_MACHINE" ]; then - echo "SCREAM_MACHINE must be set by ${SCREAM_SCRIPTS}/jenkins/${NODE_NAME}_setup in order for jenkins infrastructure to work" - exit 1 -fi - -export SCREAM_MACHINE - -BATCHP=$(${SCREAM_SCRIPTS}/query-eamxx $SCREAM_MACHINE batch) - -set -o pipefail -$BATCHP $EKAT_JENKINS_SCRIPT_DIR/jenkins_impl.sh 2>&1 | tee JENKINS_$DATE_STAMP diff --git a/scripts/jenkins/jenkins_impl.sh b/scripts/jenkins/jenkins_impl.sh deleted file mode 100755 index 010f8143..00000000 --- a/scripts/jenkins/jenkins_impl.sh +++ /dev/null @@ -1,241 +0,0 @@ -#! /bin/bash -x - -rm -rf ekat-build ekat-install - -# Merge origin master, to make sure we're up to date. If merge fails, exit. -cd ${WORK_DIR}/ekat-src; -git log -1 -git merge origin/master && cd ${WORK_DIR} -if [ $? 
-ne 0 ]; then - echo "Error trying to merge origin/master" - exit 1; -fi - -# Query scream for machine info -MPICXX=$(${SCREAM_SCRIPTS}/query-eamxx $SCREAM_MACHINE cxx_compiler) -MPICC=$(${SCREAM_SCRIPTS}/query-eamxx $SCREAM_MACHINE c_compiler) -MPIF90=$(${SCREAM_SCRIPTS}/query-eamxx $SCREAM_MACHINE f90_compiler) -COMP_J=$(${SCREAM_SCRIPTS}/query-eamxx $SCREAM_MACHINE comp_j) -TEST_J=$(${SCREAM_SCRIPTS}/query-eamxx $SCREAM_MACHINE test_j) -ISCUDA=$(${SCREAM_SCRIPTS}/query-eamxx $SCREAM_MACHINE cuda) - -# We create separate builds for single precision (SP), double precision (DP), -# DP with floating point exceptions enabled (FPE), and, on CUDA, DP with -# Cuda Unified Virtual Memory (UVM) as memory space. - -FAILED_SP="" -FAILED_DP="" -FAILED_FPE="" -FAILED_UVM="" -RET_SP=0 -RET_DP=0 -RET_FPE=0 -RET_UVM=0 - -export CTEST_PARALLEL_LEVEL=${TEST_J} -EKAT_THREAD_SETTINGS="" -if [[ "$ISCUDA" == "False" ]]; then - EKAT_THREAD_SETTINGS="-DEKAT_TEST_THREAD_INC=2 -DEKAT_TEST_MAX_THREADS=14" -fi - -# Build and test single precision -mkdir -p ekat-build/ekat-sp && cd ekat-build/ekat-sp && rm -rf * - -cmake -C ${WORK_DIR}/ekat-src/cmake/machine-files/${NODE_NAME}.cmake \ - -DCMAKE_INSTALL_PREFIX=${WORK_DIR}/ekat-install/ekat-sp \ - -DCMAKE_BUILD_TYPE=DEBUG \ - -DCMAKE_C_COMPILER=${MPICC} \ - -DCMAKE_CXX_COMPILER=${MPICXX} \ - -DCMAKE_Fortran_COMPILER=${MPIF90} \ - -DEKAT_DISABLE_TPL_WARNINGS=ON \ - -DEKAT_ENABLE_FPE_DEFAULT_MASK=OFF \ - -DEKAT_ENABLE_TESTS=ON \ - -DEKAT_TEST_DOUBLE_PRECISION=OFF \ - -DEKAT_TEST_SINGLE_PRECISION=ON \ - ${EKAT_THREAD_SETTINGS} \ - ${WORK_DIR}/ekat-src - -if [ $? -ne 0 ]; then - echo "Something went wrong while configuring the SP case." - RET_SP=1 -else - make -j ${COMP_J} - if [ $? -ne 0 ]; then - echo "Something went wrong while building the SP case." - RET_SP=1 - else - ctest --output-on-failure - if [ $? -ne 0 ]; then - echo "Something went wrong while testing the SP case." 
- RET_SP=1 - FAILED_SP=$(cat Testing/Temporary/LastTestsFailed.log) - else - make install - if [ $? -ne 0 ]; then - echo "Something went wrong while installing the SP case." - RET_SP=1 - fi - fi - fi -fi -cd ${WORK_DIR} - -# Build and test double precision -mkdir -p ekat-build/ekat-dp && cd ekat-build/ekat-dp && rm -rf * - -cmake -C ${WORK_DIR}/ekat-src/cmake/machine-files/${NODE_NAME}.cmake \ - -DCMAKE_INSTALL_PREFIX=${WORK_DIR}/ekat-install/ekat-dp \ - -DCMAKE_BUILD_TYPE=DEBUG \ - -DCMAKE_C_COMPILER=${MPICC} \ - -DCMAKE_CXX_COMPILER=${MPICXX} \ - -DCMAKE_Fortran_COMPILER=${MPIF90} \ - -DEKAT_DISABLE_TPL_WARNINGS=ON \ - -DEKAT_ENABLE_TESTS=ON \ - -DEKAT_ENABLE_FPE_DEFAULT_MASK=OFF \ - -DEKAT_TEST_DOUBLE_PRECISION=ON \ - -DEKAT_TEST_SINGLE_PRECISION=OFF \ - ${EKAT_THREAD_SETTINGS} \ - ${WORK_DIR}/ekat-src - -if [ $? -ne 0 ]; then - echo "Something went wrong while configuring the DP case." - RET_DP=1 -else - make -j ${COMP_J} - if [ $? -ne 0 ]; then - echo "Something went wrong while building the DP case." - RET_DP=1 - else - ctest --output-on-failure - if [ $? -ne 0 ]; then - echo "Something went wrong while testing the DP case." - RET_DP=1 - FAILED_DP=$(cat Testing/Temporary/LastTestsFailed.log) - else - make install - if [ $? -ne 0 ]; then - echo "Something went wrong while installing the DP case." 
- RET_DP=1 - fi - fi - fi -fi -cd ${WORK_DIR} - -if [[ "$ISCUDA" == "False" ]]; then - # Build and test double precision with FPE on, and packsize=1 - mkdir -p ekat-build/ekat-fpe && cd ekat-build/ekat-fpe && rm -rf * - - cmake -C ${WORK_DIR}/ekat-src/cmake/machine-files/${NODE_NAME}.cmake \ - -DCMAKE_INSTALL_PREFIX=${WORK_DIR}/ekat-install/ekat-fpe \ - -DCMAKE_BUILD_TYPE=DEBUG \ - -DCMAKE_C_COMPILER=${MPICC} \ - -DCMAKE_CXX_COMPILER=${MPICXX} \ - -DCMAKE_Fortran_COMPILER=${MPIF90} \ - -DEKAT_DISABLE_TPL_WARNINGS=ON \ - -DEKAT_ENABLE_TESTS=ON \ - -DEKAT_ENABLE_FPE_DEFAULT_MASK=ON \ - -DEKAT_TEST_PACK_SIZE=1 \ - -DEKAT_TEST_DOUBLE_PRECISION=ON \ - -DEKAT_TEST_SINGLE_PRECISION=OFF \ - ${EKAT_THREAD_SETTINGS} \ - ${WORK_DIR}/ekat-src - - if [ $? -ne 0 ]; then - echo "Something went wrong while configuring the FPE case." - RET_FPE=1 - else - make -j ${COMP_J} - if [ $? -ne 0 ]; then - echo "Something went wrong while building the FPE case." - RET_FPE=1 - else - ctest --output-on-failure - if [ $? -ne 0 ]; then - echo "Something went wrong while testing the FPE case." - RET_FPE=1 - FAILED_FPE=$(cat Testing/Temporary/LastTestsFailed.log) - else - make install - if [ $? -ne 0 ]; then - echo "Something went wrong while installing the FPE case." - RET_FPE=1 - fi - fi - fi - fi - cd ${WORK_DIR} -fi - -if [[ "$ISCUDA" == "True" ]]; then - # Build and test Cuda UVM - mkdir -p ekat-build/ekat-uvm && cd ekat-build/ekat-uvm && rm -rf * - - cmake -C ${WORK_DIR}/ekat-src/cmake/machine-files/${NODE_NAME}.cmake \ - -DCMAKE_INSTALL_PREFIX=${WORK_DIR}/ekat-install/ekat-uvm \ - -DCMAKE_BUILD_TYPE=DEBUG \ - -DCMAKE_C_COMPILER=${MPICC} \ - -DCMAKE_CXX_COMPILER=${MPICXX} \ - -DCMAKE_Fortran_COMPILER=${MPIF90} \ - -DEKAT_DISABLE_TPL_WARNINGS=ON \ - -DEKAT_ENABLE_TESTS=ON \ - -DEKAT_TEST_DOUBLE_PRECISION=ON \ - -DEKAT_TEST_SINGLE_PRECISION=OFF \ - -DKokkos_ENABLE_CUDA_UVM=ON \ - ${EKAT_THREAD_SETTINGS} \ - ${WORK_DIR}/ekat-src - - if [ $? 
-ne 0 ]; then - echo "Something went wrong while configuring the UVM case." - RET_UVM=1 - else - make -j ${COMP_J} - if [ $? -ne 0 ]; then - echo "Something went wrong while building the UVM case." - RET_UVM=1 - else - ctest --output-on-failure - if [ $? -ne 0 ]; then - echo "Something went wrong while testing the UVM case." - RET_UVM=1 - FAILED_UVM=$(cat Testing/Temporary/LastTestsFailed.log) - else - make install - if [ $? -ne 0 ]; then - echo "Something went wrong while installing the UVM case." - RET_UVM=1 - fi - fi - fi - fi - cd ${WORK_DIR} -fi - -# Print list of failed tests -if [[ $RET_SP -ne 0 && "$FAILED_SP" != "" ]]; then - echo "List of failed SP tests:" - echo "$FAILED_SP" -fi - -if [[ $RET_DP -ne 0 && "$FAILED_DP" != "" ]]; then - echo "List of failed DP tests:" - echo "$FAILED_DP" -fi - -if [[ $RET_FPE -ne 0 && "$FAILED_FPE" != "" ]]; then - echo "List of failed FPE tests:" - echo "$FAILED_FPE" -fi - -if [[ $RET_UVM -ne 0 && "$FAILED_UVM" != "" ]]; then - echo "List of failed DP tests:" - echo "$FAILED_UVM" -fi - -# Check if all builds succeded, and establish success/fail -if [[ $RET_SP -ne 0 || $RET_DP -ne 0 || $RET_FPE -ne 0 || $RET_UVM -ne 0 ]]; then - exit 1; -fi - -# All good, return 0 -exit 0; diff --git a/tests/core/fpe_check.cpp b/tests/core/fpe_check.cpp index 7da39b12..a5571ad7 100644 --- a/tests/core/fpe_check.cpp +++ b/tests/core/fpe_check.cpp @@ -2,7 +2,7 @@ #include -namespace scream { +namespace { TEST_CASE("force_fpe") {