Merged

Commits (39)
aadc21e
Fix memory units for mac in output
tdrwenski Apr 22, 2025
245bc10
Increase accuracy of timing that is output
tdrwenski Apr 22, 2025
22b0e65
Refactor some test functions to test_utils
tdrwenski Apr 22, 2025
ee16d30
Add pytest benchmark and performance tests
tdrwenski Apr 22, 2025
b29f7b2
Add gitlab job for performance tests and github benchmark workflow fo…
tdrwenski Apr 22, 2025
89dabef
Add performance tests as separate pipeline that also runs weekly
tdrwenski Apr 23, 2025
b322cef
Remove run on PR for benchmark workflow
tdrwenski Apr 23, 2025
cdc2591
Skip build-and-test when running scheduled pipeline
tdrwenski Apr 23, 2025
236056f
Add fail on alert to benchmark workflow
tdrwenski Apr 23, 2025
fb8af78
Fix sharing cli options between tests
tdrwenski Apr 24, 2025
addd524
Add readme for performance tests
tdrwenski Apr 24, 2025
6c43aae
Add memory results to the pytest-benchmark json output
tdrwenski Apr 24, 2025
332c657
Add extra test output
tdrwenski Apr 24, 2025
a2d4d2b
Add pytest-benchmark generated benchmarks to the gitignore
tdrwenski Apr 24, 2025
d6fef8e
Use custom metric for benchmark dashboard so we can display memory
tdrwenski Apr 24, 2025
5013c47
Revert custom pytest hook and put benchmark format conversion in own …
tdrwenski Apr 24, 2025
a1d32b9
Remove extraneous info from dashboard
tdrwenski Apr 25, 2025
6daa8bb
Update the GitLab ref and later only submit results from main to dash…
tdrwenski Apr 25, 2025
e9328ed
Add a step to make sure workflow passes/fails in PR associated with b…
tdrwenski Apr 25, 2025
7578203
Fix benchmark workflow to report results on PR
tdrwenski Apr 25, 2025
272f06a
Add performance test case
tdrwenski Apr 25, 2025
c29ea85
Fix performance job name
tdrwenski Apr 25, 2025
4050ae7
Update test case
tdrwenski Apr 28, 2025
fd69454
Move tests to tests directory
tdrwenski Apr 29, 2025
97b160b
Update test paths
tdrwenski Apr 29, 2025
41d77c7
Update pytest options
tdrwenski Apr 29, 2025
691fbfe
Style fix
tdrwenski Apr 29, 2025
9bad917
Update regression test path in docs
tdrwenski Apr 29, 2025
a903a20
Revert "Increase accuracy of timing that is output"
tdrwenski Apr 29, 2025
5d33000
Update test case
tdrwenski May 2, 2025
2f5b50a
Update test names
tdrwenski May 2, 2025
64444d6
Update test case
tdrwenski May 2, 2025
be92f00
Make performance tests run in stage perf-test after regression tests …
tdrwenski May 13, 2025
a04b408
Add a petsc-parallel regression test.
steffi7574 May 13, 2025
d3d6bea
Fix Readme .
steffi7574 May 13, 2025
3464cb3
Update readme to have info about performance test dashboard
tdrwenski May 14, 2025
991ea20
Update user_guide.pdf
tdrwenski May 15, 2025
1b11484
Removing testing workaround-- only push to the dashboard on main
tdrwenski May 15, 2025
00d0f8f
Improve test readme
tdrwenski May 16, 2025
26 changes: 24 additions & 2 deletions .ci-scripts/build_and_test.sh
@@ -29,6 +29,7 @@ use_dev_shm=${USE_DEV_SHM:-true}
spack_debug=${SPACK_DEBUG:-false}
debug_mode=${DEBUG_MODE:-false}
push_to_registry=${PUSH_TO_REGISTRY:-true}
performance_tests=${PERFORMANCE_TESTS:-false}

# REGISTRY_TOKEN allows you to provide your own personal access token to the CI
# registry. Be sure to set the token with at least read access to the registry.
@@ -223,7 +224,7 @@ then
fi

# Test
if [[ "${option}" != "--build-only" ]]
if [[ "${option}" != "--build-only" && "${performance_tests}" != "true" ]]
then

if [[ ! -d ${build_dir} ]]
@@ -262,11 +263,32 @@ then
timed_message "Run regression tests"

mpi_exe=$(grep 'MPIEXEC_EXECUTABLE' "${hostconfig_path}" | cut -d'"' -f2 | sed 's/;/ /g')
pytest -v -s regression_tests --mpi-exec="${mpi_exe}"
pytest -v -s tests/regression --mpi-exec="${mpi_exe}"

timed_message "Quandary tests completed"
fi

# Performance tests
if [[ "${option}" != "--build-only" && "${performance_tests}" == "true" ]]
then

cd ${project_dir}

timed_message "Performance tests for Quandary"

timed_message "Install python test dependencies"

eval `${spack_cmd} env activate ${spack_env_path} --sh`
python -m pip install -e . --prefer-binary

timed_message "Run performance tests"

mpi_exe=$(grep 'MPIEXEC_EXECUTABLE' "${hostconfig_path}" | cut -d'"' -f2 | sed 's/;/ /g')
pytest -v -s tests/performance --mpi-exec="${mpi_exe}" --benchmark-json=benchmark_results.json

timed_message "Quandary performance tests completed"
fi

cd ${project_dir}

timed_message "Build and test completed"
65 changes: 65 additions & 0 deletions .github/workflows/benchmark.yml
@@ -0,0 +1,65 @@
name: Process Benchmark

on:
workflow_dispatch:
inputs:
benchmark_data:
description: 'Base64 encoded benchmark data'
required: true

jobs:
store-and-visualize:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3

- name: Find associated PR
id: findpr
run: |
PR_NUMBER=$(gh pr list --head ${{ github.ref_name }} --json number -q '.[0].number')
echo "pr_number=$PR_NUMBER" >> $GITHUB_OUTPUT
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

- name: Decode benchmark data
run: echo "${{ github.event.inputs.benchmark_data }}" | base64 -d > pytest_benchmark.json

- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.10'

- name: Convert benchmark format
run: |
python tests/performance/convert_benchmark_format.py pytest_benchmark.json benchmark.json

- name: Store benchmark result
id: benchmark
continue-on-error: true
uses: benchmark-action/github-action-benchmark@v1
with:
tool: 'customSmallerIsBetter'
output-file-path: benchmark.json
github-token: ${{ secrets.GITHUB_TOKEN }}
auto-push: ${{ github.ref == 'refs/heads/main' }}
gh-pages-branch: gh-pages
benchmark-data-dir-path: dev/bench
comment-on-alert: true
fail-on-alert: true
alert-threshold: '120%'
max-items-in-chart: 100

- name: Create Check for PRs
if: steps.findpr.outputs.pr_number != ''
uses: LouisBrunner/checks-action@v1.6.1
with:
token: ${{ secrets.GITHUB_TOKEN }}
name: Performance Benchmark
sha: ${{ github.sha }}
conclusion: ${{ steps.benchmark.outcome == 'success' && 'success' || 'failure' }}
output: |
{"summary": "${{ steps.benchmark.outcome == 'success' && 'Performance check passed' || 'Performance regression detected!' }}"}

- name: Final status
if: steps.benchmark.outcome != 'success'
run: exit 1
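
The "Convert benchmark format" step above reshapes pytest-benchmark's JSON into the flat list of `{name, unit, value}` entries that `github-action-benchmark`'s `customSmallerIsBetter` tool expects. Below is a minimal sketch of what `tests/performance/convert_benchmark_format.py` could look like; the field carrying memory (`extra_info["memory_mb"]`) is an assumption, not the script's confirmed layout.

```python
#!/usr/bin/env python3
"""Sketch: convert pytest-benchmark JSON to the 'customSmallerIsBetter' format."""
import json
import sys


def convert(in_path: str, out_path: str) -> None:
    with open(in_path) as f:
        data = json.load(f)

    entries = []
    for bench in data.get("benchmarks", []):
        # Wall-clock time: mean over the measured rounds, in seconds.
        entries.append({
            "name": f"{bench['name']} - time",
            "unit": "s",
            "value": bench["stats"]["mean"],
        })
        # Peak memory, if the test recorded it (field name is an assumption).
        memory_mb = bench.get("extra_info", {}).get("memory_mb")
        if memory_mb is not None:
            entries.append({
                "name": f"{bench['name']} - memory",
                "unit": "MB",
                "value": memory_mb,
            })

    with open(out_path, "w") as f:
        json.dump(entries, f, indent=2)


if __name__ == "__main__":
    convert(sys.argv[1], sys.argv[2])
```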
2 changes: 1 addition & 1 deletion .github/workflows/test.yml
@@ -41,7 +41,7 @@ jobs:
run: |
spack env activate .spack_env
python -m pip install -e . --prefer-binary
pytest -v -s regression_tests --mpi-opt="--oversubscribe"
pytest -v -s tests/regression --mpi-opt="--oversubscribe"

- name: Push packages and update index
env:
1 change: 1 addition & 0 deletions .gitignore
@@ -15,3 +15,4 @@ __pycache__
run_dir/
.spack_env/.spack-env/
.spack_env/spack.lock
.benchmarks/
14 changes: 14 additions & 0 deletions .gitlab-ci.yml
@@ -65,6 +65,7 @@ variables:
stages:
- prerequisites
- build-and-test
- perf-test

# Template for jobs triggering a build-and-test sub-pipeline:
.build-and-test:
@@ -81,6 +82,19 @@ stages:
forward:
pipeline_variables: true

.performance-test:
stage: perf-test
trigger:
include:
- local: '.gitlab/custom-jobs-and-variables.yml'
- project: 'radiuss/radiuss-shared-ci'
ref: 'v2024.07.0'
file: 'pipelines/${CI_MACHINE}.yml'
- local: '.gitlab/jobs/ruby-performance.yml'
strategy: depend
forward:
pipeline_variables: true

include:
# Sets ID tokens for every job using `default:`
- project: 'lc-templates/id_tokens'
18 changes: 18 additions & 0 deletions .gitlab/custom-jobs-and-variables.yml
@@ -49,6 +49,24 @@ variables:
reports:
junit: junit.xml

.performance_job:
artifacts:
paths:
- benchmark_results.json
after_script:
- |
if [ -f "benchmark_results.json" ]; then
echo "Sending benchmark results to GitHub..."
BENCHMARK_DATA=$(base64 -w 0 benchmark_results.json)
curl -X POST \
-H "Authorization: token $GITHUB_TOKEN" \
-H "Accept: application/vnd.github.v3+json" \
https://api.github.com/repos/${GITHUB_PROJECT_ORG}/${GITHUB_PROJECT_NAME}/actions/workflows/benchmark.yml/dispatches \
-d "{\"ref\":\"${CI_COMMIT_REF_NAME}\",\"inputs\":{\"benchmark_data\":\"$BENCHMARK_DATA\"}}"
else
echo "benchmark_results.json not found, skipping GitHub API integration."
fi

.reproducer_vars:
script:
- |
18 changes: 18 additions & 0 deletions .gitlab/jobs/ruby-performance.yml
@@ -0,0 +1,18 @@
###############################################################################
# Copyright (c) 2022-23, Lawrence Livermore National Security, LLC and RADIUSS
# project contributors. See the COPYRIGHT file for details.
#
# SPDX-License-Identifier: (MIT)
###############################################################################

.performance_base_job:
extends:
- .job_on_ruby
- .performance_job
variables:
PERFORMANCE_TESTS: "true"

clang_14_0_6:
extends: .performance_base_job
variables:
SPEC: "${PROJECT_RUBY_VARIANTS} %clang@=14.0.6 ${PROJECT_RUBY_DEPS}"
6 changes: 5 additions & 1 deletion .gitlab/subscribed-pipelines.yml
@@ -82,4 +82,8 @@ lassen-build-and-test:
needs: [lassen-up-check, generate-job-lists]
extends: [.build-and-test]


# PERFORMANCE
ruby-performance-test:
variables:
CI_MACHINE: "ruby"
extends: [.performance-test]
11 changes: 8 additions & 3 deletions README.md
@@ -126,11 +126,16 @@ The `examples/pythoninterface` folder exemplifies the usage of Quandary's Python
# Tests

## Regression tests
Regression tests are defined in `regression_tests/` and can be run with
Regression tests are defined in `tests/regression` and can be run with
```
pytest
pytest tests/regression
```
See regression_tests/README.md for more information.
See `tests/regression/README.md` for more information.

## Performance tests
Performance regression tests are defined in `tests/performance`.
The latest results from `main` are shown on this [performance dashboard](https://software.llnl.gov/quandary/dev/bench/).
See `tests/performance/README.md` for more information.

# Community and Contributing

Binary file modified doc/user_guide.pdf
2 changes: 1 addition & 1 deletion doc/user_guide.tex
@@ -782,7 +782,7 @@ \subsection{Plotting}

\section{Testing}
\begin{itemize}
\item Quandary has a set of regression tests. Please take a look at the \verb+regression_tests/README.md+ document for instructions on how to run the regression tests.
\item Quandary has a set of regression tests. Please take a look at the \verb+tests/regression/README.md+ document for instructions on how to run the regression tests.
\item In order to check if the gradient implementation is correct, one can choose to run a Central Finite Difference test. Let the overall objective function be denoted by $F(\boldsymbol{\alpha})$. The Central Finite Difference test compares each element of the gradient $\nabla F(\boldsymbol{\alpha})$ with the following (second-order accurate) estimate:
\begin{align*}
\left(\nabla F(\boldsymbol{\alpha}) \right)_i \approx \frac{F(\bfa + \epsilon\bs{e}_i) - F(\bfa - \epsilon\bs{e}_i)}{2\epsilon} \qquad \qquad \text{(CFD)}
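
For illustration only, the (CFD) estimate above can be applied element by element with a few lines of Python; this is a generic sketch, not Quandary's implementation of the test.

```python
import numpy as np


def cfd_gradient_check(F, alpha, grad, eps=1e-5):
    """Compare an analytic gradient against central finite differences."""
    alpha = np.asarray(alpha, dtype=float)
    max_rel_err = 0.0
    for i in range(alpha.size):
        e = np.zeros_like(alpha)
        e[i] = 1.0
        # Second-order accurate central difference in direction e_i.
        fd = (F(alpha + eps * e) - F(alpha - eps * e)) / (2.0 * eps)
        rel_err = abs(grad[i] - fd) / max(abs(fd), 1e-14)
        max_rel_err = max(max_rel_err, rel_err)
    return max_rel_err  # small (roughly O(eps^2)) if the gradient is correct
```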
3 changes: 2 additions & 1 deletion pyproject.toml
@@ -13,7 +13,8 @@ dependencies = [
"matplotlib (>=3.9.0,<4.0.0)",
"pytest (>=8.3.4,<9.0.0)",
"pandas (>=2.2.3,<3.0.0)",
"pydantic (>=2.10.6,<3.0.0)"
"pydantic (>=2.10.6,<3.0.0)",
"pytest-benchmark (>=5.1.0,<6.0.0)"
]


10 changes: 8 additions & 2 deletions src/main.cpp
@@ -469,7 +469,14 @@ int main(int argc,char **argv)
/* Get memory usage */
struct rusage r_usage;
getrusage(RUSAGE_SELF, &r_usage);
double myMB = (double)r_usage.ru_maxrss / 1024.0;
double myMB;
#ifdef __APPLE__
// On macOS, ru_maxrss is in bytes
myMB = (double)r_usage.ru_maxrss / (1024.0 * 1024.0);
#else
// On Linux, ru_maxrss is in kilobytes
myMB = (double)r_usage.ru_maxrss / 1024.0;
#endif
double globalMB = myMB;
MPI_Allreduce(&myMB, &globalMB, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

@@ -479,7 +486,6 @@
printf(" Used Time: %.2f seconds\n", UsedTime);
printf(" Processors used: %d\n", mpisize_world);
printf(" Global Memory: %.2f MB [~ %.2f MB per proc]\n", globalMB, globalMB / mpisize_world);
printf(" [NOTE: The memory unit is platform dependent. If you run on MacOS, the unit will likely be KB instead of MB.]\n");
printf("\n");
}
// printf("Rank %d: %.2fMB\n", mpirank_world, myMB );
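
With the units fixed above, the `Global Memory` line in Quandary's output is always reported in MB, so a test harness could recover it from captured stdout with a simple regular expression. This is a hedged sketch of one possible approach, not necessarily how the performance tests collect memory; storing the value in `benchmark.extra_info` is likewise an assumption.

```python
import re


def parse_global_memory_mb(stdout: str):
    """Extract the 'Global Memory: X MB' value from Quandary's output, if present."""
    match = re.search(r"Global Memory:\s*([0-9.]+)\s*MB", stdout)
    return float(match.group(1)) if match else None


# Inside a pytest-benchmark test, the value could then be attached to the
# benchmark record, e.g. benchmark.extra_info["memory_mb"] = parse_global_memory_mb(out)
```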
11 changes: 4 additions & 7 deletions regression_tests/conftest.py → tests/conftest.py
@@ -1,11 +1,8 @@
def pytest_addoption(parser):
parser.addoption(
"--exact",
action="store_true",
default=False,
help="Use exact comparison for floating point numbers"
)
"""Common pytest configuration."""


def pytest_addoption(parser):
"""Add common command line options to pytest."""
parser.addoption(
"--mpi-exec",
action="store",
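
Once defined in the shared `tests/conftest.py`, the `--mpi-exec` option can be read from any test through pytest's standard `request.config.getoption`. A minimal, hypothetical usage sketch (the fixture and helper names are illustrative, not the repository's actual code):

```python
import shlex
import subprocess

import pytest


@pytest.fixture
def mpi_exec(request):
    """MPI launcher passed on the command line, e.g. --mpi-exec="mpirun"."""
    return request.config.getoption("--mpi-exec")


def run_quandary(mpi_exec, nprocs, config_path):
    # Hypothetical helper: launch Quandary under the configured MPI launcher.
    cmd = [*shlex.split(mpi_exec), "-n", str(nprocs), "./quandary", str(config_path)]
    return subprocess.run(cmd, capture_output=True, text=True, check=True)
```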
3 changes: 3 additions & 0 deletions tests/performance/.gitignore
@@ -0,0 +1,3 @@
results/
data_out/
__pycache__
44 changes: 44 additions & 0 deletions tests/performance/README.md
@@ -0,0 +1,44 @@
# Performance Regression Test Documentation

These tests are run with `pytest` and the pytest-benchmark plugin.

To run the tests from the base directory:
```
pytest tests/performance/
```
Or from the `tests/performance` directory:
```
pytest
```

## Useful options

- `pytest -v` prints the names of the tests being run

- `pytest -s` prints test output to the screen

- `pytest -k "myConfig_4"` runs only the test named "myConfig" with 4 processes

See `pytest --help` for more options.

## Local performance measurement
To save performance data from a run, do:
```
pytest -s -k "test_of_interest_4" --benchmark-autosave
```
This saves results to the `.benchmarks` directory.

To compare the current run to the previous run, do:
```
pytest -s -k "test_of_interest_4" --benchmark-compare
```

For more options, see pytest-benchmark [documentation](https://pytest-benchmark.readthedocs.io/en/stable/comparing.html).

## How to add a test

1. Create a config file in the `configs` directory, e.g., `tests/performance/configs/newSimulation.cfg`
2. Add a new entry to `test_cases.json` (a sketch of how an entry might be consumed is shown below):
   - `simulation_name` is the new simulation name, e.g., `newSimulation`.
   - `number_of_processes` is an array of integers; for each integer `i`, a simulation is run with `mpirun -n ${i}`.
   - `repetitions` is the number of times to run the test; the timings are averaged over these repetitions.
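
As referenced in step 2, the sketch below shows how a harness could expand `test_cases.json` entries into parametrized benchmark runs. The entry fields match the list above; the file layout, executable path, and helper names are assumptions for illustration only.

```python
import json
import subprocess
from pathlib import Path

import pytest

# Hypothetical: load the test cases that sit next to this file.
CASES = json.loads(Path(__file__).with_name("test_cases.json").read_text())


def _params():
    for case in CASES:
        for nprocs in case["number_of_processes"]:
            yield pytest.param(case, nprocs, id=f"{case['simulation_name']}_{nprocs}")


@pytest.mark.parametrize("case, nprocs", _params())
def test_performance(benchmark, case, nprocs):
    cfg = Path(__file__).with_name("configs") / f"{case['simulation_name']}.cfg"
    cmd = ["mpirun", "-n", str(nprocs), "./quandary", str(cfg)]
    # Average timings over the configured number of repetitions.
    benchmark.pedantic(subprocess.run, args=(cmd,), kwargs={"check": True},
                       rounds=case["repetitions"], iterations=1)
```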
58 changes: 58 additions & 0 deletions tests/performance/configs/nlevels_32_32_32_32.cfg
@@ -0,0 +1,58 @@
nlevels = 32, 32, 32, 32
ntime = 50
dt = 0.01
transfreq = 4.1, 4.2, 4.3, 4.4
selfkerr = 0.2, 0.2, 0.2, 0.2
crosskerr = 0.001, 0.001, 0.001, 0.001, 0.001, 0.001
Jkl = 0.001, 0.001, 0.001, 0.001, 0.001, 0.001
rotfreq = 4.1, 4.2, 4.3, 4.4
collapse_type = none
decay_time = 0.0, 0.0, 0.0, 0.0
dephase_time = 0.0, 0.0, 0.0, 0.0
initialcondition = pure, 1, 0, 0, 0
control_segments0 = spline, 15
control_segments1 = spline, 15
control_segments2 = spline, 15
control_segments3 = spline, 15
control_enforceBC=false
control_initialization0 = constant, 0.005
control_initialization1 = constant, 0.005
control_initialization2 = constant, 0.005
control_initialization3 = constant, 0.005
control_bounds0 = 0.008
control_bounds1 = 0.008
control_bounds2 = 0.008
control_bounds3 = 0.008
carrier_frequency0 = 0.0, -0.2, -0.001
carrier_frequency1 = 0.0, -0.2, -0.001
carrier_frequency2 = 0.0, -0.2, -0.001
carrier_frequency3 = 0.0, -0.2, -0.001
optim_target = gate, cqnot
optim_target = pure, 0, 0, 0, 0
optim_objective = Jtrace
optim_weights = 1.0
optim_atol = 1e-7
optim_rtol = 1e-8
optim_ftol = 1e-5
optim_inftol = 1e-5
optim_maxiter = 200
optim_regul = 0.00001
optim_penalty = 0.0
optim_penalty_param = 0.0
optim_penalty_dpdm = 0.0
optim_penalty_energy= 0.0
optim_penalty_variation= 0.0
optim_regul_tik0=false
datadir = ./data_out
output0 = none
output1 = none
output2 = none
output3 = none
output_frequency = 1
optim_monitor_frequency = 1
runtype = simulation
usematfree = false
linearsolver_type = gmres
linearsolver_maxiter = 20
timestepper = IMR
rand_seed = 1234