19 changes: 3 additions & 16 deletions .github/actions/dynamic-dry-run/action.yml
@@ -1,33 +1,20 @@
 name: Dynamic dry run template
 inputs:
-  benchmark_name:
-    required: true
-    type: string
-  benchmark_mode:
-    required: true
-    type: string
   benchmark_spec:
     required: true
     type: string
-  system_name:
-    required: true
-    type: string
   system_spec:
     required: true
     type: string
 runs:
   using: composite
   steps:
-    - name: ${{ inputs.benchmark_name }}/${{ inputs.benchmark_mode }} ${{ inputs.system_name }}
-        ${{ inputs.system_spec }}
+    - name: ${{ inputs.benchmark_spec }} ${{ inputs.system_spec }}
       shell: bash
       run: |-
         timestamp=$(date +%s)
-        bn="${{ inputs.benchmark_name }}"
-        bm="${{ inputs.benchmark_mode }}"
-        benchmark="$bn-$bm-$timestamp"
-        sn="${{ inputs.system_name }}"
-        system="$sn-$timestamp"
+        benchmark="b-$timestamp"
+        system="s-$timestamp"
         ./bin/benchpark system init --dest=$system ${{ inputs.system_spec }}
         ./bin/benchpark experiment init --dest=$benchmark ${{ inputs.benchmark_spec }}
         ./bin/benchpark setup ./$benchmark ./$system workspace/
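With the action now taking just two free-form spec strings, a caller forwards one benchmark_spec/system_spec pair per dry run. A minimal sketch of such a caller job follows; the job name, the checkout step, and the example spec values are illustrative assumptions (the system spec is borrowed from the matrix script added below), not part of this diff. In the real workflow these values would come from the generated matrix.

# Hypothetical caller job; names and spec values are illustrative only.
dry-run-example:
  runs-on: ubuntu-latest
  steps:
    - uses: actions/checkout@v4
    - uses: ./.github/actions/dynamic-dry-run
      with:
        benchmark_spec: saxpy+openmp            # placeholder experiment spec
        system_spec: llnl-cluster cluster=ruby  # one of the specs from the matrix script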
45 changes: 45 additions & 0 deletions .github/workflows/gen-dynamic-runs.py
@@ -0,0 +1,45 @@
import subprocess
import json


# Map each experiment mode to the system specs it should be dry-run against;
# the benchmark_spec lists are filled in below from `benchpark list experiments`.
data = {
    "openmp": {"benchmark_spec": [], "system_spec": ["llnl-cluster cluster=ruby"]},
    "cuda": {"benchmark_spec": [], "system_spec": ["llnl-sierra"]},
    "rocm": {"benchmark_spec": [], "system_spec": ["llnl-elcapitan cluster=tioga"]},
    "weak": {"benchmark_spec": [], "system_spec": ["generic-x86"]},
    "strong": {"benchmark_spec": [], "system_spec": ["generic-x86"]},
    "single_node": {"benchmark_spec": [], "system_spec": ["generic-x86"]},
    "throughput": {"benchmark_spec": [], "system_spec": ["generic-x86"]},
}


def main():
    try:
        expr_cmd = subprocess.run(
            [
                "./bin/benchpark",
                "list",
                "experiments",
            ],
            capture_output=True,
            check=True,
        )
    except subprocess.CalledProcessError as e:
        raise RuntimeError(f"Output: {e.stdout}\nError: {e.stderr}")

    # Entries containing "+" are "<benchmark>+<mode>" specs; group each one
    # under its mode in the matrix.
    expr_str = str(expr_cmd.stdout, "utf-8")
    experiments = expr_str.replace(" ", "").replace("\t", "").split("\n")
    experiments = [item for item in experiments if "+" in item]

    for expr in experiments:
        name, mode = expr.split("+")
        data[mode]["benchmark_spec"].append(expr)

    with open("matrix.json", "w") as f:
        json.dump(data, f)


if __name__ == "__main__":
    main()
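The script shells out to ./bin/benchpark and writes matrix.json into the current directory, so it needs to run from the repository root. One plausible way for a workflow job to publish that file for downstream matrix jobs is sketched below; the job id, step id, and output name are assumptions, not taken from this change.

# Hypothetical generator job; ids and the output name are assumptions.
generate-matrix:
  runs-on: ubuntu-latest
  outputs:
    matrix: ${{ steps.gen.outputs.matrix }}
  steps:
    - uses: actions/checkout@v4
    - id: gen
      run: |
        python .github/workflows/gen-dynamic-runs.py
        echo "matrix=$(cat matrix.json)" >> "$GITHUB_OUTPUT"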