Skip to content

Commit 6692bd7

Browse files
author
Andrija Kolic
committed
[GR-64081] Implement new 'graalos' bench suite
PullRequest: graal/20779
2 parents 4478ae4 + 9499f1a commit 6692bd7

File tree

3 files changed: +396 additions, −95 deletions

common.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
"Jsonnet files should not include this file directly but use ci/common.jsonnet instead."
55
],
66

7-
"mx_version": "7.51.2",
7+
"mx_version": "7.54.3",
88

99
"COMMENT.jdks": "When adding or removing JDKs keep in sync with JDKs in ci/common.jsonnet",
1010
"jdks": {

sdk/mx.sdk/mx_sdk_benchmark.py

Lines changed: 34 additions & 78 deletions
Original file line numberDiff line numberDiff line change
@@ -71,7 +71,7 @@
7171
import mx_sdk_vm
7272
import mx_sdk_vm_impl
7373
import mx_util
74-
from mx_benchmark import DataPoints, DataPoint, BenchmarkSuite
74+
from mx_benchmark import DataPoints, DataPoint, BenchmarkSuite, Vm, SingleBenchmarkExecutionContext
7575
from mx_sdk_vm_impl import svm_experimental_options
7676

7777
_suite = mx.suite('sdk')
@@ -2815,17 +2815,8 @@ def __init__(self, custom_harness_command: mx_benchmark.CustomHarnessCommand = N
28152815
custom_harness_command = BaristaBenchmarkSuite.BaristaCommand()
28162816
super().__init__(custom_harness_command)
28172817
self._version = None
2818-
self._context = None
28192818
self._extra_run_options = []
28202819

2821-
@property
2822-
def context(self):
2823-
return self._context
2824-
2825-
@context.setter
2826-
def context(self, value):
2827-
self._context = value
2828-
28292820
def readBaristaVersionFromPyproject(self):
28302821
# tomllib was included in python standard library with version 3.11
28312822
try:
@@ -2858,6 +2849,9 @@ def group(self):
28582849
def subgroup(self):
28592850
return "graal-compiler"
28602851

2852+
def benchmarkName(self):
2853+
return self.execution_context.benchmark
2854+
28612855
def benchmarkList(self, bmSuiteArgs):
28622856
exclude = []
28632857
# Barista currently does not support running 'micronaut-pegasus' on the JVM - running it results in a crash (GR-59793)
@@ -2892,15 +2886,18 @@ def baristaHarnessPath(self):
28922886
return self.baristaFilePath("barista")
28932887

28942888
def baristaHarnessBenchmarkName(self):
2895-
return _baristaConfig["benchmarks"][self.context.benchmark].get("barista-bench-name", self.context.benchmark)
2889+
return _baristaConfig["benchmarks"][self.benchmarkName()].get("barista-bench-name", self.benchmarkName())
28962890

28972891
def baristaHarnessBenchmarkWorkload(self):
2898-
return _baristaConfig["benchmarks"][self.context.benchmark].get("workload")
2892+
return _baristaConfig["benchmarks"][self.benchmarkName()].get("workload")
28992893

29002894
def validateEnvironment(self):
29012895
self.baristaProjectConfigurationPath()
29022896
self.baristaHarnessPath()
29032897

2898+
def new_execution_context(self, vm: Vm, benchmarks: List[str], bmSuiteArgs: List[str]) -> SingleBenchmarkExecutionContext:
2899+
return SingleBenchmarkExecutionContext(self, vm, benchmarks, bmSuiteArgs)
2900+
29042901
def register_tracker(self, name, tracker_type):
29052902
if tracker_type in _baristaConfig["disable_trackers"]:
29062903
mx.log(f"Ignoring the registration of '{name}' tracker as it was disabled for {self.__class__.__name__}.")
@@ -2934,7 +2931,7 @@ def rules(self, out, benchmarks, bmSuiteArgs):
29342931

29352932
# Startup
29362933
all_rules.append(mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
2937-
"benchmark": self.context.benchmark,
2934+
"benchmark": self.benchmarkName(),
29382935
"metric.name": "request-time",
29392936
"metric.type": "numeric",
29402937
"metric.unit": "ms",
@@ -2947,7 +2944,7 @@ def rules(self, out, benchmarks, bmSuiteArgs):
29472944

29482945
# Warmup
29492946
all_rules.append(mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
2950-
"benchmark": self.context.benchmark,
2947+
"benchmark": self.benchmarkName(),
29512948
"metric.name": "warmup",
29522949
"metric.type": "numeric",
29532950
"metric.unit": "op/s",
@@ -2960,7 +2957,7 @@ def rules(self, out, benchmarks, bmSuiteArgs):
29602957

29612958
# Throughput
29622959
all_rules.append(mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
2963-
"benchmark": self.context.benchmark,
2960+
"benchmark": self.benchmarkName(),
29642961
"metric.name": "throughput",
29652962
"metric.type": "numeric",
29662963
"metric.unit": "op/s",
@@ -2973,7 +2970,7 @@ def rules(self, out, benchmarks, bmSuiteArgs):
29732970

29742971
# Latency
29752972
all_rules += [mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
2976-
"benchmark": self.context.benchmark,
2973+
"benchmark": self.benchmarkName(),
29772974
"metric.name": "latency",
29782975
"metric.type": "numeric",
29792976
"metric.unit": "ms",
@@ -2992,7 +2989,7 @@ def rules(self, out, benchmarks, bmSuiteArgs):
29922989

29932990
# Resource Usage
29942991
all_rules += [mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
2995-
"benchmark": self.context.benchmark,
2992+
"benchmark": self.benchmarkName(),
29962993
"metric.name": "rss",
29972994
"metric.type": "numeric",
29982995
"metric.unit": "MB",
@@ -3082,17 +3079,6 @@ def extendDatapoints(self, datapoints: DataPoints) -> DataPoints:
30823079
del datapoint["load-tester.command"]
30833080
return datapoints
30843081

3085-
def _vmRun(self, vm, workdir, command, benchmarks, bmSuiteArgs):
3086-
self.enforce_single_benchmark(benchmarks)
3087-
self.context = BaristaBenchmarkSuite.RuntimeContext(self, vm, benchmarks[0], bmSuiteArgs)
3088-
return super()._vmRun(vm, workdir, command, benchmarks, bmSuiteArgs)
3089-
3090-
def enforce_single_benchmark(self, benchmarks):
3091-
if not isinstance(benchmarks, list):
3092-
raise TypeError(f"{self.__class__.__name__} expects to receive a list of benchmarks to run, instead got an instance of {benchmarks.__class__.__name__}! Please specify a single benchmark!")
3093-
if len(benchmarks) != 1:
3094-
raise ValueError(f"You have requested {benchmarks} to be run but {self.__class__.__name__} can only run a single benchmark at a time! Please specify a single benchmark!")
3095-
30963082
class BaristaCommand(mx_benchmark.CustomHarnessCommand):
30973083
"""Maps a JVM command into a command tailored for the Barista harness.
30983084
"""
@@ -3151,7 +3137,7 @@ def produceHarnessCommand(self, cmd, suite):
31513137
jvm_vm_options = jvm_cmd[index_of_java_exe + 1:]
31523138

31533139
# Verify that the run arguments don't already contain a "--mode" option
3154-
run_args = suite.runArgs(suite.context.bmSuiteArgs) + suite._extra_run_options
3140+
run_args = suite.runArgs(suite.execution_context.bmSuiteArgs) + suite._extra_run_options
31553141
mode_pattern = r"^(?:-m|--mode)(=.*)?$"
31563142
mode_match = self._regexFindInCommand(run_args, mode_pattern)
31573143
if mode_match:
@@ -3175,37 +3161,6 @@ def produceHarnessCommand(self, cmd, suite):
31753161
barista_cmd.append(barista_bench_name)
31763162
return barista_cmd
31773163

3178-
class RuntimeContext():
3179-
"""Container class for the runtime context of BaristaBenchmarkSuite.
3180-
"""
3181-
def __init__(self, suite, vm, benchmark, bmSuiteArgs):
3182-
if not isinstance(suite, BaristaBenchmarkSuite):
3183-
raise TypeError(f"Expected an instance of {BaristaBenchmarkSuite.__name__}, instead got an instance of {suite.__class__.__name__}")
3184-
self._suite = suite
3185-
self._vm = vm
3186-
self._benchmark = benchmark
3187-
self._bmSuiteArgs = bmSuiteArgs
3188-
3189-
@property
3190-
def suite(self):
3191-
return self._suite
3192-
3193-
@property
3194-
def vm(self):
3195-
return self._vm
3196-
3197-
@property
3198-
def benchmark(self):
3199-
"""The currently running benchmark.
3200-
3201-
Corresponds to `benchmarks[0]` in a suite method that has a `benchmarks` argument.
3202-
"""
3203-
return self._benchmark
3204-
3205-
@property
3206-
def bmSuiteArgs(self):
3207-
return self._bmSuiteArgs
3208-
32093164

32103165
mx_benchmark.add_bm_suite(BaristaBenchmarkSuite())
32113166

@@ -3959,24 +3914,25 @@ def run(self, benchmarks, bm_suite_args: List[str]) -> DataPoints:
39593914
fallback_reason = self.fallback_mode_reason(bm_suite_args)
39603915

39613916
vm = self.get_vm_registry().get_vm_from_suite_args(bm_suite_args)
3962-
effective_stages, complete_stage_list = vm.prepare_stages(self, bm_suite_args)
3963-
self.stages_info = StagesInfo(effective_stages, complete_stage_list, vm, bool(fallback_reason))
3964-
3965-
if self.stages_info.fallback_mode:
3966-
# In fallback mode, all stages are run at once. There is matching code in `NativeImageVM.run_java` for this.
3967-
mx.log(f"Running benchmark in fallback mode (reason: {fallback_reason})")
3968-
datapoints += super_delegate.run(benchmarks, bm_suite_args)
3969-
else:
3970-
while self.stages_info.has_next_stage():
3971-
stage = self.stages_info.next_stage()
3972-
# Start the actual benchmark execution. The stages_info attribute will be used by the NativeImageVM to
3973-
# determine which stage to run this time.
3974-
stage_dps = super_delegate.run(benchmarks, bm_suite_args)
3975-
NativeImageBenchmarkMixin._inject_stage_keys(stage_dps, stage)
3976-
datapoints += stage_dps
3977-
3978-
self.stages_info = None
3979-
return datapoints
3917+
with self.new_execution_context(vm, benchmarks, bm_suite_args):
3918+
effective_stages, complete_stage_list = vm.prepare_stages(self, bm_suite_args)
3919+
self.stages_info = StagesInfo(effective_stages, complete_stage_list, vm, bool(fallback_reason))
3920+
3921+
if self.stages_info.fallback_mode:
3922+
# In fallback mode, all stages are run at once. There is matching code in `NativeImageVM.run_java` for this.
3923+
mx.log(f"Running benchmark in fallback mode (reason: {fallback_reason})")
3924+
datapoints += super_delegate.run(benchmarks, bm_suite_args)
3925+
else:
3926+
while self.stages_info.has_next_stage():
3927+
stage = self.stages_info.next_stage()
3928+
# Start the actual benchmark execution. The stages_info attribute will be used by the NativeImageVM to
3929+
# determine which stage to run this time.
3930+
stage_dps = super_delegate.run(benchmarks, bm_suite_args)
3931+
NativeImageBenchmarkMixin._inject_stage_keys(stage_dps, stage)
3932+
datapoints += stage_dps
3933+
3934+
self.stages_info = None
3935+
return datapoints
39803936

39813937
@staticmethod
39823938
def _inject_stage_keys(dps: DataPoints, stage: Stage) -> None:

0 commit comments

Comments (0)