@@ -71,7 +71,7 @@
 import mx_sdk_vm
 import mx_sdk_vm_impl
 import mx_util
-from mx_benchmark import DataPoints, DataPoint, BenchmarkSuite
+from mx_benchmark import DataPoints, DataPoint, BenchmarkSuite, Vm, SingleBenchmarkExecutionContext
 from mx_sdk_vm_impl import svm_experimental_options
 
 _suite = mx.suite('sdk')
@@ -2815,17 +2815,8 @@ def __init__(self, custom_harness_command: mx_benchmark.CustomHarnessCommand = None):
             custom_harness_command = BaristaBenchmarkSuite.BaristaCommand()
         super().__init__(custom_harness_command)
         self._version = None
-        self._context = None
         self._extra_run_options = []
 
-    @property
-    def context(self):
-        return self._context
-
-    @context.setter
-    def context(self, value):
-        self._context = value
-
     def readBaristaVersionFromPyproject(self):
        # tomllib was included in python standard library with version 3.11
         try:
@@ -2858,6 +2849,9 @@ def group(self):
     def subgroup(self):
         return "graal-compiler"
 
+    def benchmarkName(self):
+        return self.execution_context.benchmark
+
     def benchmarkList(self, bmSuiteArgs):
         exclude = []
         # Barista currently does not support running 'micronaut-pegasus' on the JVM - running it results in a crash (GR-59793)
@@ -2892,15 +2886,18 @@ def baristaHarnessPath(self):
         return self.baristaFilePath("barista")
 
     def baristaHarnessBenchmarkName(self):
-        return _baristaConfig["benchmarks"][self.context.benchmark].get("barista-bench-name", self.context.benchmark)
+        return _baristaConfig["benchmarks"][self.benchmarkName()].get("barista-bench-name", self.benchmarkName())
 
     def baristaHarnessBenchmarkWorkload(self):
-        return _baristaConfig["benchmarks"][self.context.benchmark].get("workload")
+        return _baristaConfig["benchmarks"][self.benchmarkName()].get("workload")
 
     def validateEnvironment(self):
         self.baristaProjectConfigurationPath()
         self.baristaHarnessPath()
 
+    def new_execution_context(self, vm: Vm, benchmarks: List[str], bmSuiteArgs: List[str]) -> SingleBenchmarkExecutionContext:
+        return SingleBenchmarkExecutionContext(self, vm, benchmarks, bmSuiteArgs)
+
     def register_tracker(self, name, tracker_type):
         if tracker_type in _baristaConfig["disable_trackers"]:
             mx.log(f"Ignoring the registration of '{name}' tracker as it was disabled for {self.__class__.__name__}.")
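Note: this patch swaps the suite-local `RuntimeContext` (removed further down) for the shared execution-context API in `mx_benchmark`. That implementation is not part of this diff; the sketch below is only a hypothetical reconstruction of the contract the call sites above rely on, inferred from the diff itself: a context manager carrying `vm`, a single `benchmark`, and `bmSuiteArgs`.

```python
# Hypothetical sketch of the mx_benchmark contract assumed by this patch;
# the real SingleBenchmarkExecutionContext may differ in details.
class SingleBenchmarkExecutionContext:
    def __init__(self, suite, vm, benchmarks, bmSuiteArgs):
        # Mirrors the enforce_single_benchmark() check removed later in this
        # diff: exactly one benchmark per execution.
        if not isinstance(benchmarks, list) or len(benchmarks) != 1:
            raise ValueError(f"{suite.__class__.__name__} can only run a single benchmark at a time, got {benchmarks}!")
        self.suite = suite
        self.vm = vm
        self.benchmark = benchmarks[0]  # read by benchmarkName() above
        self.bmSuiteArgs = bmSuiteArgs  # read by BaristaCommand.produceHarnessCommand()

    def __enter__(self):
        # Entered as `with self.new_execution_context(...)` in the run()
        # hunk at the end of this diff.
        self.suite.execution_context = self
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.suite.execution_context = None
```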
@@ -2934,7 +2931,7 @@ def rules(self, out, benchmarks, bmSuiteArgs):
 
         # Startup
         all_rules.append(mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
-            "benchmark": self.context.benchmark,
+            "benchmark": self.benchmarkName(),
             "metric.name": "request-time",
             "metric.type": "numeric",
             "metric.unit": "ms",
@@ -2947,7 +2944,7 @@ def rules(self, out, benchmarks, bmSuiteArgs):
 
         # Warmup
         all_rules.append(mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
-            "benchmark": self.context.benchmark,
+            "benchmark": self.benchmarkName(),
             "metric.name": "warmup",
             "metric.type": "numeric",
             "metric.unit": "op/s",
@@ -2960,7 +2957,7 @@ def rules(self, out, benchmarks, bmSuiteArgs):
 
         # Throughput
         all_rules.append(mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
-            "benchmark": self.context.benchmark,
+            "benchmark": self.benchmarkName(),
             "metric.name": "throughput",
             "metric.type": "numeric",
             "metric.unit": "op/s",
@@ -2973,7 +2970,7 @@ def rules(self, out, benchmarks, bmSuiteArgs):
 
         # Latency
         all_rules += [mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
-            "benchmark": self.context.benchmark,
+            "benchmark": self.benchmarkName(),
             "metric.name": "latency",
             "metric.type": "numeric",
             "metric.unit": "ms",
@@ -2992,7 +2989,7 @@ def rules(self, out, benchmarks, bmSuiteArgs):
 
         # Resource Usage
         all_rules += [mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
-            "benchmark": self.context.benchmark,
+            "benchmark": self.benchmarkName(),
             "metric.name": "rss",
             "metric.type": "numeric",
             "metric.unit": "MB",
@@ -3082,17 +3079,6 @@ def extendDatapoints(self, datapoints: DataPoints) -> DataPoints:
                 del datapoint["load-tester.command"]
         return datapoints
 
-    def _vmRun(self, vm, workdir, command, benchmarks, bmSuiteArgs):
-        self.enforce_single_benchmark(benchmarks)
-        self.context = BaristaBenchmarkSuite.RuntimeContext(self, vm, benchmarks[0], bmSuiteArgs)
-        return super()._vmRun(vm, workdir, command, benchmarks, bmSuiteArgs)
-
-    def enforce_single_benchmark(self, benchmarks):
-        if not isinstance(benchmarks, list):
-            raise TypeError(f"{self.__class__.__name__} expects to receive a list of benchmarks to run, instead got an instance of {benchmarks.__class__.__name__}! Please specify a single benchmark!")
-        if len(benchmarks) != 1:
-            raise ValueError(f"You have requested {benchmarks} to be run but {self.__class__.__name__} can only run a single benchmark at a time! Please specify a single benchmark!")
-
     class BaristaCommand(mx_benchmark.CustomHarnessCommand):
         """Maps a JVM command into a command tailored for the Barista harness.
         """
@@ -3151,7 +3137,7 @@ def produceHarnessCommand(self, cmd, suite):
             jvm_vm_options = jvm_cmd[index_of_java_exe + 1:]
 
             # Verify that the run arguments don't already contain a "--mode" option
-            run_args = suite.runArgs(suite.context.bmSuiteArgs) + suite._extra_run_options
+            run_args = suite.runArgs(suite.execution_context.bmSuiteArgs) + suite._extra_run_options
             mode_pattern = r"^(?:-m|--mode)(=.*)?$"
             mode_match = self._regexFindInCommand(run_args, mode_pattern)
             if mode_match:
@@ -3175,37 +3161,6 @@ def produceHarnessCommand(self, cmd, suite):
             barista_cmd.append(barista_bench_name)
             return barista_cmd
 
-    class RuntimeContext():
-        """Container class for the runtime context of BaristaBenchmarkSuite.
-        """
-        def __init__(self, suite, vm, benchmark, bmSuiteArgs):
-            if not isinstance(suite, BaristaBenchmarkSuite):
-                raise TypeError(f"Expected an instance of {BaristaBenchmarkSuite.__name__}, instead got an instance of {suite.__class__.__name__}")
-            self._suite = suite
-            self._vm = vm
-            self._benchmark = benchmark
-            self._bmSuiteArgs = bmSuiteArgs
-
-        @property
-        def suite(self):
-            return self._suite
-
-        @property
-        def vm(self):
-            return self._vm
-
-        @property
-        def benchmark(self):
-            """The currently running benchmark.
-
-            Corresponds to `benchmarks[0]` in a suite method that has a `benchmarks` argument.
-            """
-            return self._benchmark
-
-        @property
-        def bmSuiteArgs(self):
-            return self._bmSuiteArgs
-
 
 mx_benchmark.add_bm_suite(BaristaBenchmarkSuite())
 
@@ -3959,24 +3914,25 @@ def run(self, benchmarks, bm_suite_args: List[str]) -> DataPoints:
         fallback_reason = self.fallback_mode_reason(bm_suite_args)
 
         vm = self.get_vm_registry().get_vm_from_suite_args(bm_suite_args)
-        effective_stages, complete_stage_list = vm.prepare_stages(self, bm_suite_args)
-        self.stages_info = StagesInfo(effective_stages, complete_stage_list, vm, bool(fallback_reason))
-
-        if self.stages_info.fallback_mode:
-            # In fallback mode, all stages are run at once. There is matching code in `NativeImageVM.run_java` for this.
-            mx.log(f"Running benchmark in fallback mode (reason: {fallback_reason})")
-            datapoints += super_delegate.run(benchmarks, bm_suite_args)
-        else:
-            while self.stages_info.has_next_stage():
-                stage = self.stages_info.next_stage()
-                # Start the actual benchmark execution. The stages_info attribute will be used by the NativeImageVM to
-                # determine which stage to run this time.
-                stage_dps = super_delegate.run(benchmarks, bm_suite_args)
-                NativeImageBenchmarkMixin._inject_stage_keys(stage_dps, stage)
-                datapoints += stage_dps
-
-        self.stages_info = None
-        return datapoints
+        with self.new_execution_context(vm, benchmarks, bm_suite_args):
+            effective_stages, complete_stage_list = vm.prepare_stages(self, bm_suite_args)
+            self.stages_info = StagesInfo(effective_stages, complete_stage_list, vm, bool(fallback_reason))
+
+            if self.stages_info.fallback_mode:
+                # In fallback mode, all stages are run at once. There is matching code in `NativeImageVM.run_java` for this.
+                mx.log(f"Running benchmark in fallback mode (reason: {fallback_reason})")
+                datapoints += super_delegate.run(benchmarks, bm_suite_args)
+            else:
+                while self.stages_info.has_next_stage():
+                    stage = self.stages_info.next_stage()
+                    # Start the actual benchmark execution. The stages_info attribute will be used by the NativeImageVM to
+                    # determine which stage to run this time.
+                    stage_dps = super_delegate.run(benchmarks, bm_suite_args)
+                    NativeImageBenchmarkMixin._inject_stage_keys(stage_dps, stage)
+                    datapoints += stage_dps
+
+            self.stages_info = None
+            return datapoints
 
     @staticmethod
     def _inject_stage_keys(dps: DataPoints, stage: Stage) -> None:
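Wrapping the whole stage loop in a single `with` block installs the context once per benchmark run and, unlike the old `self.context = ...` assignment, guarantees it is torn down even when a stage fails. A self-contained illustration of that pattern (generic Python, not mx code):

```python
from contextlib import contextmanager

# Generic illustration (not mx code): install per-run state for the duration
# of a block and guarantee cleanup even when a stage raises.
@contextmanager
def execution_context(holder, value):
    holder.execution_context = value
    try:
        yield value
    finally:
        holder.execution_context = None

class Suite:
    execution_context = None

suite = Suite()
try:
    with execution_context(suite, "benchmark-1"):
        assert suite.execution_context == "benchmark-1"
        raise RuntimeError("stage failed")
except RuntimeError:
    pass
assert suite.execution_context is None  # cleared despite the failure
```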