|
31 | 31 | import traceback |
32 | 32 | import uuid |
33 | 33 | from argparse import ArgumentParser |
| 34 | +import os.path |
34 | 35 |
|
35 | 36 | import mx |
36 | 37 |
|
@@ -160,7 +161,20 @@ def add_bm_suite(suite, mxsuite=None): |
160 | 161 | _bm_suites[suite.name()] = suite |
161 | 162 |
|
162 | 163 |
|
163 | | -class BaseRule(object): |
class Rule(object):
    """Abstract base for all result-parsing rules.

    A rule turns raw benchmark results into structured datapoints.
    """

    def parse(self, text):
        """Create a dictionary of variables for every measurement.

        :param text: The standard output of the benchmark.
        :type text: str
        :return: Iterable of dictionaries with the matched variables.
        :rtype: iterable
        """
        raise NotImplementedError()
| 177 | +class BaseRule(Rule): |
164 | 178 | """A rule parses a raw result and a prepares a structured measurement using a replacement |
165 | 179 | template. |
166 | 180 |
|
@@ -324,6 +338,110 @@ def getCSVFiles(self, text): |
324 | 338 | return (m.groupdict()[self.match_name] for m in re.finditer(self.pattern, text, re.MULTILINE)) |
325 | 339 |
|
326 | 340 |
|
class JMHJsonRule(Rule):
    """Parses a JSON file produced by JMH (`-rf json`) and creates a
    measurement result per fork/iteration datapoint."""

    # JMH run metadata that is copied (stringified) into "extra.jmh.*"
    # dimensions of every datapoint, when present in the result.
    extra_jmh_keys = [
        "mode",
        "threads",
        "forks",
        "warmupIterations",
        "warmupTime",
        "warmupBatchSize",
        "measurementIterations",
        "measurementTime",
        "measurementBatchSize",
    ]

    def __init__(self, filename, suiteName):
        # filename: path to the JMH JSON result file.
        # suiteName: reported as the "bench-suite" dimension.
        self.filename = filename
        self.suiteName = suiteName

    def shortenPackageName(self, benchmark):
        """
        Returns an abbreviated name for the benchmark.
        Example: com.example.benchmark.Bench -> c.e.b.Bench
        The full name is stored in the `extra.jmh.benchmark` property.
        """
        s = benchmark.split(".")
        # class and method
        clazz = s[-2:]
        package = [str(x[0]) for x in s[:-2]]
        return ".".join(package + clazz)

    def benchSuiteName(self):
        return self.suiteName

    def getExtraJmhKeys(self):
        return JMHJsonRule.extra_jmh_keys

    def parse(self, text):
        """Reads `self.filename` and returns a list of datapoint dicts.

        The benchmark's standard output (`text`) is ignored; all data comes
        from the JSON file written by JMH.

        :raises RuntimeError: if a result uses an unknown benchmark mode.
        """
        r = []
        with open(self.filename) as fp:
            for result in json.load(fp):

                benchmark = result["benchmark"]
                mode = result["mode"]

                pm = result["primaryMetric"]
                unit = pm["scoreUnit"]
                unit_parts = unit.split("/")

                if mode == "thrpt":
                    # Throughput, ops/time
                    metricName = "throughput"
                    better = "higher"
                    if len(unit_parts) == 2:
                        metricUnit = "op/" + unit_parts[1]
                    else:
                        metricUnit = unit
                elif mode in ["avgt", "sample", "ss"]:
                    # Average time, Sampling time, Single shot invocation time
                    metricName = "time"
                    better = "lower"
                    if len(unit_parts) == 2:
                        metricUnit = unit_parts[0]
                    else:
                        metricUnit = unit
                else:
                    raise RuntimeError("Unknown benchmark mode {0}".format(mode))

                d = {
                    "bench-suite" : self.benchSuiteName(),
                    "benchmark" : self.shortenPackageName(benchmark),
                    "metric.name": metricName,
                    "metric.unit": metricUnit,
                    "metric.score-function": "id",
                    "metric.better": better,
                    "metric.type": "numeric",
                    # full name
                    "extra.jmh.benchmark" : benchmark,
                }

                if "params" in result:
                    # Add all parameters as a single string.
                    # Fix: items() instead of the Python-2-only iteritems(),
                    # and format values explicitly so non-string parameter
                    # values do not crash str.join.
                    d["extra.jmh.params"] = ", ".join(
                        ["{0}={1}".format(k, v) for k, v in result["params"].items()])
                    # and also the individual values
                    for k, v in result["params"].items():
                        d["extra.jmh.param." + k] = str(v)

                for k in self.getExtraJmhKeys():
                    if k in result:
                        d["extra.jmh." + k] = str(result[k])

                # One datapoint per (fork, iteration) raw measurement.
                for jmhFork, rawData in enumerate(pm["rawData"]):
                    for iteration, data in enumerate(rawData):
                        d2 = d.copy()
                        d2.update({
                            "metric.value": float(data),
                            "metric.iteration": int(iteration),
                            "extra.jmh.fork": str(jmhFork),
                        })
                        r.append(d2)
        return r
| 443 | + |
| 444 | + |
327 | 445 | class StdOutBenchmarkSuite(BenchmarkSuite): |
328 | 446 | """Convenience suite for benchmarks that need to parse standard output. |
329 | 447 |
|
@@ -570,6 +688,41 @@ def run(self, cwd, args): |
570 | 688 | return code, out, dims |
571 | 689 |
|
572 | 690 |
|
class DefaultJavaVm(OutputCapturingJavaVm):
    """Host VM that runs benchmarks on the JDK returned by `mx.get_jdk()`."""

    def __init__(self, raw_name, raw_config_name):
        # raw_name / raw_config_name: reported verbatim as the
        # "host-vm" / "host-vm-config" dimensions.
        self.raw_name = raw_name
        self.raw_config_name = raw_config_name

    def name(self):
        return self.raw_name

    def config_name(self):
        return self.raw_config_name

    def post_process_command_line_args(self, args):
        # The default VM runs the command line unmodified.
        return args

    def dimensions(self, cwd, args, code, out):
        return {
            "host-vm": self.name(),
            "host-vm-config": self.config_name(),
        }

    def run_java(self, args, out=None, err=None, cwd=None, nonZeroIsFatal=False):
        # Fix: forward `err` and `nonZeroIsFatal` instead of silently
        # redirecting stderr into `out` and always hard-coding False, and
        # propagate the return value to the caller.
        return mx.get_jdk().run_java(args, out=out, err=err, cwd=cwd, nonZeroIsFatal=nonZeroIsFatal)
| 713 | + |
| 714 | + |
class DummyJavaVm(OutputCapturingJavaVm):
    """
    Dummy VM to work around: "pylint #111138 disabling R0921 doesn't work"
    https://www.logilab.org/ticket/111138

    Note that the warning R0921 (abstract-class-little-used) has been removed
    from pylint 1.4.3, so this placeholder can be dropped once older pylint
    versions are no longer used.
    """
    pass
| 724 | + |
| 725 | + |
573 | 726 | def add_java_vm(javavm): |
574 | 727 | key = (javavm.name(), javavm.config_name()) |
575 | 728 | if key in _bm_suite_java_vms: |
@@ -608,6 +761,118 @@ def rules(self, out, benchmarks, bmSuiteArgs): |
608 | 761 | ] |
609 | 762 |
|
610 | 763 |
|
class JMHBenchmarkSuiteBase(JavaBenchmarkSuite):
    """Base class for JMH based benchmark suites."""

    # File into which JMH writes its JSON results; parsed by JMHJsonRule.
    jmh_result_file = "jmh_result.json"

    def extraRunArgs(self):
        # Request a JSON result file at the well-known location.
        return ["-rff", JMHBenchmarkSuiteBase.jmh_result_file, "-rf", "json"]

    def extraVmArgs(self):
        # Subclasses may contribute additional VM arguments.
        return []

    def getJMHEntry(self):
        # Subclasses must supply the JMH entry point on the command line.
        raise NotImplementedError()

    def createCommandLineArgs(self, benchmarks, bmSuiteArgs):
        if benchmarks is not None:
            mx.abort("No benchmark should be specified for the selected suite. (Use JMH specific filtering instead.)")
        vmArgs = self.vmArgs(bmSuiteArgs) + self.extraVmArgs()
        runArgs = self.extraRunArgs() + self.runArgs(bmSuiteArgs)
        # VM arguments apply to the harness VM and, via --jvmArgsPrepend,
        # also to the forked benchmark VMs.
        forkedVmFlags = ['--jvmArgsPrepend', ' '.join(vmArgs)]
        return vmArgs + self.getJMHEntry() + forkedVmFlags + runArgs

    def benchmarks(self):
        return ["default"]

    def successPatterns(self):
        # JMH prints this footer once the whole run finished.
        return [re.compile(r"# Run complete.", re.MULTILINE)]

    def benchSuiteName(self):
        return self.name()

    def failurePatterns(self):
        return [re.compile(r"<failure>")]

    def flakySuccessPatterns(self):
        return []

    def rules(self, out, benchmarks, bmSuiteArgs):
        resultFile = JMHBenchmarkSuiteBase.jmh_result_file
        return [JMHJsonRule(resultFile, self.benchSuiteName())]
| 806 | + |
| 807 | + |
class JMHRunnerBenchmarkSuite(JMHBenchmarkSuiteBase):
    """JMH benchmark suite that uses jmh-runner to execute projects with JMH benchmarks."""

    def extraVmArgs(self):
        # Put every project with a direct JMH dependency on the class path.
        jmhProjects = [p for p in mx.projects_opt_limit_to_suites()
                       if 'JMH' in [dep.name for dep in p.deps]]
        cp = mx.classpath([p.name for p in jmhProjects], jdk=mx.get_jdk())
        return ['-cp', cp]

    def getJMHEntry(self):
        # The standard JMH harness main class.
        return ["org.openjdk.jmh.Main"]
| 823 | + |
| 824 | + |
| 825 | + |
class JMHJarBenchmarkSuite(JMHBenchmarkSuiteBase):
    """
    JMH benchmark suite that executes microbenchmarks in a JMH jar.

    This suite relies on the `--jmh-jar` and `--jmh-name` arguments being
    set. The former specifies the path to the JMH jar file; the latter is
    the name suffix used for the bench-suite property.
    """

    def benchSuiteName(self):
        return "jmh-" + self.jmhName()

    def vmArgs(self, bmSuiteArgs):
        # Strip the suite-specific --jmh-jar/--jmh-name options from the VM
        # arguments and remember their values for jmhJAR()/jmhName().
        vmArgs = super(JMHJarBenchmarkSuite, self).vmArgs(bmSuiteArgs)
        parser = ArgumentParser(add_help=False)
        parser.add_argument("--jmh-jar", default=None)
        parser.add_argument("--jmh-name", default=None)
        known, remaining = parser.parse_known_args(vmArgs)
        self.jmh_jar = known.jmh_jar
        self.jmh_name = known.jmh_name
        return remaining

    def getJMHEntry(self):
        return ["-jar", self.jmhJAR()]

    def jmhName(self):
        # Aborts when --jmh-name was not supplied (self.jmh_name is set by
        # vmArgs(), which runs before command-line construction).
        if self.jmh_name is None:
            mx.abort("Please use the --jmh-name benchmark suite argument to set the name of the JMH suite.")
        return self.jmh_name

    def jmhJAR(self):
        # Aborts when --jmh-jar was not supplied or points nowhere.
        if self.jmh_jar is None:
            mx.abort("Please use the --jmh-jar benchmark suite argument to set the JMH jar file.")
        jarPath = os.path.expanduser(self.jmh_jar)
        if not os.path.exists(jarPath):
            mx.abort("The --jmh-jar argument points to a non-existing file: " + jarPath)
        return jarPath
| 863 | + |
| 864 | + |
class JMHRunnerMxBenchmarkSuite(JMHRunnerBenchmarkSuite):
    """JMH runner suite registered under "jmh-mx" (presumably covering the
    JMH benchmarks of mx's own projects -- confirm against callers)."""

    def name(self):
        # Suite identifier used on the `mx benchmark` command line.
        return "jmh-mx"

    def group(self):
        return "Graal"

    def subgroup(self):
        return "mx"
| 874 | + |
| 875 | + |
611 | 876 | class BenchmarkExecutor(object): |
612 | 877 | def uid(self): |
613 | 878 | return str(uuid.uuid1()) |
@@ -646,7 +911,7 @@ def machineRam(self): |
646 | 911 |
|
647 | 912 | def branch(self): |
648 | 913 | mxsuite = mx.primary_suite() |
649 | | - name = mxsuite.vc.active_branch(mxsuite.dir, abortOnError=False) or "<unknown>" |
| 914 | + name = mxsuite.vc and mxsuite.vc.active_branch(mxsuite.dir, abortOnError=False) or "<unknown>" |
650 | 915 | return name |
651 | 916 |
|
652 | 917 | def buildUrl(self): |
@@ -789,6 +1054,12 @@ def benchmark(self, mxBenchmarkArgs, bmSuiteArgs): |
789 | 1054 | _benchmark_executor = BenchmarkExecutor() |
790 | 1055 |
|
791 | 1056 |
|
def init_benchmark_suites():
    """Called after mx initialization if mx is the primary suite.

    Registers the default host VM ("server"/"default") and the jmh-mx
    benchmark suite.
    """
    add_java_vm(DefaultJavaVm("server", "default"))
    add_bm_suite(JMHRunnerMxBenchmarkSuite())
| 1061 | + |
| 1062 | + |
792 | 1063 | def splitArgs(args, separator): |
793 | 1064 | """Splits the list of string arguments at the first separator argument. |
794 | 1065 |
|
|
0 commit comments