Skip to content

Commit

Permalink
Fix how AnalysisReportJsonFileRule looks for the JSON file when diagnostics mode is enabled
Browse files Browse the repository at this point in the history
and the benchmark is bundle based; disable the 'agent' stage for 'micronaut-pegasus' by overriding the 'stages' method - this way the problematic 'agent' stage does not get executed even if it is requested.
  • Loading branch information
akolic committed Dec 2, 2024
1 parent ad42202 commit e4ebc65
Show file tree
Hide file tree
Showing 3 changed files with 44 additions and 28 deletions.
10 changes: 2 additions & 8 deletions sdk/mx.sdk/mx_sdk_benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -1191,13 +1191,7 @@ def rules(self, out, benchmarks, bmSuiteArgs):
"micronaut-similarity": {},
"micronaut-pegasus": {},
"quarkus-hello-world": {},
"quarkus-tika-odt": {
"barista-bench-name": "quarkus-tika",
},
"quarkus-tika-pdf": {
"barista-bench-name": "quarkus-tika",
"workload": "pdf-workload.barista.json",
},
"quarkus-tika": {},
"spring-hello-world": {},
"spring-petclinic": {},
},
Expand Down Expand Up @@ -1262,7 +1256,7 @@ def subgroup(self):

def benchmarkList(self, bmSuiteArgs):
    """Return the runnable benchmarks, filtering out those that cannot run on the JVM."""
    # Barista currently does not support running 'micronaut-pegasus' on the JVM - running it results in a crash (GR-59793)
    exclude = ["micronaut-pegasus"]
    return [b for b in self.completeBenchmarkList(bmSuiteArgs) if b not in exclude]

Expand Down
18 changes: 11 additions & 7 deletions substratevm/mx.substratevm/mx_substratevm_benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@
import re
from glob import glob
from pathlib import Path
from typing import List

import mx
import mx_benchmark
Expand Down Expand Up @@ -287,13 +288,16 @@ def benchmarkName(self):
return self.context.benchmark

def benchmarkList(self, bmSuiteArgs):
    """Every benchmark in the complete list is runnable for this suite."""
    complete = self.completeBenchmarkList(bmSuiteArgs)
    return complete

def default_stages(self):
    """Return the default stages, omitting 'agent' for 'micronaut-pegasus'.

    The 'agent' stage requires running the benchmark on the JVM, which is
    currently not supported for 'micronaut-pegasus'.
    """
    if self.context.benchmark != "micronaut-pegasus":
        return super().default_stages()
    return ['instrument-image', 'instrument-run', 'image', 'run']
exclude = []
return [b for b in self.completeBenchmarkList(bmSuiteArgs) if b not in exclude]

def stages(self, bm_suite_args: List[str]) -> List[mx_sdk_benchmark.Stage]:
    """Return the stages to execute for this benchmark run.

    The 'agent' stage is dropped for 'micronaut-pegasus' - even when it is
    requested - because that benchmark currently cannot run on the JVM (GR-59793).
    """
    stages = super().stages(bm_suite_args)
    is_pegasus = self.context.benchmark == "micronaut-pegasus"
    if is_pegasus and mx_sdk_benchmark.Stage.AGENT in stages:
        stages.remove(mx_sdk_benchmark.Stage.AGENT)
        mx.warn(f"Skipping the 'agent' stage as it is not supported for the 'micronaut-pegasus' benchmark. The stages that will be executed are: {[stage.value for stage in stages]}")
    return stages

def application_nib(self):
if self.benchmarkName() not in self._application_nibs:
Expand Down
44 changes: 31 additions & 13 deletions vm/mx.vm/mx_vm_benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -181,6 +181,7 @@ def __init__(self, vm: NativeImageVM, bm_suite: BenchmarkSuite | NativeImageBenc

# Path to the X.nib bundle file --bundle-apply is specified
bundle_apply_path = self.get_bundle_path_if_present()
self.is_bundle_based = bundle_apply_path is not None
# Path to the X.output directory if a bundle is created
# In that case, files generated by Native Image are generated in that folder structure
bundle_create_path = self.get_bundle_create_path_if_present()
Expand All @@ -197,13 +198,13 @@ def __init__(self, vm: NativeImageVM, bm_suite: BenchmarkSuite | NativeImageBenc

if bundle_create_path:
self.bundle_output_path = bundle_create_path
elif bundle_apply_path:
elif self.is_bundle_based:
bundle_dir = bundle_apply_path.parent
bundle_name = bundle_apply_path.name
assert bundle_name.endswith(BUNDLE_EXTENSION), bundle_name
self.bundle_output_path = bundle_dir / f"{bundle_name[:-len(BUNDLE_EXTENSION)]}.output"

if not bundle_apply_path:
if not self.is_bundle_based:
base_image_build_args += self.classpath_arguments
base_image_build_args += self.modulepath_arguments
base_image_build_args += self.executable
Expand Down Expand Up @@ -258,7 +259,7 @@ def __init__(self, vm: NativeImageVM, bm_suite: BenchmarkSuite | NativeImageBenc
# Inform the StagesInfo object about removed stages
bm_suite.stages_info.setup(removed_stages)

bundle_args = [f'--bundle-apply={bundle_apply_path}'] if bundle_apply_path else []
bundle_args = [f'--bundle-apply={bundle_apply_path}'] if self.is_bundle_based else []
# benchmarks are allowed to use experimental options
# the bundle might also inject experimental options, but they will be appropriately locked/unlocked.
self.base_image_build_args = [os.path.join(vm.home(), 'bin', 'native-image')] + svm_experimental_options(base_image_build_args) + bundle_args
Expand Down Expand Up @@ -972,7 +973,7 @@ def image_build_general_rules(self, benchmarks):

def image_build_analysis_rules(self, benchmarks):
return [
AnalysisReportJsonFileRule(self.config.image_build_reports_directory, self.is_gate, {
AnalysisReportJsonFileRule(self.config.image_build_reports_directory, self.config.bundle_output_path, self.is_gate, self.config.is_bundle_based, {
"bench-suite": self.config.benchmark_suite_name,
"benchmark": benchmarks[0],
"metric.name": "analysis-stats",
Expand All @@ -983,7 +984,7 @@ def image_build_analysis_rules(self, benchmarks):
"metric.iteration": 0,
"metric.object": "call-edges",
}, ['total_call_edges']),
AnalysisReportJsonFileRule(self.config.image_build_reports_directory, self.is_gate, {
AnalysisReportJsonFileRule(self.config.image_build_reports_directory, self.config.bundle_output_path, self.is_gate, self.config.is_bundle_based, {
"bench-suite": self.config.benchmark_suite_name,
"benchmark": benchmarks[0],
"metric.name": "analysis-stats",
Expand All @@ -994,7 +995,7 @@ def image_build_analysis_rules(self, benchmarks):
"metric.iteration": 0,
"metric.object": "reachable-types",
}, ['total_reachable_types']),
AnalysisReportJsonFileRule(self.config.image_build_reports_directory, self.is_gate, {
AnalysisReportJsonFileRule(self.config.image_build_reports_directory, self.config.bundle_output_path, self.is_gate, self.config.is_bundle_based, {
"bench-suite": self.config.benchmark_suite_name,
"benchmark": benchmarks[0],
"metric.name": "analysis-stats",
Expand All @@ -1005,7 +1006,7 @@ def image_build_analysis_rules(self, benchmarks):
"metric.iteration": 0,
"metric.object": "reachable-methods",
}, ['total_reachable_methods']),
AnalysisReportJsonFileRule(self.config.image_build_reports_directory, self.is_gate, {
AnalysisReportJsonFileRule(self.config.image_build_reports_directory, self.config.bundle_output_path, self.is_gate, self.config.is_bundle_based, {
"bench-suite": self.config.benchmark_suite_name,
"benchmark": benchmarks[0],
"metric.name": "analysis-stats",
Expand All @@ -1016,7 +1017,7 @@ def image_build_analysis_rules(self, benchmarks):
"metric.iteration": 0,
"metric.object": "reachable-fields",
}, ['total_reachable_fields']),
AnalysisReportJsonFileRule(self.config.image_build_reports_directory, self.is_gate, {
AnalysisReportJsonFileRule(self.config.image_build_reports_directory, self.config.bundle_output_path, self.is_gate, self.config.is_bundle_based, {
"bench-suite": self.config.benchmark_suite_name,
"benchmark": benchmarks[0],
"metric.name": "analysis-stats",
Expand Down Expand Up @@ -1427,20 +1428,37 @@ class AnalysisReportJsonFileRule(mx_benchmark.JsonStdOutFileRule):
final path of the ``reports`` directory, instead.
"""

def __init__(self, report_directory, is_diagnostics_mode, replacement, keys):
def __init__(self, report_directory, bundle_output_dir, is_diagnostics_mode, is_bundle_based, replacement, keys):
    """Create a rule that locates the analysis-results JSON file referenced in build output.

    :param report_directory: directory where Native Image places its build reports
    :param bundle_output_dir: the bundle's ``*.output`` directory (only meaningful for bundle-based runs)
    :param is_diagnostics_mode: whether diagnostics mode is enabled for the image build
    :param is_bundle_based: whether the benchmark image is built from a bundle
    :param replacement: datapoint template forwarded to the JSON rule
    :param keys: JSON keys of interest, forwarded to the JSON rule
    """
    super().__init__(r"^# Printing analysis results stats to: (?P<path>\S+?)$", "path", replacement, keys)
    self.report_directory = report_directory
    self.bundle_output_dir = bundle_output_dir
    self.is_diagnostics_mode = is_diagnostics_mode
    self.is_bundle_based = is_bundle_based

def get_diagnostics_dir_name(self, json_file_path) -> str:
    """Extract the diagnostics directory name (the JSON file's parent directory) from the JSON file's absolute path.

    Note: ``Path.parent.name`` is a ``str``, so the return annotation is ``str``
    (the previous ``-> Path`` annotation was incorrect).
    """
    return Path(json_file_path).parent.name

def get_base_search_dir(self, json_file_path) -> Path:
    """Return the absolute path of the directory expected to contain the analysis-results JSON file.

    DEVELOPER NOTE:
    Unfortunately, the analysis results JSON file ends up in different locations depending on:
    - whether the diagnostics mode is enabled (the results end up inside the diagnostics directory)
    - whether the benchmark is bundle based (the diagnostics directory ends up in the "other"
      subdirectory of the bundle output directory)
    """
    if not self.is_diagnostics_mode:
        return self.report_directory
    diagnostics_dir = self.get_diagnostics_dir_name(json_file_path)
    if self.is_bundle_based:
        return self.bundle_output_dir / "other" / diagnostics_dir
    return self.report_directory / diagnostics_dir

def getJsonFiles(self, text):
json_files = super().getJsonFiles(text)
found_json_files = []
for json_file_path in json_files:
json_file_name = os.path.basename(json_file_path)
base_search_dir = self.report_directory
if self.is_diagnostics_mode:
base_search_dir = os.path.join(base_search_dir, os.path.basename(os.path.dirname(json_file_path)))
expected_json_file_path = os.path.join(base_search_dir, json_file_name)
expected_json_file_path = os.path.join(self.get_base_search_dir(json_file_path), json_file_name)
if exists(expected_json_file_path):
found_json_files.append(expected_json_file_path)
else:
Expand Down

0 comments on commit e4ebc65

Please sign in to comment.