Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
37 changes: 37 additions & 0 deletions src/running/benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -318,3 +318,40 @@ def get_full_args(self, runtime: Runtime) -> List[Union[str, Path]]:
cmd.extend(["-n", "1"]) # one run
cmd.extend(self.program_args)
return cmd

class ChromeBenchmark(Benchmark):
    """A benchmark launched inside Chrome, configured through Chrome binary
    arguments and V8 flags (forwarded via --js-flags)."""

    def __init__(self, chrome_args: List[str], js_args: List[str],
                 timing_iteration: int, benchmark_url: str, **kwargs):
        super().__init__(**kwargs)
        # Arguments passed directly to the Chrome binary.
        self.chrome_args = chrome_args
        # Arguments forwarded to V8 through --js-flags.
        self.js_args = js_args
        # Base URL of the benchmark page (e.g. a Speedometer deployment).
        self.benchmark_url = benchmark_url
        # Number of timing iterations the harness should run.
        self.timing_iteration = timing_iteration

    def __str__(self) -> str:
        return self.to_string(DummyRuntime("chrome"))

    def attach_modifiers(self, modifiers: Sequence[Modifier]) -> 'ChromeBenchmark':
        """Return a copy of this benchmark with the given modifiers applied.

        JSArg values extend the V8 flags, ChromeArg values extend the Chrome
        command line, and ProgramArg values extend the generic program args.
        NOTE(review): this assumes super().attach_modifiers returns a copy
        whose arg lists are independent of self's — confirm the copy is deep
        enough that extend() does not mutate the original benchmark.
        """
        jb = super().attach_modifiers(modifiers)
        for m in modifiers:
            # Exact type matching (not isinstance) is kept deliberately so a
            # subclass of one modifier kind is not routed to its parent's bucket.
            if type(m) == JSArg:
                jb.js_args.extend(m.val)
            elif type(m) == ChromeArg:
                jb.chrome_args.extend(m.val)
            elif type(m) == ProgramArg:
                jb.program_args.extend(m.val)
        return jb

    def get_full_args(self, runtime: Runtime) -> List[Union[str, Path]]:
        """Build the full command line to launch Chrome for this benchmark."""
        cmd = super().get_full_args(runtime)
        cmd.append(runtime.get_executable())
        cmd.extend(self.chrome_args)
        # Chrome workaround: use a dedicated profile directory so the cache
        # can be removed between iterations.
        # Fixed: the argument previously carried a leading space, which stops
        # Chrome from recognising it as a flag when argv is passed as a list.
        cmd.append("--user-data-dir=/tmp/chrome-expr-cache")
        cmd.append("--js-flags={}".format(" ".join(self.js_args)))
        # Fixed: the format call previously passed timing_iteration twice, so
        # the suite= query parameter received the iteration count and
        # self.name was silently ignored (str.format drops extra args).
        full_url = "{}?startAutomatically&iterationCount={}&suite={}".format(
            self.benchmark_url, self.timing_iteration, self.name)
        cmd.append(full_url)
        return cmd
13 changes: 10 additions & 3 deletions src/running/command/minheap.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,9 +54,16 @@ def log(s):
log("o ")
return ContinueSearch.HeapTooBig
elif subprocess_exit is SubprocessrExit.Timeout:
# A timeout is likely due to heap being too small and many GCs scheduled back to back
log("t ")
return ContinueSearch.HeapTooSmall
if suite.is_passed(output):
# A hack for Chrome benchmarks: a run is considered successful when
# Chrome remains active after all iterations complete and is then
# killed due to a timeout.
log("o ")
return ContinueSearch.HeapTooBig
else:
# A timeout is likely due to heap being too small and many GCs scheduled back to back
log("t ")
return ContinueSearch.HeapTooSmall
# If not the above scenario, we treat this invocation as a crash or some kind of erroneous state
log(".")
continue
Expand Down
13 changes: 11 additions & 2 deletions src/running/command/runbms.py
Original file line number Diff line number Diff line change
Expand Up @@ -196,6 +196,8 @@ def hz_to_ghz(hzstr: str) -> str:
def get_log_prologue(runtime: Runtime, bm: Benchmark) -> str:
output = "\n-----\n"
output += "mkdir -p PLOTTY_WORKAROUND; timedrun; "
# Chrome workaround to remove the cache directory between iterations.
output += "rm -rf /tmp/chrome-expr-cache; "
Copy link
Copy Markdown
Member

@caizixian caizixian Mar 17, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This doesn't actually run anything. It gets added to the log file, but that's it. You want to use system to execute commands.

Generally, you probably don't want to do this for all runs (which is the case if it gets put inside get_log_prologue), and probably should avoid hardcoded paths (tempfile.TemporaryDirectory) would be a better choice.

Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Note that running has already created a temporary directory for each run.

https://github.com/anupli/running-ng/blob/815cf54d96b3b50e9c1b3001e63dd6292bd51373/src/running/command/runbms.py#L391C59-L391C69

So you might be able to create a subfolder under it as the temporary directory for Chrome.

Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

So what might be the cleanest is to use the plugin API to create the temporary folder under runbms_dir for Chrome to use. The plugin object will be created by an instance of Chrome, and will only run when Chrome is used for example.

The plugin object can also delete the folder when the benchmark finishes.

output += bm.to_string(runtime)
output += "\n"
output += "running-ng v{}\n".format(__VERSION__)
Expand Down Expand Up @@ -310,8 +312,15 @@ def run_one_benchmark(
if runtime.is_oom(output):
oomed_count[c] += 1
if exit_status is SubprocessrExit.Timeout:
timeout_count[c] += 1
print(".", end="", flush=True)
if suite.is_passed(output):
# A hack for Chrome benchmarks: a run is considered successful when
# Chrome remains active after all iterations complete and is then
# killed due to a timeout.
config_passed = True
print(config_index_to_chr(j), end="", flush=True)
else:
timeout_count[c] += 1
print(".", end="", flush=True)
elif exit_status is SubprocessrExit.Error:
print(".", end="", flush=True)
elif exit_status is SubprocessrExit.Normal:
Expand Down
9 changes: 9 additions & 0 deletions src/running/modifier.py
Original file line number Diff line number Diff line change
Expand Up @@ -180,3 +180,12 @@ def __init__(self, value_opts=None, **kwargs):

def __str__(self) -> str:
return "{} JuliaArg {}".format(super().__str__(), self.val)

@register(Modifier)
class ChromeArg(Modifier):
    """A modifier that supplies extra command-line arguments for the Chrome
    binary. The "val" keyword is a quoted string that is split into
    individual argv tokens."""

    def __init__(self, value_opts=None, **kwargs):
        super().__init__(value_opts, **kwargs)
        raw_val = self._kwargs["val"]
        self.val = split_quoted(raw_val)

    def __str__(self) -> str:
        base = super().__str__()
        return "{} ChromeArg {}".format(base, self.val)
19 changes: 19 additions & 0 deletions src/running/runtime.py
Original file line number Diff line number Diff line change
Expand Up @@ -164,6 +164,25 @@ def is_oom(self, output: bytes) -> bool:
return True
return False

@register(Runtime)
class Chrome(JavaScriptRuntime):
    """Chrome runtime: provides V8 heap-sizing modifiers and OOM detection."""

    def __str__(self):
        return "{} chrome {}".format(super().__str__(), self.executable)

    def get_heapsize_modifiers(self, size: int) -> List[Modifier]:
        """Return a modifier pinning both the initial and max V8 heap to `size`."""
        sz = str(size)
        heap_flags = "--initial-heap-size={} --max-heap-size={}".format(sz, sz)
        return [JSArg(name="heap" + sz, val=heap_flags)]

    def is_oom(self, output: bytes) -> bool:
        """Heuristically detect a V8 out-of-memory condition in raw output."""
        # Byte patterns that V8/Chrome emit when the JavaScript heap is exhausted.
        oom_markers = (
            b"Fatal javascript OOM in",
            b"Fatal JavaScript out of memory",
            b"V8 javascript OOM",
            b"<--- Last few GCs --->",
        )
        return any(marker in output for marker in oom_markers)

@register(Runtime)
class SpiderMonkey(JavaScriptRuntime):
Expand Down
71 changes: 71 additions & 0 deletions src/running/suite.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
Benchmark,
JavaScriptBenchmark,
JuliaBenchmark,
ChromeBenchmark,
)
import logging
from running.util import register, split_quoted
Expand Down Expand Up @@ -517,3 +518,73 @@ def get_benchmark(self, bm_spec: Union[str, Dict[str, Any]]) -> "JuliaBenchmark"
def is_passed(self, output: bytes) -> bool:
# FIXME
return True

@register(BenchmarkSuite)
class Speedometer(BenchmarkSuite):
    """A browser benchmark suite (Speedometer) whose benchmarks run in Chrome.

    Recognized configuration keys:
        timing_iteration: number of timing iterations the harness runs
        benchmark_url: base URL of the benchmark deployment
        minheap: name selecting one entry of minheap_values
        minheap_values: per-benchmark minimum heap sizes, keyed by entry name
        timeout: per-invocation timeout (optional)
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.timing_iteration = parse_timing_iteration(
            kwargs.get("timing_iteration"), "Speedometer")
        self.benchmark_url = kwargs.get("benchmark_url")
        # Name of the selected minheap table; None means "not selected".
        self.minheap: Optional[str]
        self.minheap = kwargs.get("minheap")
        self.minheap_values: Dict[str, Dict[str, int]]
        self.minheap_values = kwargs.get("minheap_values", {})
        if not isinstance(self.minheap_values, dict):
            raise TypeError(
                "The minheap_values of {} should be a dictionary".format(self.name)
            )
        if self.minheap:
            if not isinstance(self.minheap, str):
                raise TypeError(
                    "The minheap of {} should be a string that selects from a minheap_values".format(
                        self.name
                    )
                )
            if self.minheap not in self.minheap_values:
                raise KeyError(
                    "{} is not a valid entry of {}.minheap_values".format(
                        self.name, self.name
                    )
                )
        self.timeout: Optional[int]
        self.timeout = kwargs.get("timeout")

    def __str__(self) -> str:
        return "{} Speedometer {}".format(super().__str__(), self.benchmark_url)

    def get_benchmark(self, bm_spec: Union[str, Dict[str, Any]]) -> 'ChromeBenchmark':
        """Create a ChromeBenchmark for the named benchmark.

        Only string specs are supported for this suite; the benchmark starts
        with empty Chrome/JS argument lists to be filled in by modifiers.
        """
        assert type(bm_spec) is str
        return ChromeBenchmark(
            suite_name=self.name,
            name=bm_spec,
            chrome_args=[],
            js_args=[],
            timing_iteration=self.timing_iteration,
            benchmark_url=self.benchmark_url,
            timeout=self.timeout,
        )

    def get_minheap(self, bm: Benchmark) -> int:
        """Look up the minimum heap size for `bm`, warning and falling back to
        the module default when no value is configured.

        NOTE(review): __DEFAULT_MINHEAP is referenced from inside a class
        body, so Python name-mangles it to _Speedometer__DEFAULT_MINHEAP —
        confirm this resolves against the module-level constant (the sibling
        suites appear to use the same pattern).
        """
        assert isinstance(bm, ChromeBenchmark)
        name = bm.name
        if not self.minheap:
            logging.warning(
                "No minheap_value of {} is selected".format(self))
            return __DEFAULT_MINHEAP
        minheap = self.minheap_values[self.minheap]
        if name not in minheap:
            logging.warning(
                "Minheap for {} of {} not set".format(name, self))
            return __DEFAULT_MINHEAP
        return minheap[name]

    def is_passed(self, output: bytes) -> bool:
        """Return True when the output shows one completed MMTk statistics
        block per timing iteration.

        A hack for Chrome benchmarks: Chrome stays alive after all iterations
        complete and is killed by the harness timeout, so success is inferred
        from the output rather than from the exit status.
        """
        # Fixed: typo 'iter_compelted' -> 'iters_completed'; also count on the
        # raw bytes (the marker is ASCII) so non-UTF-8 noise in the subprocess
        # output cannot make decode() raise.
        iters_completed = output.count(b"End MMTk Statistics")
        return iters_completed == self.timing_iteration