Add HelloWorld D8 benchmarks

Collapse the three R8 benchmark targets into a single R8 target, add the
HelloWorld variants to the d8 target in tools/perf.py, add a D8 perf
dashboard (tools/perf/d8.html), and upload d8_benchmark_data.json
alongside the existing r8 and retrace data.

Change-Id: I1acb6181d1281f8ddcdb9e2587cf8bf5556c4fd5
diff --git a/src/test/java/com/android/tools/r8/benchmarks/BenchmarkTarget.java b/src/test/java/com/android/tools/r8/benchmarks/BenchmarkTarget.java
index 8b23832..ad6d841 100644
--- a/src/test/java/com/android/tools/r8/benchmarks/BenchmarkTarget.java
+++ b/src/test/java/com/android/tools/r8/benchmarks/BenchmarkTarget.java
@@ -8,9 +8,7 @@
// Possible dashboard targets on golem.
// WARNING: make sure the id-name is 1:1 with tools/run_benchmark.py!
D8("d8", "D8"),
- R8_COMPAT("r8-compat", "R8"),
- R8_NON_COMPAT("r8-full", "R8-full"),
- R8_FORCE_OPT("r8-force", "R8-full-minify-optimize-shrink"),
+ R8("r8-full", "R8-full"),
RETRACE("retrace", "retrace");
private final String idName;
diff --git a/src/test/java/com/android/tools/r8/benchmarks/appdumps/AppDumpBenchmarkBuilder.java b/src/test/java/com/android/tools/r8/benchmarks/appdumps/AppDumpBenchmarkBuilder.java
index 1ba1fd0..06ce01a 100644
--- a/src/test/java/com/android/tools/r8/benchmarks/appdumps/AppDumpBenchmarkBuilder.java
+++ b/src/test/java/com/android/tools/r8/benchmarks/appdumps/AppDumpBenchmarkBuilder.java
@@ -111,7 +111,7 @@
verify();
return BenchmarkConfig.builder()
.setName(name)
- .setTarget(BenchmarkTarget.R8_NON_COMPAT)
+ .setTarget(BenchmarkTarget.R8)
.setSuite(BenchmarkSuite.OPENSOURCE_BENCHMARKS)
.setMethod(runR8(this, configuration))
.setFromRevision(fromRevision)
@@ -130,7 +130,7 @@
verify();
return BenchmarkConfig.builder()
.setName(name)
- .setTarget(BenchmarkTarget.R8_NON_COMPAT)
+ .setTarget(BenchmarkTarget.R8)
.setSuite(BenchmarkSuite.OPENSOURCE_BENCHMARKS)
.setMethod(runR8WithResourceShrinking(this, getDefaultR8Configuration()))
.setFromRevision(fromRevision)
diff --git a/src/test/java/com/android/tools/r8/benchmarks/helloworld/HelloWorldBenchmark.java b/src/test/java/com/android/tools/r8/benchmarks/helloworld/HelloWorldBenchmark.java
index 4c8c31c..77b66a9 100644
--- a/src/test/java/com/android/tools/r8/benchmarks/helloworld/HelloWorldBenchmark.java
+++ b/src/test/java/com/android/tools/r8/benchmarks/helloworld/HelloWorldBenchmark.java
@@ -40,7 +40,7 @@
public static List<BenchmarkConfig> configs() {
Builder<BenchmarkConfig> benchmarks = ImmutableList.builder();
makeBenchmark(BenchmarkTarget.D8, HelloWorldBenchmark::benchmarkD8, benchmarks);
- makeBenchmark(BenchmarkTarget.R8_NON_COMPAT, HelloWorldBenchmark::benchmarkR8, benchmarks);
+ makeBenchmark(BenchmarkTarget.R8, HelloWorldBenchmark::benchmarkR8, benchmarks);
return benchmarks.build();
}
@@ -60,7 +60,7 @@
public String getName() {
// The name includes each non-target option for the variants to ensure unique benchmarks.
- String backendString = backend.isCf() ? "Cf" : "Dex";
+ String backendString = backend.isCf() ? "Cf" : "";
String libraryString = library != null ? "" : "NoLib";
return "HelloWorld" + backendString + libraryString;
}
diff --git a/tools/perf.py b/tools/perf.py
index 323c7f3..e7814e7 100755
--- a/tools/perf.py
+++ b/tools/perf.py
@@ -22,6 +22,18 @@
'CraneApp': {
'targets': ['r8-full']
},
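+    # The HelloWorld variants defined in HelloWorldBenchmark.java run on the
+    # d8 target only.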
+ 'HelloWorld': {
+ 'targets': ['d8']
+ },
+ 'HelloWorldNoLib': {
+ 'targets': ['d8']
+ },
+ 'HelloWorldCf': {
+ 'targets': ['d8']
+ },
+ 'HelloWorldCfNoLib': {
+ 'targets': ['d8']
+ },
'JetLaggedApp': {
'targets': ['r8-full']
},
diff --git a/tools/perf/d8.html b/tools/perf/d8.html
new file mode 100644
index 0000000..810f645
--- /dev/null
+++ b/tools/perf/d8.html
@@ -0,0 +1,257 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <meta charset="utf-8">
+ <title>D8 perf</title>
+ <link rel="stylesheet" href="stylesheet.css">
+</head>
+<body>
+ <div id="benchmark-selectors"></div>
+ <div>
+ <canvas id="myChart"></canvas>
+ </div>
+ <div>
+ <div style="float: left; width: 50%">
+ <button type="button" id="show-more-left" disabled>⇐</button>
+ <button type="button" id="show-less-left">⇒</button>
+ </div>
+ <div style="float: left; text-align: right; width: 50%">
+ <button type="button" id="show-less-right">⇐</button>
+ <button type="button" id="show-more-right" disabled>⇒</button>
+ </div>
+ </div>
+ <script src="https://cdn.jsdelivr.net/npm/chart.js@4.4.3/dist/chart.umd.min.js"></script>
+ <script src="https://cdn.jsdelivr.net/npm/chartjs-plugin-datalabels@2.2.0"></script>
+ <script src="extensions.js"></script>
+ <script src="utils.js"></script>
+ <script type="module">
+ import chart from "./chart.js";
+ import dom from "./dom.js";
+ import scales from "./scales.js";
+ import state from "./state.js";
+
+ // Chart data provider.
+ function getData(filteredCommits) {
+ const labels = filteredCommits.map((c, i) => c.index);
+ const datasets = getDatasets(filteredCommits);
+ return {
+ labels: labels,
+ datasets: datasets
+ };
+ }
+
+ function getDatasets(filteredCommits) {
+ const datasets = [];
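+      // For each selected benchmark, build four datasets: a code-size line,
+      // a code-size scatter (nondeterminism), a runtime line, and a runtime
+      // scatter (variance).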
+ state.forEachSelectedBenchmark(
+ selectedBenchmark => {
+ const codeSizeData =
+ filteredCommits.map(
+ (c, i) => getSingleResult(selectedBenchmark, filteredCommits[i], "code_size"));
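+          // Scatter points mark code sizes from the same commit that differ
+          // from the reported single result, i.e., nondeterministic output.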
+ const codeSizeScatterData = [];
+ for (const commit of filteredCommits.values()) {
+ if (!(selectedBenchmark in commit.benchmarks)) {
+ continue;
+ }
+ const seen = new Set();
+ seen.add(getSingleResult(selectedBenchmark, commit, "code_size"));
+            const codeSizes = getAllResults(selectedBenchmark, commit, "code_size");
+ for (const codeSize of codeSizes.values()) {
+ if (!seen.has(codeSize)) {
+ codeSizeScatterData.push({ x: commit.index, y: codeSize });
+ seen.add(codeSize);
+ }
+ }
+ }
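+          // The runtime line plots the fastest run per commit, converted
+          // from nanoseconds to seconds.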
+ const runtimeData =
+ filteredCommits.map(
+ (c, i) =>
+ selectedBenchmark in filteredCommits[i].benchmarks
+ ? getAllResults(selectedBenchmark, filteredCommits[i], "runtime")
+ .min()
+ .ns_to_s()
+ : NaN);
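+          // Scatter every individual runtime so run-to-run variance is visible.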
+ const runtimeScatterData = [];
+ for (const commit of filteredCommits.values()) {
+ if (!(selectedBenchmark in commit.benchmarks)) {
+ continue;
+ }
+            const runtimes = getAllResults(selectedBenchmark, commit, "runtime");
+ for (const runtime of runtimes.values()) {
+ runtimeScatterData.push({ x: commit.index, y: runtime.ns_to_s() });
+ }
+ }
+
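+          // Render line segments that bridge commits with missing data as
+          // dashed lines (see spanGaps below).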
+ const skipped = (ctx, value) => ctx.p0.skip || ctx.p1.skip ? value : undefined;
+ datasets.push(...[
+ {
+ benchmark: selectedBenchmark,
+ type: 'line',
+ label: 'Dex size',
+ data: codeSizeData,
+ datalabels: {
+ align: 'end',
+ anchor: 'end'
+ },
+ tension: 0.1,
+ yAxisID: 'y',
+ segment: {
+ borderColor: ctx =>
+ skipped(
+ ctx,
+ chart.get()
+ ? chart.get().data.datasets[ctx.datasetIndex].backgroundColor
+ : undefined),
+ borderDash: ctx => skipped(ctx, [6, 6]),
+ },
+ spanGaps: true
+ },
+ {
+ benchmark: selectedBenchmark,
+ type: 'scatter',
+ label: 'Nondeterminism',
+ data: codeSizeScatterData,
+ datalabels: {
+ labels: {
+ value: null
+ }
+ },
+ radius: 6,
+ pointBackgroundColor: 'red'
+ },
+ {
+ benchmark: selectedBenchmark,
+ type: 'line',
+ label: 'Runtime',
+ data: runtimeData,
+ datalabels: {
+ labels: {
+ value: null
+ }
+ },
+ tension: 0.1,
+ yAxisID: 'y_runtime',
+ segment: {
+ borderColor: ctx =>
+ skipped(
+ ctx,
+ chart.get()
+ ? chart.get().data.datasets[ctx.datasetIndex].backgroundColor
+ : undefined),
+ borderDash: ctx => skipped(ctx, [6, 6]),
+ },
+ spanGaps: true
+ },
+ {
+ benchmark: selectedBenchmark,
+ type: 'scatter',
+ label: 'Runtime variance',
+ data: runtimeScatterData,
+ datalabels: {
+ labels: {
+ value: null
+ }
+ },
+ yAxisID: 'y_runtime'
+ }
+ ]);
+ });
+ return datasets;
+ }
+
+ // Chart options.
+ const options = {
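+      // Show a pointer cursor when hovering a data point.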
+ onHover: (event, chartElement) =>
+ event.native.target.style.cursor =
+ chartElement[0] ? 'pointer' : 'default',
+ plugins: {
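+      // Data labels show the percentage change between points: green for
+      // decreases, red for increases; changes below 0.1 are hidden.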
+ datalabels: {
+ backgroundColor: 'rgba(255, 255, 255, 0.7)',
+ borderColor: 'rgba(128, 128, 128, 0.7)',
+ borderRadius: 4,
+ borderWidth: 1,
+ color: context => chart.getDataPercentageChange(context) < 0 ? 'green' : 'red',
+ display: context => {
+            const percentageChange = chart.getDataPercentageChange(context);
+ return percentageChange !== null && Math.abs(percentageChange) >= 0.1;
+ },
+ font: {
+ size: 20,
+ weight: 'bold'
+ },
+ offset: 8,
+ formatter: chart.getDataLabelFormatter,
+ padding: 6
+ },
+ legend: {
+ labels: {
+ filter: (legendItem, data) => {
+ // Only retain the legends for the first selected benchmark. If
+ // multiple benchmarks are selected, then use the legends of the
+ // first selected benchmark to control all selected benchmarks.
+ const numUniqueLegends =
+ data.datasets.length / state.selectedBenchmarks.size;
+ return legendItem.datasetIndex < numUniqueLegends;
+ },
+ },
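+        // Clicking a legend entry toggles that series for all selected
+        // benchmarks.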
+ onClick: (e, legendItem, legend) => {
+ const clickedLegend = legendItem.text;
+ if (state.selectedLegends.has(clickedLegend)) {
+ state.selectedLegends.delete(clickedLegend);
+ } else {
+ state.selectedLegends.add(clickedLegend);
+ }
+ chart.update(false, true);
+ },
+ },
+ tooltip: {
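+        // Map the hovered element back to its commit: line points index into
+        // the zoomed commit window, scatter points carry the commit index in x.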
+ callbacks: {
+ title: context => {
+ const elementInfo = context[0];
+            let commit;
+ if (elementInfo.dataset.type == 'line') {
+ commit = commits[state.zoom.left + elementInfo.dataIndex];
+ } else {
+ console.assert(elementInfo.dataset.type == 'scatter');
+ commit = commits[elementInfo.raw.x];
+ }
+ return commit.title;
+ },
+ footer: context => {
+ const elementInfo = context[0];
+            let commit;
+ if (elementInfo.dataset.type == 'line') {
+ commit = commits[state.zoom.left + elementInfo.dataIndex];
+ } else {
+ console.assert(elementInfo.dataset.type == 'scatter');
+ commit = commits[elementInfo.raw.x];
+ }
+ const dataset = chart.get().data.datasets[elementInfo.datasetIndex];
+ return `App: ${dataset.benchmark}\n`
+ + `Author: ${commit.author}\n`
+ + `Submitted: ${new Date(commit.submitted * 1000).toLocaleString()}\n`
+ + `Hash: ${commit.hash}\n`
+ + `Index: ${commit.index}`;
+ }
+ }
+ }
+ },
+ responsive: true,
+ scales: scales.get()
+ };
+
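+    // Load the commit series, then wire up benchmark selectors, legends,
+    // zoom, and the chart itself.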
+ const commits = await state.importCommits("./d8_benchmark_data.json");
+ state.initializeBenchmarks();
+ state.initializeLegends({
+ 'Dex size': { default: true },
+ 'Nondeterminism': { default: true },
+ 'Runtime': { default: true },
+ 'Runtime variance': { default: false }
+ });
+ state.initializeZoom();
+ dom.initializeBenchmarkSelectors();
+ dom.initializeChartNavigation();
+ chart.setDataProvider(getData);
+ chart.initializeChart(options);
+ </script>
+</body>
+</html>
\ No newline at end of file
diff --git a/tools/upload_benchmark_data_to_google_storage.py b/tools/upload_benchmark_data_to_google_storage.py
index 9af3a69..f979010 100755
--- a/tools/upload_benchmark_data_to_google_storage.py
+++ b/tools/upload_benchmark_data_to_google_storage.py
@@ -17,7 +17,7 @@
NUM_COMMITS = 1000
FILES = [
- 'chart.js', 'dom.js', 'extensions.js', 'r8.html', 'retrace.html',
+ 'chart.js', 'd8.html', 'dom.js', 'extensions.js', 'r8.html', 'retrace.html',
'scales.js', 'state.js', 'stylesheet.css', 'url.js', 'utils.js'
]
@@ -102,26 +102,34 @@
# Aggregate all the result.json files into a single file that has the
# same format as tools/perf/benchmark_data.json.
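+    # The d8 series feeds the new tools/perf/d8.html dashboard.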
+ d8_benchmark_data = []
r8_benchmark_data = []
retrace_benchmark_data = []
for commit in commits:
+ d8_benchmarks = {}
r8_benchmarks = {}
retrace_benchmarks = {}
for benchmark, benchmark_info in BENCHMARKS.items():
RecordBenchmarkResult(commit, benchmark, benchmark_info,
+ local_bucket, 'd8', d8_benchmarks)
+ RecordBenchmarkResult(commit, benchmark, benchmark_info,
local_bucket, 'r8-full', r8_benchmarks)
RecordBenchmarkResult(commit, benchmark, benchmark_info,
local_bucket, 'retrace',
retrace_benchmarks)
+ RecordBenchmarkResults(commit, d8_benchmarks, d8_benchmark_data)
RecordBenchmarkResults(commit, r8_benchmarks, r8_benchmark_data)
RecordBenchmarkResults(commit, retrace_benchmarks,
retrace_benchmark_data)
# Trim data.
+ d8_benchmark_data = TrimBenchmarkResults(d8_benchmark_data)
r8_benchmark_data = TrimBenchmarkResults(r8_benchmark_data)
retrace_benchmark_data = TrimBenchmarkResults(retrace_benchmark_data)
# Write output files to public bucket.
+ ArchiveBenchmarkResults(d8_benchmark_data, 'd8_benchmark_data.json',
+ temp)
ArchiveBenchmarkResults(r8_benchmark_data, 'r8_benchmark_data.json',
temp)
ArchiveBenchmarkResults(retrace_benchmark_data,