Allow Gradle benchmarks to be run individually
Change-Id: I262f61922af7d8efdf2b80e52863f1035ea02882
diff --git a/tools/test_gradle_benchmarks.py b/tools/test_gradle_benchmarks.py
index 6720c4a..189b9da 100755
--- a/tools/test_gradle_benchmarks.py
+++ b/tools/test_gradle_benchmarks.py
@@ -3,7 +3,6 @@
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
-
from __future__ import print_function
import argparse
import os
@@ -12,7 +11,6 @@
import gradle
from enum import Enum
-
BENCHMARKS_ROOT_DIR = os.path.join(utils.REPO_ROOT, 'third_party', 'benchmarks')
def parse_arguments():
@@ -24,9 +22,10 @@
choices=['dx', 'd8'],
required=True,
help='Compiler tool to use.')
+ parser.add_argument('--benchmark',
+ help='Which benchmark to run, default all')
return parser.parse_args()
-
class Benchmark:
class Tools(Enum):
D8 = 1
@@ -88,7 +87,6 @@
def EnsurePresence(self):
EnsurePresence(self.rootDirPath, self.displayName)
-
def EnsurePresence(dir, displayName):
if not os.path.exists(dir) or os.path.getmtime(dir + '.tar.gz')\
< os.path.getmtime(dir + '.tar.gz.sha1'):
@@ -111,7 +109,6 @@
return any(namePattern in taskname for namePattern in acceptedGradleTasks)
-
def PrintBuildTimeForGolem(benchmark, stdOut):
for line in stdOut.splitlines():
if 'BENCH' in line and benchmark.moduleName in line:
@@ -138,7 +135,6 @@
print('{}(RunTimeRaw): {} ms'
.format(benchmark.displayName + '-' + taskName, commaSplit[2]))
-
def Main():
args = parse_arguments()
@@ -191,10 +187,16 @@
]
- EnsurePresence(os.path.join('third_party', 'benchmarks', 'android-sdk'), 'android SDK')
- EnsurePresence(os.path.join('third_party', 'gradle-plugin'), 'Android Gradle plugin')
-
- for benchmark in buildTimeBenchmarks:
+ EnsurePresence(os.path.join('third_party', 'benchmarks', 'android-sdk'),
+ 'android SDK')
+ EnsurePresence(os.path.join('third_party', 'gradle-plugin'),
+ 'Android Gradle plugin')
+ toRun = buildTimeBenchmarks
+ if args.benchmark:
+ toRun = [b for b in toRun if b.displayName == args.benchmark]
+ if len(toRun) != 1:
+ raise AssertionError("Unknown benchmark: " + args.benchmark)
+ for benchmark in toRun:
benchmark.EnsurePresence()
benchmark.Clean()
stdOut = benchmark.Build(tool, desugarMode)