Discard outer warmup and set additional Java flags
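
perf.py previously issued a combined build-and-warmup invocation of
run_benchmark.py and then timed runs of a single iteration each. It now
does a build-only pass (--iterations 0) and passes --iterations-inner
through to each timed invocation, so warmup is handled by the inner
iterations instead of a discarded outer run. The java command is also
pinned to a fixed 8g heap and tiered compilation up to level 4 (C2).

Example invocation (app and target flags elided):

  tools/perf.py --iterations 3 --iterations-inner 10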

Change-Id: Ib4de30a4f8d4f8c004bad88d7dcc1be8a0fd1fa6
diff --git a/tools/perf.py b/tools/perf.py
index 592172d..9e7a7b8 100755
--- a/tools/perf.py
+++ b/tools/perf.py
@@ -34,7 +34,11 @@
                         help='Specific app(s) to measure.',
                         action='append')
     result.add_argument('--iterations',
-                        help='How many iterations to run.',
+                        help='How many times run_benchmark is run.',
+                        type=int,
+                        default=1)
+    result.add_argument('--iterations-inner',
+                        help='How many iterations to run inside run_benchmark.',
                         type=int,
                         default=10)
     result.add_argument('--outdir',
@@ -127,16 +131,19 @@
                     print(f'Skipping run, {output} already exists.')
                     continue
 
-            cmd = [
-                'tools/run_benchmark.py', '--benchmark', app, '--iterations',
-                '1', '--target', options.target
+            base_cmd = [
+                'tools/run_benchmark.py', '--benchmark', app, '--target',
+                options.target
             ]
+            if options.verbose:
+                base_cmd.append('--verbose')
             if options.version:
-                cmd.extend(['--version', options.version])
+                base_cmd.extend(['--version', options.version])
 
-            # Build and warmup
+            # Build only; --iterations 0 skips the benchmark run.
             utils.Print(f'Preparing {app}', quiet=options.quiet)
-            subprocess.check_output(cmd)
+            build_cmd = base_cmd + ['--iterations', '0']
+            subprocess.check_output(build_cmd)
 
             # Run benchmark.
             benchmark_result_json_files = []
@@ -144,8 +151,10 @@
                 utils.Print(f'Benchmarking {app} ({i+1}/{options.iterations})',
                             quiet=options.quiet)
                 benchmark_result_file = os.path.join(temp, f'result_file_{i}')
-                iteration_cmd = cmd + [
-                    '--output', benchmark_result_file, '--no-build'
+                iteration_cmd = base_cmd + [
+                    '--iterations',
+                    str(options.iterations_inner), '--output',
+                    benchmark_result_file, '--no-build'
                 ]
                 subprocess.check_output(iteration_cmd)
                 benchmark_result_json_files.append(benchmark_result_file)
diff --git a/tools/run_benchmark.py b/tools/run_benchmark.py
index 65547a8..ed34d9b 100755
--- a/tools/run_benchmark.py
+++ b/tools/run_benchmark.py
@@ -81,11 +81,17 @@
     result.add_argument('--temp',
                         help='A directory to use for temporaries and outputs.',
                         default=None)
-    return result.parse_known_args(argv)
+    result.add_argument('--verbose',
+                        help='Enable verbose logging.',
+                        action='store_true',
+                        default=False)
+    options, args = result.parse_known_args(argv)
+    options.quiet = not options.verbose
+    return options, args
 
 
 def main(argv, temp):
-    (options, args) = parse_options(argv)
+    options, args = parse_options(argv)
 
     if options.output:
         options.output = os.path.abspath(options.output)
@@ -140,7 +146,11 @@
 
 def run(options, r8jar, testjars):
     jdkhome = get_jdk_home(options, options.benchmark)
-    cmd = [jdk.GetJavaExecutable(jdkhome)]
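+    # Run with a fixed 8g heap and tiered compilation up to level 4 (C2).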
+    cmd = [
+        jdk.GetJavaExecutable(jdkhome), '-Xms8g', '-Xmx8g',
+        '-XX:+TieredCompilation', '-XX:TieredStopAtLevel=4'
+    ]
     if options.enable_assertions:
         cmd.append('-ea')
     if options.print_times:
@@ -150,7 +160,10 @@
             f'-DTEST_DATA_LOCATION={utils.REPO_ROOT}/d8_r8/test_modules/tests_java_8/build/classes/java/test',
             f'-DTESTBASE_DATA_LOCATION={utils.REPO_ROOT}/d8_r8/test_modules/testbase/build/classes/java/main',
         ])
-    if options.iterations:
+    if options.iterations is not None:
+        if options.iterations == 0:
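+            # An iteration count of 0 means build-only; skip the run.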
+            return
         cmd.append(f'-DBENCHMARK_ITERATIONS={options.iterations}')
     if options.output:
         cmd.append(f'-DBENCHMARK_OUTPUT={options.output}')
@@ -163,6 +176,7 @@
         # repository root as an argument. The runner can then setup dependencies.
         'golem' if options.golem else utils.REPO_ROOT,
     ])
+    utils.PrintCmd(cmd, quiet=options.quiet)
     return subprocess.check_call(cmd)