#!/usr/bin/env python3
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from collections import namedtuple
import json
import os
import platform
import shutil
import subprocess
import sys
import tempfile
import unittest

import coverage
import mock

# Requires python-coverage and python-mock. Native python coverage
# version >= 3.7.1 should be installed to get the best speed.

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
RUN_PERF = os.path.join(BASE_DIR, 'run_perf.py')
TEST_DATA = os.path.join(BASE_DIR, 'unittests', 'testdata')

TEST_WORKSPACE = os.path.join(tempfile.gettempdir(), 'test-v8-run-perf')

SORT_KEY = lambda x: x['graphs']

V8_JSON = {
  'path': ['.'],
  'owners': ['username@chromium.org'],
  'binary': 'd7',
  'timeout': 60,
  'flags': ['--flag'],
  'main': 'run.js',
  'run_count': 1,
  'results_regexp': '^%s: (.+)$',
  'tests': [
    {'name': 'Richards'},
    {'name': 'DeltaBlue'},
  ]
}

V8_NESTED_SUITES_JSON = {
  'path': ['.'],
  'owners': ['username@chromium.org'],
  'flags': ['--flag'],
  'run_count': 1,
  'units': 'score',
  'tests': [
    {'name': 'Richards',
     'path': ['richards'],
     'binary': 'd7',
     'main': 'run.js',
     'resources': ['file1.js', 'file2.js'],
     'run_count': 2,
     'results_regexp': '^Richards: (.+)$'},
    {'name': 'Sub',
     'path': ['sub'],
     'tests': [
       {'name': 'Leaf',
        'path': ['leaf'],
        'run_count_x64': 3,
        'units': 'ms',
        'main': 'run.js',
        'results_regexp': '^Simple: (.+) ms.$'},
     ]
    },
    {'name': 'DeltaBlue',
     'path': ['delta_blue'],
     'main': 'run.js',
     'flags': ['--flag2'],
     'results_regexp': '^DeltaBlue: (.+)$'},
    {'name': 'ShouldntRun',
     'path': ['.'],
     'archs': ['arm'],
     'main': 'run.js'},
  ]
}

V8_GENERIC_JSON = {
  'path': ['.'],
  'owners': ['username@chromium.org'],
  'binary': 'cc',
  'flags': ['--flag'],
  'generic': True,
  'run_count': 1,
  'units': 'ms',
}


class UnitTest(unittest.TestCase):
  @classmethod
  def setUpClass(cls):
    # Import run_perf lazily (after adjusting sys.path) and publish it as a
    # module-level global; the global declaration must precede the import.
    global run_perf
    sys.path.insert(0, BASE_DIR)
    import run_perf

  def testBuildDirectory(self):
    base_path = os.path.join(TEST_DATA, 'builddirs', 'dir1', 'out')
    expected_path = os.path.join(base_path, 'build')
    self.assertEqual(expected_path,
                     run_perf.find_build_directory(base_path, 'x64'))


class PerfTest(unittest.TestCase):
  @classmethod
  def setUpClass(cls):
    # Publish the lazily imported modules as globals for the tests below;
    # the global declaration must precede the imports that bind these names.
    global command, run_perf, Output, NULL_OUTPUT
    sys.path.insert(0, BASE_DIR)
    cls._cov = coverage.coverage(
        include=([os.path.join(BASE_DIR, 'run_perf.py')]))
    cls._cov.start()
    import run_perf
    from testrunner.local import command
    from testrunner.objects.output import Output, NULL_OUTPUT

  @classmethod
  def tearDownClass(cls):
    cls._cov.stop()
    print('')
    print(cls._cov.report())

  def setUp(self):
    self.maxDiff = None
    if os.path.exists(TEST_WORKSPACE):
      shutil.rmtree(TEST_WORKSPACE)
    os.makedirs(TEST_WORKSPACE)

  def tearDown(self):
    mock.patch.stopall()
    if os.path.exists(TEST_WORKSPACE):
      shutil.rmtree(TEST_WORKSPACE)

  def _WriteTestInput(self, json_content):
    self._test_input = os.path.join(TEST_WORKSPACE, 'test.json')
    with open(self._test_input, 'w') as f:
      f.write(json.dumps(json_content))

  def _MockCommand(self, *args, **kwargs):
    on_bots = kwargs.pop('on_bots', False)
    # Fake output for each test run.
    test_outputs = [Output(stdout=arg,
                           timed_out=kwargs.get('timed_out', False),
                           exit_code=kwargs.get('exit_code', 0),
                           duration=42)
                    for arg in args[1]]
    def create_cmd(*args, **kwargs):
      cmd = mock.MagicMock()
      def execute(*args, **kwargs):
        return test_outputs.pop()
      cmd.execute = mock.MagicMock(side_effect=execute)
      return cmd

    mock.patch.object(
        run_perf.command, 'PosixCommand',
        mock.MagicMock(side_effect=create_cmd)).start()

    build_dir = 'Release' if on_bots else 'x64.release'
    out_dirs = ['out', 'out-secondary']
    return_values = [
      os.path.join(os.path.dirname(BASE_DIR), out, build_dir)
      for out in out_dirs
    ]
    mock.patch.object(
        run_perf, 'find_build_directory',
        mock.MagicMock(side_effect=return_values)).start()

    # Check that d8 is called from the correct cwd for each test run.
    dirs = [os.path.join(TEST_WORKSPACE, arg) for arg in args[0]]
    def chdir(*args, **kwargs):
      self.assertEqual(dirs.pop(), args[0])
    os.chdir = mock.MagicMock(side_effect=chdir)

    subprocess.check_call = mock.MagicMock()
    platform.system = mock.MagicMock(return_value='Linux')

  def _CallMain(self, *args):
    self._test_output = os.path.join(TEST_WORKSPACE, 'results.json')
    all_args = [
      '--json-test-results',
      self._test_output,
      self._test_input,
    ]
    all_args += args
    return run_perf.Main(all_args)

  def _LoadResults(self, file_name=None):
    with open(file_name or self._test_output) as f:
      return json.load(f)

  def _VerifyResults(self, suite, units, traces, file_name=None):
    self.assertListEqual(sorted([
      {'units': units,
       'graphs': [suite, trace['name']],
       'results': trace['results'],
       'stddev': trace['stddev']} for trace in traces], key=SORT_KEY),
      sorted(self._LoadResults(file_name)['traces'], key=SORT_KEY))

  def _VerifyRunnableDurations(self, runs, timeout, file_name=None):
    self.assertListEqual([
      {
        'graphs': ['test'],
        'durations': [42] * runs,
        'timeout': timeout,
      },
    ], self._LoadResults(file_name)['runnables'])

  def _VerifyErrors(self, errors):
    self.assertListEqual(errors, self._LoadResults()['errors'])

  def _VerifyMock(self, binary, *args, **kwargs):
    shell = os.path.join(os.path.dirname(BASE_DIR), binary)
    command.Command.assert_called_with(
        cmd_prefix=[],
        shell=shell,
        args=list(args),
        timeout=kwargs.get('timeout', 60),
        handle_sigterm=True)

  def _VerifyMockMultiple(self, *args, **kwargs):
    self.assertEqual(len(args), len(command.Command.call_args_list))
    for arg, actual in zip(args, command.Command.call_args_list):
      expected = {
        'cmd_prefix': [],
        'shell': os.path.join(os.path.dirname(BASE_DIR), arg[0]),
        'args': list(arg[1:]),
        'timeout': kwargs.get('timeout', 60),
        'handle_sigterm': True,
      }
      self.assertTupleEqual((expected,), actual)

  def testOneRun(self):
    self._WriteTestInput(V8_JSON)
    self._MockCommand(['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'])
    self.assertEqual(0, self._CallMain())
    self._VerifyResults('test', 'score', [
      {'name': 'Richards', 'results': [1.234], 'stddev': ''},
      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
    ])
    self._VerifyRunnableDurations(1, 60)
    self._VerifyErrors([])
    self._VerifyMock(
        os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')

  def testOneRunWithTestFlags(self):
    test_input = dict(V8_JSON)
    test_input['test_flags'] = ['2', 'test_name']
    self._WriteTestInput(test_input)
    self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567'])
    self.assertEqual(0, self._CallMain())
    self._VerifyResults('test', 'score', [
      {'name': 'Richards', 'results': [1.234], 'stddev': ''},
      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
    ])
    self._VerifyErrors([])
    self._VerifyMock(os.path.join(
      'out', 'x64.release', 'd7'), '--flag', 'run.js', '--', '2', 'test_name')

  def testTwoRuns_Units_SuiteName(self):
    test_input = dict(V8_JSON)
    test_input['run_count'] = 2
    test_input['name'] = 'v8'
    test_input['units'] = 'ms'
    self._WriteTestInput(test_input)
    self._MockCommand(['.', '.'],
                      ['Richards: 100\nDeltaBlue: 200\n',
                       'Richards: 50\nDeltaBlue: 300\n'])
    self.assertEqual(0, self._CallMain())
    self._VerifyResults('v8', 'ms', [
      {'name': 'Richards', 'results': [50.0, 100.0], 'stddev': ''},
      {'name': 'DeltaBlue', 'results': [300.0, 200.0], 'stddev': ''},
    ])
    self._VerifyErrors([])
    self._VerifyMock(os.path.join(
      'out', 'x64.release', 'd7'), '--flag', 'run.js')

  def testTwoRuns_SubRegexp(self):
    test_input = dict(V8_JSON)
    test_input['run_count'] = 2
    del test_input['results_regexp']
    test_input['tests'][0]['results_regexp'] = '^Richards: (.+)$'
    test_input['tests'][1]['results_regexp'] = '^DeltaBlue: (.+)$'
    self._WriteTestInput(test_input)
    self._MockCommand(['.', '.'],
                      ['Richards: 100\nDeltaBlue: 200\n',
                       'Richards: 50\nDeltaBlue: 300\n'])
    self.assertEqual(0, self._CallMain())
    self._VerifyResults('test', 'score', [
      {'name': 'Richards', 'results': [50.0, 100.0], 'stddev': ''},
      {'name': 'DeltaBlue', 'results': [300.0, 200.0], 'stddev': ''},
    ])
    self._VerifyErrors([])
    self._VerifyMock(os.path.join(
      'out', 'x64.release', 'd7'), '--flag', 'run.js')

  def testPerfectConfidenceRuns(self):
    self._WriteTestInput(V8_JSON)
    self._MockCommand(
        ['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'] * 10)
    self.assertEqual(0, self._CallMain('--confidence-level', '1'))
    self._VerifyResults('test', 'score', [
      {'name': 'Richards', 'results': [1.234] * 10, 'stddev': ''},
      {'name': 'DeltaBlue', 'results': [10657567.0] * 10, 'stddev': ''},
    ])
    self._VerifyErrors([])
    self._VerifyMock(os.path.join(
      'out', 'x64.release', 'd7'), '--flag', 'run.js')

  def testNoisyConfidenceRuns(self):
    self._WriteTestInput(V8_JSON)
    self._MockCommand(
        ['.'],
        reversed([
            # First 10 runs are mandatory. DeltaBlue is slightly noisy.
            'x\nRichards: 1.234\nDeltaBlue: 10757567\ny\n',
            'x\nRichards: 1.234\nDeltaBlue: 10557567\ny\n',
            'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
            'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
            'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
            'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
            'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
            'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
            'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
            'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
            # Need 4 more runs for confidence in DeltaBlue results.
            'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
            'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
            'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
            'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
        ]),
    )
    self.assertEqual(0, self._CallMain('--confidence-level', '1'))
    self._VerifyResults('test', 'score', [
      {'name': 'Richards', 'results': [1.234] * 14, 'stddev': ''},
      {
        'name': 'DeltaBlue',
        'results': [10757567.0, 10557567.0] + [10657567.0] * 12,
        'stddev': '',
      },
    ])
    self._VerifyErrors([])
    self._VerifyMock(os.path.join(
      'out', 'x64.release', 'd7'), '--flag', 'run.js')

  def testNestedSuite(self):
    self._WriteTestInput(V8_NESTED_SUITES_JSON)
    self._MockCommand(['delta_blue', 'sub/leaf', 'richards'],
                      ['DeltaBlue: 200\n',
                       'Simple: 1 ms.\n',
                       'Simple: 2 ms.\n',
                       'Simple: 3 ms.\n',
                       'Richards: 100\n',
                       'Richards: 50\n'])
    self.assertEqual(0, self._CallMain())
    self.assertListEqual(sorted([
      {'units': 'score',
       'graphs': ['test', 'Richards'],
       'results': [50.0, 100.0],
       'stddev': ''},
      {'units': 'ms',
       'graphs': ['test', 'Sub', 'Leaf'],
       'results': [3.0, 2.0, 1.0],
       'stddev': ''},
      {'units': 'score',
       'graphs': ['test', 'DeltaBlue'],
       'results': [200.0],
       'stddev': ''},
    ], key=SORT_KEY), sorted(self._LoadResults()['traces'], key=SORT_KEY))
    self._VerifyErrors([])
    self._VerifyMockMultiple(
        (os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
        (os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
        (os.path.join('out', 'x64.release', 'd8'), '--flag', 'run.js'),
        (os.path.join('out', 'x64.release', 'd8'), '--flag', 'run.js'),
        (os.path.join('out', 'x64.release', 'd8'), '--flag', 'run.js'),
        (os.path.join('out', 'x64.release', 'd8'),
         '--flag', '--flag2', 'run.js'))

  def testOneRunStdDevRegExp(self):
    test_input = dict(V8_JSON)
    test_input['stddev_regexp'] = r'^%s-stddev: (.+)$'
    self._WriteTestInput(test_input)
    self._MockCommand(['.'], ['Richards: 1.234\nRichards-stddev: 0.23\n'
                              'DeltaBlue: 10657567\nDeltaBlue-stddev: 106\n'])
    self.assertEqual(0, self._CallMain())
    self._VerifyResults('test', 'score', [
      {'name': 'Richards', 'results': [1.234], 'stddev': '0.23'},
      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': '106'},
    ])
    self._VerifyErrors([])
    self._VerifyMock(
        os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')

  def testTwoRunsStdDevRegExp(self):
    test_input = dict(V8_JSON)
    test_input['stddev_regexp'] = r'^%s-stddev: (.+)$'
    test_input['run_count'] = 2
    self._WriteTestInput(test_input)
    self._MockCommand(['.'], ['Richards: 3\nRichards-stddev: 0.7\n'
                              'DeltaBlue: 6\nDeltaBlue-boom: 0.9\n',
                              'Richards: 2\nRichards-stddev: 0.5\n'
                              'DeltaBlue: 5\nDeltaBlue-stddev: 0.8\n'])
    self.assertEqual(1, self._CallMain())
    self._VerifyResults('test', 'score', [
      {'name': 'Richards', 'results': [2.0, 3.0], 'stddev': '0.7'},
      {'name': 'DeltaBlue', 'results': [5.0, 6.0], 'stddev': '0.8'},
    ])
    self._VerifyErrors([
      'Test test/Richards should only run once since a stddev is provided '
      'by the test.',
      'Test test/DeltaBlue should only run once since a stddev is provided '
      'by the test.',
      r'Regexp "^DeltaBlue-stddev: (.+)$" did not match for test '
      r'test/DeltaBlue.'
    ])
    self._VerifyMock(
        os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')

  def testBuildbot(self):
    self._WriteTestInput(V8_JSON)
    self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n'],
                      on_bots=True)
    mock.patch.object(
        run_perf.Platform, 'ReadBuildConfig',
        mock.MagicMock(return_value={'is_android': False})).start()
    self.assertEqual(0, self._CallMain())
    self._VerifyResults('test', 'score', [
      {'name': 'Richards', 'results': [1.234], 'stddev': ''},
      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
    ])
    self._VerifyErrors([])
    self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')

  def testBuildbotWithTotal(self):
    test_input = dict(V8_JSON)
    test_input['total'] = True
    self._WriteTestInput(test_input)
    self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n'],
                      on_bots=True)
    mock.patch.object(
        run_perf.Platform, 'ReadBuildConfig',
        mock.MagicMock(return_value={'is_android': False})).start()
    self.assertEqual(0, self._CallMain())
    self._VerifyResults('test', 'score', [
      {'name': 'Richards', 'results': [1.234], 'stddev': ''},
      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
      {'name': 'Total', 'results': [3626.491097190233], 'stddev': ''},
    ])
    self._VerifyErrors([])
    self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')

  def testBuildbotWithTotalAndErrors(self):
    test_input = dict(V8_JSON)
    test_input['total'] = True
    self._WriteTestInput(test_input)
    self._MockCommand(['.'], ['x\nRichards: bla\nDeltaBlue: 10657567\ny\n'],
                      on_bots=True)
    mock.patch.object(
        run_perf.Platform, 'ReadBuildConfig',
        mock.MagicMock(return_value={'is_android': False})).start()
    self.assertEqual(1, self._CallMain())
    self._VerifyResults('test', 'score', [
      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
    ])
    self._VerifyErrors(
        ['Regexp "^Richards: (.+)$" '
         'returned a non-numeric for test test/Richards.',
         'Not all traces have produced results. Can not compute total for '
         'test.'])
    self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')

  def testRegexpNoMatch(self):
    self._WriteTestInput(V8_JSON)
    self._MockCommand(['.'], ['x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n'])
    self.assertEqual(1, self._CallMain())
    self._VerifyResults('test', 'score', [
      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
    ])
    self._VerifyErrors(
        ['Regexp "^Richards: (.+)$" did not match for test test/Richards.'])
    self._VerifyMock(
        os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')

  def testOneRunCrashed(self):
    test_input = dict(V8_JSON)
    test_input['retry_count'] = 1
    self._WriteTestInput(test_input)
    self._MockCommand(
        ['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n', ''],
        exit_code=-1)
    self.assertEqual(1, self._CallMain())
    self._VerifyResults('test', 'score', [])
    self._VerifyErrors([])
    self._VerifyMock(
        os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')

  def testOneRunTimingOut(self):
    test_input = dict(V8_JSON)
    test_input['timeout'] = 70
    test_input['retry_count'] = 0
    self._WriteTestInput(test_input)
    self._MockCommand(['.'], [''], timed_out=True)
    self.assertEqual(1, self._CallMain())
    self._VerifyResults('test', 'score', [])
    self._VerifyErrors([])
    self._VerifyMock(os.path.join('out', 'x64.release', 'd7'),
                     '--flag', 'run.js', timeout=70)

  def testAndroid(self):
    self._WriteTestInput(V8_JSON)
    mock.patch('run_perf.AndroidPlatform.PreExecution').start()
    mock.patch('run_perf.AndroidPlatform.PostExecution').start()
    mock.patch('run_perf.AndroidPlatform.PreTests').start()
    mock.patch('run_perf.find_build_directory').start()
    mock.patch(
        'run_perf.AndroidPlatform.Run',
        return_value=(Output(stdout='Richards: 1.234\nDeltaBlue: 10657567\n'),
                      NULL_OUTPUT)).start()
    mock.patch('testrunner.local.android._Driver', autospec=True).start()
    mock.patch(
        'run_perf.Platform.ReadBuildConfig',
        return_value={'is_android': True}).start()
    self.assertEqual(0, self._CallMain('--arch', 'arm'))
    self._VerifyResults('test', 'score', [
      {'name': 'Richards', 'results': [1.234], 'stddev': ''},
      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
    ])

  def testTwoRuns_Trybot(self):
    test_input = dict(V8_JSON)
    test_input['run_count'] = 2
    self._WriteTestInput(test_input)
    self._MockCommand(['.', '.', '.', '.'],
                      ['Richards: 100\nDeltaBlue: 200\n',
                       'Richards: 200\nDeltaBlue: 20\n',
                       'Richards: 50\nDeltaBlue: 200\n',
                       'Richards: 100\nDeltaBlue: 20\n'])
    test_output_secondary = os.path.join(
        TEST_WORKSPACE, 'results_secondary.json')
    self.assertEqual(0, self._CallMain(
        '--outdir-secondary', 'out-secondary',
        '--json-test-results-secondary', test_output_secondary,
    ))
    self._VerifyResults('test', 'score', [
      {'name': 'Richards', 'results': [100.0, 200.0], 'stddev': ''},
      {'name': 'DeltaBlue', 'results': [20.0, 20.0], 'stddev': ''},
    ])
    self._VerifyResults('test', 'score', [
      {'name': 'Richards', 'results': [50.0, 100.0], 'stddev': ''},
      {'name': 'DeltaBlue', 'results': [200.0, 200.0], 'stddev': ''},
    ], test_output_secondary)
    self._VerifyRunnableDurations(2, 60, test_output_secondary)
    self._VerifyErrors([])
    self._VerifyMockMultiple(
        (os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
        (os.path.join('out-secondary', 'x64.release', 'd7'),
         '--flag', 'run.js'),
        (os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
        (os.path.join('out-secondary', 'x64.release', 'd7'),
         '--flag', 'run.js'),
    )

  def testWrongBinaryWithProf(self):
    test_input = dict(V8_JSON)
    self._WriteTestInput(test_input)
    self._MockCommand(['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'])
    self.assertEqual(0, self._CallMain('--extra-flags=--prof'))
    self._VerifyResults('test', 'score', [
      {'name': 'Richards', 'results': [1.234], 'stddev': ''},
      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
    ])
    self._VerifyErrors([])
    self._VerifyMock(os.path.join('out', 'x64.release', 'd7'),
                     '--flag', '--prof', 'run.js')

  #############################################################################
  ### System tests

  def _RunPerf(self, mocked_d8, test_json):
    output_json = os.path.join(TEST_WORKSPACE, 'output.json')
    args = [
      os.sys.executable, RUN_PERF,
      '--binary-override-path', os.path.join(TEST_DATA, mocked_d8),
      '--json-test-results', output_json,
      os.path.join(TEST_DATA, test_json),
    ]
    subprocess.check_output(args)
    return self._LoadResults(output_json)

  def testNormal(self):
    results = self._RunPerf('d8_mocked1.py', 'test1.json')
    self.assertListEqual([], results['errors'])
    self.assertListEqual(sorted([
      {
        'units': 'score',
        'graphs': ['test1', 'Richards'],
        'results': [1.2, 1.2],
        'stddev': '',
      },
      {
        'units': 'score',
        'graphs': ['test1', 'DeltaBlue'],
        'results': [2.1, 2.1],
        'stddev': '',
      },
    ], key=SORT_KEY), sorted(results['traces'], key=SORT_KEY))

  def testResultsProcessor(self):
    results = self._RunPerf('d8_mocked2.py', 'test2.json')
    self.assertListEqual([], results['errors'])
    self.assertListEqual([
      {
        'units': 'score',
        'graphs': ['test2', 'Richards'],
        'results': [1.2, 1.2],
        'stddev': '',
      },
      {
        'units': 'score',
        'graphs': ['test2', 'DeltaBlue'],
        'results': [2.1, 2.1],
        'stddev': '',
      },
    ], results['traces'])

  def testResultsProcessorNested(self):
    results = self._RunPerf('d8_mocked2.py', 'test3.json')
    self.assertListEqual([], results['errors'])
    self.assertListEqual([
      {
        'units': 'score',
        'graphs': ['test3', 'Octane', 'Richards'],
        'results': [1.2],
        'stddev': '',
      },
      {
        'units': 'score',
        'graphs': ['test3', 'Octane', 'DeltaBlue'],
        'results': [2.1],
        'stddev': '',
      },
    ], results['traces'])


if __name__ == '__main__':
  unittest.main()