#! /usr/bin/env vpython3
#
# Copyright 2020 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
Script testing capture_replay with angle_end2end_tests
"""

# Automation script will:
# 1. Build all tests in angle_end2end with frame capture enabled
# 2. Run each test with frame capture
# 3. Build CaptureReplayTest with cpp trace files
# 4. Run CaptureReplayTest
# 5. Output the number of test successes and failures. A test succeeds if no error occurs during
#    its capture and replay, and the GL states at the end of the two runs match. Any unexpected
#    failure will return a non-zero exit code.

# Run this script with Python to test capture replay on angle_end2end tests
# python path/to/capture_replay_tests.py
# Command line arguments: run with --help for a full list.

import argparse
import difflib
import distutils.util
import fnmatch
import json
import logging
import math
import multiprocessing
import os
import psutil
import queue
import re
import shutil
import subprocess
import sys
import time
import traceback

PIPE_STDOUT = True
DEFAULT_OUT_DIR = "out/CaptureReplayTest"  # relative to angle folder
DEFAULT_FILTER = "*/ES2_Vulkan_SwiftShader"
DEFAULT_TEST_SUITE = "angle_end2end_tests"
REPLAY_SAMPLE_FOLDER = "src/tests/capture_replay_tests"  # relative to angle folder
DEFAULT_BATCH_COUNT = 8  # number of tests batched together
TRACE_FILE_SUFFIX = "_context"  # because we only deal with 1 context right now
RESULT_TAG = "*RESULT"
STATUS_MESSAGE_PERIOD = 20  # in seconds
SUBPROCESS_TIMEOUT = 600  # in seconds
DEFAULT_RESULT_FILE = "results.txt"
DEFAULT_LOG_LEVEL = "info"
DEFAULT_MAX_JOBS = 8
DEFAULT_MAX_NINJA_JOBS = 3
REPLAY_BINARY = "capture_replay_tests"
if sys.platform == "win32":
    REPLAY_BINARY += ".exe"
TRACE_FOLDER = "traces"

EXIT_SUCCESS = 0
EXIT_FAILURE = 1

switch_case_without_return_template = """\
        case {case}:
            {namespace}::{call}({params});
            break;
"""

switch_case_with_return_template = """\
        case {case}:
            return {namespace}::{call}({params});
"""

default_case_without_return_template = """\
        default:
            break;"""
default_case_with_return_template = """\
        default:
            return {default_val};"""


def winext(name, ext):
    return ("%s.%s" % (name, ext)) if sys.platform == "win32" else name


def AutodetectGoma():
    return winext('compiler_proxy', 'exe') in (p.name() for p in psutil.process_iter())


class SubProcess():

    def __init__(self, command, logger, env=os.environ, pipe_stdout=PIPE_STDOUT):
        # shell=False so that only 1 subprocess is spawned.
        # if shell=True, a shell process is spawned, which in turn spawns the process running
        # the command. Since we do not have a handle to the 2nd process, we cannot terminate it.
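        # When piping, stderr is merged into stdout so Join() returns a single combined stream.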
        if pipe_stdout:
            self.proc_handle = subprocess.Popen(
                command, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
        else:
            self.proc_handle = subprocess.Popen(command, env=env, shell=False)
        self._logger = logger

    def Join(self, timeout):
        self._logger.debug('Joining with subprocess %d, timeout %s' % (self.Pid(), str(timeout)))
        output = self.proc_handle.communicate(timeout=timeout)[0]
        if output:
            output = output.decode('utf-8')
        else:
            output = ''
        return self.proc_handle.returncode, output

    def Pid(self):
        return self.proc_handle.pid

    def Kill(self):
        self.proc_handle.terminate()
        self.proc_handle.wait()


# Class that manages all child processes of a process. Any process that spawns subprocesses
# should have one. This object is created inside the main process and each worker process.
class ChildProcessesManager():

    @classmethod
    def _GetGnAndNinjaAbsolutePaths(cls):
        path = os.path.join('third_party', 'depot_tools')
        return os.path.join(path, winext('gn', 'bat')), os.path.join(path, winext('ninja', 'exe'))

    def __init__(self, args, logger, ninja_lock):
        # a dictionary of Subprocess, with pid as key
        self.subprocesses = {}
        # list of Python multiprocessing.Process handles
        self.workers = []

        self._gn_path, self._ninja_path = self._GetGnAndNinjaAbsolutePaths()
        self._use_goma = AutodetectGoma()
        self._logger = logger
        self._ninja_lock = ninja_lock
        self.runtimes = {}
        self._args = args

    def RunSubprocess(self, command, env=None, pipe_stdout=True, timeout=None):
        proc = SubProcess(command, self._logger, env, pipe_stdout)
        self._logger.debug('Created subprocess: %s with pid %d' % (' '.join(command), proc.Pid()))
        self.subprocesses[proc.Pid()] = proc
        start_time = time.time()
        try:
            returncode, output = self.subprocesses[proc.Pid()].Join(timeout)
            elapsed_time = time.time() - start_time
            cmd_name = os.path.basename(command[0])
            self.runtimes.setdefault(cmd_name, 0.0)
            self.runtimes[cmd_name] += elapsed_time
            self.RemoveSubprocess(proc.Pid())
            if returncode != 0:
                return -1, output
            return returncode, output
        except KeyboardInterrupt:
            raise
        except subprocess.TimeoutExpired as e:
            self.RemoveSubprocess(proc.Pid())
            return -2, str(e)
        except Exception as e:
            self.RemoveSubprocess(proc.Pid())
            return -1, str(e)

    def RemoveSubprocess(self, subprocess_id):
        assert subprocess_id in self.subprocesses
        self.subprocesses[subprocess_id].Kill()
        del self.subprocesses[subprocess_id]

    def AddWorker(self, worker):
        self.workers.append(worker)

    def KillAll(self):
        for subprocess_id in self.subprocesses:
            self.subprocesses[subprocess_id].Kill()
        for worker in self.workers:
            worker.terminate()
            worker.join()
            worker.close()  # to release file descriptors immediately
        self.subprocesses = {}
        self.workers = []

    def JoinWorkers(self):
        for worker in self.workers:
            worker.join()
            worker.close()
        self.workers = []

    def IsAnyWorkerAlive(self):
        return any([worker.is_alive() for worker in self.workers])

    def GetRemainingWorkers(self):
        count = 0
        for worker in self.workers:
            if worker.is_alive():
                count += 1
        return count

    def RunGNGen(self, build_dir, pipe_stdout, extra_gn_args=[]):
        gn_args = [('angle_with_capture_by_default', 'true')] + extra_gn_args
        if self._use_goma:
            gn_args.append(('use_goma', 'true'))
            if self._args.goma_dir:
                gn_args.append(('goma_dir', '"%s"' % self._args.goma_dir))
        if not self._args.debug:
            gn_args.append(('is_debug', 'false'))
            gn_args.append(('symbol_level', '1'))
            gn_args.append(('angle_assert_always_on', 'true'))
        if self._args.asan:
            gn_args.append(('is_asan', 'true'))
        args_str = ' '.join(['%s=%s' % (k, v) for (k, v) in gn_args])
        cmd = [self._gn_path, 'gen', '--args=%s' % args_str, build_dir]
        self._logger.info(' '.join(cmd))
        return self.RunSubprocess(cmd, pipe_stdout=pipe_stdout)

    def RunNinja(self, build_dir, target, pipe_stdout):
        cmd = [self._ninja_path]

        # This code is taken from depot_tools/autoninja.py
        if self._use_goma:
            num_cores = multiprocessing.cpu_count()
            cmd.append('-j')
            core_multiplier = 40
            j_value = num_cores * core_multiplier

            if sys.platform.startswith('win'):
                # On Windows, a j value higher than 1000 does not improve build performance.
                j_value = min(j_value, 1000)
            elif sys.platform == 'darwin':
                # On Mac, a j value higher than 500 causes 'Too many open files' errors
                # (crbug.com/936864).
                j_value = min(j_value, 500)

            cmd.append('%d' % j_value)
        else:
            cmd.append('-l')
            cmd.append('%d' % os.cpu_count())

        cmd += ['-C', build_dir, target]
        with self._ninja_lock:
            self._logger.info(' '.join(cmd))
            return self.RunSubprocess(cmd, pipe_stdout=pipe_stdout)


def GetTestsListForFilter(args, test_path, filter, logger):
    cmd = GetRunCommand(args, test_path) + ["--list-tests", "--gtest_filter=%s" % filter]
    logger.info('Getting test list from "%s"' % " ".join(cmd))
    return subprocess.check_output(cmd, text=True)


def ParseTestNamesFromTestList(output, test_expectation, also_run_skipped_for_capture_tests,
                               logger):
    output_lines = output.splitlines()
    tests = []
    seen_start_of_tests = False
    disabled = 0
    for line in output_lines:
        l = line.strip()
        if l == 'Tests list:':
            seen_start_of_tests = True
        elif l == 'End tests list.':
            break
        elif not seen_start_of_tests:
            pass
        elif not test_expectation.TestIsSkippedForCapture(l) or also_run_skipped_for_capture_tests:
            tests.append(l)
        else:
            disabled += 1

    logger.info('Found %d tests and %d disabled tests.' % (len(tests), disabled))
    return tests


def GetRunCommand(args, command):
    if args.xvfb:
        return ['vpython', 'testing/xvfb.py', command]
    else:
        return [command]


class GroupedResult():
    Passed = "Pass"
    Failed = "Fail"
    TimedOut = "Timeout"
    Crashed = "Crashed"
    CompileFailed = "CompileFailed"
    Skipped = "Skipped"

    ResultTypes = [Passed, Failed, TimedOut, Crashed, CompileFailed, Skipped]

    def __init__(self, resultcode, message, output, tests):
        self.resultcode = resultcode
        self.message = message
        self.output = output
        self.tests = []
        for test in tests:
            self.tests.append(test)


class TestBatchResult():

    display_output_lines = 20

    def __init__(self, grouped_results, verbose):
        self.results = {}
        for result_type in GroupedResult.ResultTypes:
            self.results[result_type] = []

        for grouped_result in grouped_results:
            for test in grouped_result.tests:
                self.results[grouped_result.resultcode].append(test.full_test_name)

        self.repr_str = ""
        self.GenerateRepresentationString(grouped_results, verbose)

    def __str__(self):
        return self.repr_str

    def GenerateRepresentationString(self, grouped_results, verbose):
        for grouped_result in grouped_results:
            self.repr_str += grouped_result.resultcode + ": " + grouped_result.message + "\n"
            for test in grouped_result.tests:
                self.repr_str += "\t" + test.full_test_name + "\n"
            if verbose:
                self.repr_str += grouped_result.output
            else:
                if grouped_result.resultcode == GroupedResult.CompileFailed:
                    self.repr_str += TestBatchResult.ExtractErrors(grouped_result.output)
                elif grouped_result.resultcode != GroupedResult.Passed:
                    self.repr_str += TestBatchResult.GetAbbreviatedOutput(grouped_result.output)

    def ExtractErrors(output):
        lines = output.splitlines()
        error_lines = []
        for i in range(len(lines)):
            if ": error:" in lines[i]:
                error_lines.append(lines[i] + "\n")
                if i + 1 < len(lines):
                    error_lines.append(lines[i + 1] + "\n")
        return "".join(error_lines)

    def GetAbbreviatedOutput(output):
        # Get all lines after and including the last occurrence of "Run".
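        # This keeps only the output of the most recently started test, which is typically the
        # one that failed.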
        lines = output.splitlines()
        line_count = 0
        for line_index in reversed(range(len(lines))):
            line_count += 1
            if "[ RUN      ]" in lines[line_index]:
                break

        return '\n' + '\n'.join(lines[-line_count:]) + '\n'


class Test():

    def __init__(self, test_name):
        self.full_test_name = test_name
        self.params = test_name.split('/')[1]
        self.context_id = 0
        self.test_index = -1  # index of test within a test batch
        self._label = self.full_test_name.replace(".", "_").replace("/", "_")

    def __str__(self):
        return self.full_test_name + " Params: " + self.params

    def GetLabel(self):
        return self._label

    def CanRunReplay(self, trace_folder_path):
        test_files = []
        label = self.GetLabel()
        assert (self.context_id == 0)
        for f in os.listdir(trace_folder_path):
            if os.path.isfile(os.path.join(trace_folder_path, f)) and f.startswith(label):
                test_files.append(f)
        frame_files_count = 0
        context_header_count = 0
        context_source_count = 0
        source_json_count = 0
        context_id = 0
        for f in test_files:
            if "_frame" in f:
                frame_files_count += 1
            elif f.endswith(".json"):
                source_json_count += 1
            elif f.endswith(".h"):
                context_header_count += 1
                if TRACE_FILE_SUFFIX in f:
                    context = f.split(TRACE_FILE_SUFFIX)[1][:-2]
                    context_id = int(context)
            elif f.endswith(".cpp"):
                context_source_count += 1
        can_run_replay = frame_files_count >= 1 and context_header_count >= 1 \
            and context_source_count >= 1 and source_json_count == 1
        if not can_run_replay:
            return False
        self.context_id = context_id
        return True


def _FormatEnv(env):
    return ' '.join(['%s=%s' % (k, v) for (k, v) in env.items()])


class TestBatch():

    CAPTURE_FRAME_END = 100

    def __init__(self, args, logger):
        self.args = args
        self.tests = []
        self.results = []
        self.logger = logger

    def SetWorkerId(self, worker_id):
        self.trace_dir = "%s%d" % (TRACE_FOLDER, worker_id)
        self.trace_folder_path = os.path.join(REPLAY_SAMPLE_FOLDER, self.trace_dir)

    def RunWithCapture(self, args, child_processes_manager):
        test_exe_path = os.path.join(args.out_dir, 'Capture', args.test_suite)

        extra_env = {
            'ANGLE_CAPTURE_FRAME_END': '{}'.format(self.CAPTURE_FRAME_END),
            'ANGLE_CAPTURE_SERIALIZE_STATE': '1',
            'ANGLE_FEATURE_OVERRIDES_ENABLED': 'forceRobustResourceInit:forceInitShaderVariables',
            'ANGLE_CAPTURE_ENABLED': '1',
            'ANGLE_CAPTURE_OUT_DIR': self.trace_folder_path,
        }

        env = {**os.environ.copy(), **extra_env}

        if not self.args.keep_temp_files:
            ClearFolderContent(self.trace_folder_path)
        filt = ':'.join([test.full_test_name for test in self.tests])

        cmd = GetRunCommand(args, test_exe_path)
        cmd += ['--gtest_filter=%s' % filt, '--angle-per-test-capture-label']
        self.logger.info('%s %s' % (_FormatEnv(extra_env), ' '.join(cmd)))

        returncode, output = child_processes_manager.RunSubprocess(
            cmd, env, timeout=SUBPROCESS_TIMEOUT)
        if args.show_capture_stdout:
            self.logger.info("Capture stdout: %s" % output)
        if returncode == -1:
            self.results.append(GroupedResult(GroupedResult.Crashed, "", output, self.tests))
            return False
        elif returncode == -2:
            self.results.append(GroupedResult(GroupedResult.TimedOut, "", "", self.tests))
            return False
        return True

    def RemoveTestsThatDoNotProduceAppropriateTraceFiles(self):
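        # Partition the batch: tests whose captures produced the required trace files (frame
        # files, a context header/source and a setup JSON) go on to replay; the rest are skipped.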
        continued_tests = []
        skipped_tests = []
        for test in self.tests:
            if not test.CanRunReplay(self.trace_folder_path):
                skipped_tests.append(test)
            else:
                continued_tests.append(test)
        if len(skipped_tests) > 0:
            self.results.append(
                GroupedResult(
                    GroupedResult.Skipped,
                    "Skipping replay since capture didn't produce necessary trace files", "",
                    skipped_tests))
        return continued_tests

    def BuildReplay(self, replay_build_dir, composite_file_id, tests, child_processes_manager):
        # write gni file that holds all the trace files in a list
        self.CreateTestNamesFile(composite_file_id, tests)

        gn_args = [('angle_build_capture_replay_tests', 'true'),
                   ('angle_capture_replay_test_trace_dir', '"%s"' % self.trace_dir),
                   ('angle_capture_replay_composite_file_id', str(composite_file_id))]
        returncode, output = child_processes_manager.RunGNGen(replay_build_dir, True, gn_args)
        if returncode != 0:
            self.logger.warning('GN failure output: %s' % output)
            self.results.append(
                GroupedResult(GroupedResult.CompileFailed, "Build replay failed at gn generation",
                              output, tests))
            return False
        returncode, output = child_processes_manager.RunNinja(replay_build_dir, REPLAY_BINARY,
                                                              True)
        if returncode != 0:
            self.logger.warning('Ninja failure output: %s' % output)
            self.results.append(
                GroupedResult(GroupedResult.CompileFailed, "Build replay failed at ninja", output,
                              tests))
            return False
        return True

    def RunReplay(self, replay_build_dir, replay_exe_path, child_processes_manager, tests):
        extra_env = {
            'ANGLE_CAPTURE_ENABLED': '0',
            'ANGLE_FEATURE_OVERRIDES_ENABLED': 'enable_capture_limits',
        }
        env = {**os.environ.copy(), **extra_env}

        run_cmd = GetRunCommand(self.args, replay_exe_path)
        self.logger.info('%s %s' % (_FormatEnv(extra_env), ' '.join(run_cmd)))

        returncode, output = child_processes_manager.RunSubprocess(
            run_cmd, env, timeout=SUBPROCESS_TIMEOUT)
        if returncode == -1:
            cmd = replay_exe_path
            self.results.append(
                GroupedResult(GroupedResult.Crashed, "Replay run crashed (%s)" % cmd, output,
                              tests))
            return
        elif returncode == -2:
            self.results.append(
                GroupedResult(GroupedResult.TimedOut, "Replay run timed out", output, tests))
            return

        output_lines = output.splitlines()
        passes = []
        fails = []
        count = 0
        for output_line in output_lines:
            words = output_line.split(" ")
            if len(words) == 3 and words[0] == RESULT_TAG:
                if int(words[2]) == 0:
                    passes.append(self.FindTestByLabel(words[1]))
                else:
                    fails.append(self.FindTestByLabel(words[1]))
                    self.logger.info("Context comparison failed: {}".format(
                        self.FindTestByLabel(words[1])))
                    self.PrintContextDiff(replay_build_dir, words[1])

                count += 1
        if len(passes) > 0:
            self.results.append(GroupedResult(GroupedResult.Passed, "", output, passes))
        if len(fails) > 0:
            self.results.append(GroupedResult(GroupedResult.Failed, "", output, fails))

    def PrintContextDiff(self, replay_build_dir, test_name):
        frame = 1
        while True:
            capture_file = "{}/{}_ContextCaptured{}.json".format(replay_build_dir, test_name,
                                                                 frame)
            replay_file = "{}/{}_ContextReplayed{}.json".format(replay_build_dir, test_name, frame)
            if os.path.exists(capture_file) and os.path.exists(replay_file):
                captured_context = open(capture_file, "r").readlines()
                replayed_context = open(replay_file, "r").readlines()
                for line in difflib.unified_diff(
                        captured_context, replayed_context, fromfile=capture_file,
                        tofile=replay_file):
                    print(line, end="")
            else:
                if frame > self.CAPTURE_FRAME_END:
                    break
            frame = frame + 1

    def FindTestByLabel(self, label):
        for test in self.tests:
            if test.GetLabel() == label:
                return test
        return None

    def AddTest(self, test):
        assert len(self.tests) <= self.args.batch_count
        test.test_index = len(self.tests)
        self.tests.append(test)

    def CreateTestNamesFile(self, composite_file_id, tests):
        data = {'traces': [test.GetLabel() for test in tests]}
        names_path = os.path.join(self.trace_folder_path, 'test_names_%d.json' % composite_file_id)
        with open(names_path, 'w') as f:
            f.write(json.dumps(data))

    def __str__(self):
        repr_str = "TestBatch:\n"
        for test in self.tests:
            repr_str += ("\t" + str(test) + "\n")
        return repr_str

    def __getitem__(self, index):
        assert index < len(self.tests)
        return self.tests[index]

    def __iter__(self):
        return iter(self.tests)

    def GetResults(self):
        return TestBatchResult(self.results, self.args.verbose)


class TestExpectation():
    # tests that must not be run for capture
    skipped_for_capture_tests = {}
    skipped_for_capture_tests_re = {}

    # test expectations for tests that do not pass
    non_pass_results = {}

    # tests that must run in a one-test batch
    run_single = {}
    run_single_re = {}

    flaky_tests = []

    non_pass_re = {}

    # yapf: disable
    # we want each pair on one line
    result_map = { "FAIL" : GroupedResult.Failed,
                   "TIMEOUT" : GroupedResult.TimedOut,
                   "CRASH" : GroupedResult.Crashed,
                   "COMPILE_FAIL" : GroupedResult.CompileFailed,
                   "NOT_RUN" : GroupedResult.Skipped,
                   "SKIP_FOR_CAPTURE" : GroupedResult.Skipped,
                   "PASS" : GroupedResult.Passed}
    # yapf: enable

    def __init__(self, args):
        expected_results_filename = "capture_replay_expectations.txt"
        expected_results_path = os.path.join(REPLAY_SAMPLE_FOLDER, expected_results_filename)
        self._asan = args.asan
        with open(expected_results_path, "rt") as f:
            for line in f:
                l = line.strip()
                if l != "" and not l.startswith("#"):
                    self.ReadOneExpectation(l, args.debug)

    def _CheckTagsWithConfig(self, tags, config_tags):
        for tag in tags:
            if tag not in config_tags:
                return False
        return True

    def ReadOneExpectation(self, line, is_debug):
        (testpattern, result) = line.split('=')
        (test_info_string, test_name_string) = testpattern.split(':')
        test_name = test_name_string.strip()
        test_info = test_info_string.strip().split()
        result_stripped = result.strip()

        tags = []
        if len(test_info) > 1:
            tags = test_info[1:]

        config_tags = [GetPlatformForSkip()]
        if self._asan:
            config_tags += ['ASAN']
        if is_debug:
            config_tags += ['DEBUG']

        if self._CheckTagsWithConfig(tags, config_tags):
            test_name_regex = re.compile('^' + test_name.replace('*', '.*') + '$')
            if result_stripped == 'CRASH' or result_stripped == 'COMPILE_FAIL':
                self.run_single[test_name] = self.result_map[result_stripped]
                self.run_single_re[test_name] = test_name_regex
            if result_stripped == 'SKIP_FOR_CAPTURE' or result_stripped == 'TIMEOUT':
                self.skipped_for_capture_tests[test_name] = self.result_map[result_stripped]
                self.skipped_for_capture_tests_re[test_name] = test_name_regex
            elif result_stripped == 'FLAKY':
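                # Flaky expectations only affect reporting; mismatched results for these tests
                # are listed separately and never counted as unexpected.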
                self.flaky_tests.append(test_name_regex)
            else:
                self.non_pass_results[test_name] = self.result_map[result_stripped]
                self.non_pass_re[test_name] = test_name_regex

    def TestIsSkippedForCapture(self, test_name):
        for p in self.skipped_for_capture_tests_re.values():
            m = p.match(test_name)
            if m is not None:
                return True
        return False

    def TestNeedsToRunSingle(self, test_name):
        for p in self.run_single_re.values():
            m = p.match(test_name)
            if m is not None:
                return True
        for p in self.skipped_for_capture_tests_re.values():
            m = p.match(test_name)
            if m is not None:
                return True
        return False

    def Filter(self, test_list, run_all_tests):
        result = {}
        for t in test_list:
            for key in self.non_pass_results.keys():
                if self.non_pass_re[key].match(t) is not None:
                    result[t] = self.non_pass_results[key]
            for key in self.run_single.keys():
                if self.run_single_re[key].match(t) is not None:
                    result[t] = self.run_single[key]
            if run_all_tests:
                for [key, r] in self.skipped_for_capture_tests.items():
                    if self.skipped_for_capture_tests_re[key].match(t) is not None:
                        result[t] = r
        return result

    def IsFlaky(self, test_name):
        for flaky in self.flaky_tests:
            if flaky.match(test_name) is not None:
                return True
        return False


def ClearFolderContent(path):
    for f in os.listdir(path):
        if os.path.isfile(os.path.join(path, f)):
            os.remove(os.path.join(path, f))


def SetCWDToAngleFolder():
    cwd = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
    os.chdir(cwd)
    return cwd


def RunTests(args, worker_id, job_queue, result_list, message_queue, logger, ninja_lock):
    replay_build_dir = os.path.join(args.out_dir, 'Replay%d' % worker_id)
    replay_exec_path = os.path.join(replay_build_dir, REPLAY_BINARY)

    child_processes_manager = ChildProcessesManager(args, logger, ninja_lock)
    # used to differentiate between multiple composite files when there are multiple test batches
    # running on the same worker and --keep-temp-files is set
    composite_file_id = 1
    while not job_queue.empty():
        try:
            test_batch = job_queue.get()
            logger.info('Starting {} tests on worker {}. Unstarted jobs: {}'.format(
                len(test_batch.tests), worker_id, job_queue.qsize()))

            test_batch.SetWorkerId(worker_id)

            success = test_batch.RunWithCapture(args, child_processes_manager)
            if not success:
                result_list.append(test_batch.GetResults())
                logger.info(str(test_batch.GetResults()))
                continue
            continued_tests = test_batch.RemoveTestsThatDoNotProduceAppropriateTraceFiles()
            if len(continued_tests) == 0:
                result_list.append(test_batch.GetResults())
                logger.info(str(test_batch.GetResults()))
                continue
            success = test_batch.BuildReplay(replay_build_dir, composite_file_id, continued_tests,
                                             child_processes_manager)
            if args.keep_temp_files:
                composite_file_id += 1
            if not success:
                result_list.append(test_batch.GetResults())
                logger.info(str(test_batch.GetResults()))
                continue
            test_batch.RunReplay(replay_build_dir, replay_exec_path, child_processes_manager,
                                 continued_tests)
            result_list.append(test_batch.GetResults())
            logger.info(str(test_batch.GetResults()))
        except KeyboardInterrupt:
            child_processes_manager.KillAll()
            raise
        except queue.Empty:
            child_processes_manager.KillAll()
            break
        except Exception as e:
            logger.error('RunTestsException: %s\n%s' % (repr(e), traceback.format_exc()))
            child_processes_manager.KillAll()
    message_queue.put(child_processes_manager.runtimes)
    child_processes_manager.KillAll()


def SafeDeleteFolder(folder_name):
    while os.path.isdir(folder_name):
        try:
            shutil.rmtree(folder_name)
        except KeyboardInterrupt:
            raise
        except PermissionError:
            pass


def DeleteReplayBuildFolders(folder_num, replay_build_dir, trace_folder):
    for i in range(folder_num):
        folder_name = replay_build_dir + str(i)
        if os.path.isdir(folder_name):
            SafeDeleteFolder(folder_name)


def CreateTraceFolders(folder_num):
    for i in range(folder_num):
        folder_name = TRACE_FOLDER + str(i)
        folder_path = os.path.join(REPLAY_SAMPLE_FOLDER, folder_name)
        if os.path.isdir(folder_path):
            shutil.rmtree(folder_path)
        os.makedirs(folder_path)


def DeleteTraceFolders(folder_num):
    for i in range(folder_num):
        folder_name = TRACE_FOLDER + str(i)
        folder_path = os.path.join(REPLAY_SAMPLE_FOLDER, folder_name)
        if os.path.isdir(folder_path):
            SafeDeleteFolder(folder_path)


def GetPlatformForSkip():
    # yapf: disable
    # we want each pair on one line
    platform_map = { 'win32' : 'WIN',
                     'linux' : 'LINUX' }
    # yapf: enable
    return platform_map.get(sys.platform, 'UNKNOWN')


def main(args):
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(level=args.log.upper())

    ninja_lock = multiprocessing.Semaphore(args.max_ninja_jobs)
    child_processes_manager = ChildProcessesManager(args, logger, ninja_lock)
    try:
        start_time = time.time()
        # Set the number of workers to cpu_count - 1, since the main process already takes up a
        # CPU core. Whenever a worker is available, it grabs the next job from the job queue and
        # runs it. The worker closes down when there are no more jobs.
        worker_count = min(multiprocessing.cpu_count() - 1, args.max_jobs)
        cwd = SetCWDToAngleFolder()

        CreateTraceFolders(worker_count)
        capture_build_dir = os.path.normpath(r'%s/Capture' % args.out_dir)
        returncode, output = child_processes_manager.RunGNGen(capture_build_dir, False)
        if returncode != 0:
            logger.error(output)
            child_processes_manager.KillAll()
            return EXIT_FAILURE
        # run ninja to build all tests
        returncode, output = child_processes_manager.RunNinja(capture_build_dir, args.test_suite,
                                                              False)
        if returncode != 0:
            logger.error(output)
            child_processes_manager.KillAll()
            return EXIT_FAILURE
        # get a list of tests
        test_path = os.path.join(capture_build_dir, args.test_suite)
        test_list = GetTestsListForFilter(args, test_path, args.filter, logger)
        test_expectation = TestExpectation(args)
        test_names = ParseTestNamesFromTestList(test_list, test_expectation,
                                                args.also_run_skipped_for_capture_tests, logger)
        test_expectation_for_list = test_expectation.Filter(
            test_names, args.also_run_skipped_for_capture_tests)
        # Objects created by the manager can be shared by multiple processes. We use it to create
        # collections shared by multiple processes, such as the job queue and the result list.
        manager = multiprocessing.Manager()
        job_queue = manager.Queue()
        test_batch_num = 0

        num_tests = len(test_names)
        test_index = 0

        # Put the tests into batches and the batches into the job queue. Jobs that are expected
        # to crash, time out, or fail to compile are run in batches of size one, because a crash
        # or compile failure brings down the whole batch and would otherwise give false negatives
        # for tests that would pass or fail differently on their own.
        while test_index < num_tests:
            batch = TestBatch(args, logger)

            while test_index < num_tests and len(batch.tests) < args.batch_count:
                test_name = test_names[test_index]
                test_obj = Test(test_name)

                if test_expectation.TestNeedsToRunSingle(test_name):
                    single_batch = TestBatch(args, logger)
                    single_batch.AddTest(test_obj)
                    job_queue.put(single_batch)
                    test_batch_num += 1
                else:
                    batch.AddTest(test_obj)

                test_index += 1

            if len(batch.tests) > 0:
                job_queue.put(batch)
                test_batch_num += 1

        passed_count = 0
        failed_count = 0
        timedout_count = 0
        crashed_count = 0
        compile_failed_count = 0
        skipped_count = 0

        unexpected_count = {}
        unexpected_test_results = {}

        for type in GroupedResult.ResultTypes:
            unexpected_count[type] = 0
            unexpected_test_results[type] = []

        # The result list is created by the manager and is shared by multiple processes. Each
        # worker populates it with the results of its test runs; after all workers finish, the
        # main process walks the list. Each item is a TestBatchResult whose .results dict maps a
        # result type ("Pass", "Fail", "Skipped", ...) to the names of the tests in that batch
        # that finished with that result.
        result_list = manager.list()
        message_queue = manager.Queue()
        # so that we do not spawn more processes than we actually need
        worker_count = min(worker_count, test_batch_num)
        # spawn and start up workers
        for worker_id in range(worker_count):
            proc = multiprocessing.Process(
                target=RunTests,
                args=(args, worker_id, job_queue, result_list, message_queue, logger, ninja_lock))
            child_processes_manager.AddWorker(proc)
            proc.start()

        # print out periodic status messages
        while child_processes_manager.IsAnyWorkerAlive():
            logger.info('%d workers running, %d jobs left.' %
                        (child_processes_manager.GetRemainingWorkers(), (job_queue.qsize())))
            # If only a few tests are run, the workers are likely to finish before
            # STATUS_MESSAGE_PERIOD has passed, and the test script would sit idle for the
            # remainder of the wait time. Therefore, limit the wait by the number of
            # unfinished jobs.
            unfinished_jobs = job_queue.qsize() + child_processes_manager.GetRemainingWorkers()
            time.sleep(min(STATUS_MESSAGE_PERIOD, unfinished_jobs))

        child_processes_manager.JoinWorkers()
        end_time = time.time()

        summed_runtimes = child_processes_manager.runtimes
        while not message_queue.empty():
            runtimes = message_queue.get()
            for k, v in runtimes.items():
                summed_runtimes.setdefault(k, 0.0)
                summed_runtimes[k] += v

        # print out results
        logger.info('')
        logger.info('Results:')

        flaky_results = []

        regression_error_log = []

        for test_batch in result_list:
            test_batch_result = test_batch.results
            logger.debug(str(test_batch_result))

            batch_has_regression = False

            passed_count += len(test_batch_result[GroupedResult.Passed])
            failed_count += len(test_batch_result[GroupedResult.Failed])
            timedout_count += len(test_batch_result[GroupedResult.TimedOut])
            crashed_count += len(test_batch_result[GroupedResult.Crashed])
            compile_failed_count += len(test_batch_result[GroupedResult.CompileFailed])
            skipped_count += len(test_batch_result[GroupedResult.Skipped])

            for real_result, test_list in test_batch_result.items():
                for test in test_list:
                    if test_expectation.IsFlaky(test):
                        flaky_results.append('{} ({})'.format(test, real_result))
                        continue

                    # Passing tests are not in the list
                    if test not in test_expectation_for_list.keys():
                        if real_result != GroupedResult.Passed:
                            batch_has_regression = True
                            unexpected_count[real_result] += 1
                            unexpected_test_results[real_result].append(
                                '{} {} (expected Pass or is new test)'.format(test, real_result))
                    else:
                        expected_result = test_expectation_for_list[test]
                        if real_result != expected_result:
                            if real_result != GroupedResult.Passed:
                                batch_has_regression = True
                            unexpected_count[real_result] += 1
                            unexpected_test_results[real_result].append(
                                '{} {} (expected {})'.format(test, real_result, expected_result))
            if batch_has_regression:
                regression_error_log.append(str(test_batch))

        if len(regression_error_log) > 0:
            logger.info('Logging output of test batches with regressions')
            logger.info(
                '==================================================================================================='
            )
            for log in regression_error_log:
                logger.info(log)
                logger.info(
                    '---------------------------------------------------------------------------------------------------'
                )
            logger.info('')

        logger.info('')
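        # Report overall wall time and the per-command runtimes aggregated from all workers.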
        logger.info('Elapsed time: %.2lf seconds' % (end_time - start_time))
        logger.info('')
        logger.info('Runtimes by process:\n%s' %
                    '\n'.join('%s: %.2lf seconds' % (k, v) for (k, v) in summed_runtimes.items()))

        if len(flaky_results):
            logger.info("Flaky test(s):")
            for line in flaky_results:
                logger.info("  {}".format(line))
            logger.info("")

        logger.info(
            'Summary: Passed: %d, Comparison Failed: %d, Crashed: %d, CompileFailed: %d, Skipped: %d, Timeout: %d'
            % (passed_count, failed_count, crashed_count, compile_failed_count, skipped_count,
               timedout_count))

        retval = EXIT_SUCCESS

        unexpected_test_results_count = 0
        for count in unexpected_count.values():
            unexpected_test_results_count += count

        if unexpected_test_results_count > 0:
            retval = EXIT_FAILURE
            logger.info('')
            logger.info('Failure: Obtained {} results that differ from expectation:'.format(
                unexpected_test_results_count))
            logger.info('')
            for result, count in unexpected_count.items():
                if count > 0:
                    logger.info("Unexpected '{}' ({}):".format(result, count))
                    for test_result in unexpected_test_results[result]:
                        logger.info('  {}'.format(test_result))
                    logger.info('')

        logger.info('')

        # delete generated folders if the --purge flag is set
        if args.purge:
            DeleteTraceFolders(worker_count)
            if os.path.isdir(args.out_dir):
                SafeDeleteFolder(args.out_dir)

        # Try hard to ensure output is finished before ending the test.
        logging.shutdown()
        sys.stdout.flush()
        time.sleep(2.0)
        return retval

    except KeyboardInterrupt:
        child_processes_manager.KillAll()
        return EXIT_FAILURE


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--out-dir',
        default=DEFAULT_OUT_DIR,
        help='Where to build ANGLE for capture and replay. Relative to the ANGLE folder. Default is "%s".'
        % DEFAULT_OUT_DIR)
    parser.add_argument(
        '-f',
        '--filter',
        '--gtest_filter',
        default=DEFAULT_FILTER,
        help='Same as GoogleTest\'s filter argument. Default is "%s".' % DEFAULT_FILTER)
    parser.add_argument(
        '--test-suite',
        default=DEFAULT_TEST_SUITE,
        help='Test suite binary to execute. Default is "%s".' % DEFAULT_TEST_SUITE)
    parser.add_argument(
        '--batch-count',
        default=DEFAULT_BATCH_COUNT,
        type=int,
        help='Number of tests in a batch. Default is %d.' % DEFAULT_BATCH_COUNT)
    parser.add_argument(
        '--keep-temp-files',
        action='store_true',
        help='Whether to keep the temp files and folders. Off by default.')
    parser.add_argument(
        '--purge', action='store_true', help='Purge all build directories on exit.')
    parser.add_argument(
        '--goma-dir',
        default='',
        help='Set custom goma directory. Uses the goma in path by default.')
    parser.add_argument(
        '--output-to-file',
        action='store_true',
        help='Whether to write output to a result file. Off by default.')
    parser.add_argument(
        '--result-file',
        default=DEFAULT_RESULT_FILE,
        help='Name of the result file in the capture_replay_tests folder. Default is "%s".' %
        DEFAULT_RESULT_FILE)
    parser.add_argument('-v', '--verbose', action='store_true', help='Shows full test output.')
    parser.add_argument(
        '-l',
        '--log',
        default=DEFAULT_LOG_LEVEL,
        help='Controls the logging level. Default is "%s".' % DEFAULT_LOG_LEVEL)
    parser.add_argument(
        '-j',
        '--max-jobs',
        default=DEFAULT_MAX_JOBS,
        type=int,
        help='Maximum number of test processes. Default is %d.' % DEFAULT_MAX_JOBS)
    parser.add_argument(
        '-a',
        '--also-run-skipped-for-capture-tests',
        action='store_true',
        help='Also run tests that are disabled in the expectations by SKIP_FOR_CAPTURE')
    parser.add_argument(
        '--max-ninja-jobs',
        type=int,
        default=DEFAULT_MAX_NINJA_JOBS,
        help='Maximum number of concurrent ninja jobs to run at once.')
    parser.add_argument('--xvfb', action='store_true', help='Run with xvfb.')
    parser.add_argument('--asan', action='store_true', help='Build with ASAN.')
    parser.add_argument(
        '--show-capture-stdout', action='store_true', help='Print test stdout during capture.')
    parser.add_argument('--debug', action='store_true', help='Debug builds (default is Release).')
    args = parser.parse_args()
    if args.debug and (args.out_dir == DEFAULT_OUT_DIR):
        args.out_dir = args.out_dir + "Debug"

    if sys.platform == "win32":
        args.test_suite += ".exe"
    if args.output_to_file:
        logging.basicConfig(level=args.log.upper(), filename=args.result_file)
    else:
        logging.basicConfig(level=args.log.upper())

    sys.exit(main(args))