3 # Parallel VM test case executor
4 # Copyright (c) 2014-2015, Jouni Malinen <j@w1.fi>
6 # This software may be distributed under the terms of the BSD license.
7 # See README for more details.
# Module-level state for the parallel VM test runner.
# NOTE(review): this excerpt is a numbered partial listing -- the leading
# integer on each line is the original file's line number and there are
# gaps between them; do not treat this span as directly runnable.
# Root logger for the whole run; a FileHandler, formatter and level are
# attached to it later in this file (see the logging setup section).
17 logger = logging.getLogger()
19 # Test cases that take significantly longer time to execute than average.
# Scheduling hint only: later code moves these names to the front of the
# test queue (when there are enough servers and tests) so a single VM is
# not left running one long case at the end while the others sit idle.
# NOTE(review): some list entries fall in gaps of this listing, so the
# visible list is incomplete.
20 long_tests = [ "ap_roam_open",
21 "wpas_mesh_password_mismatch_retry",
22 "wpas_mesh_password_mismatch",
23 "hostapd_oom_wpa2_psk_connect",
24 "ap_hs20_fetch_osu_stop",
27 "nfc_wps_er_handover_pk_hash_mismatch_sta",
28 "go_neg_peers_force_diff_freq",
31 "ap_pmf_sta_unprot_deauth_burst",
32 "ap_bss_add_remove_during_ht_scan",
34 "autoscan_exponential",
37 "ap_inactivity_disconnect",
38 "scan_bss_expiration_age",
40 "discovery_group_client",
46 "ap_ht_40mhz_intolerant_ap",
48 "discovery_pd_retries",
49 "ap_wps_setup_locked_timeout",
53 "grpform_cred_ready_timeout",
54 "hostapd_oom_wpa2_eap_connect",
57 "hostapd_oom_wpa2_eap",
59 "proxyarp_open_ebtables",
61 "obss_scan_40_intolerant",
65 "ap_wps_iteration_error",
67 "ap_wps_http_timeout",
68 "p2p_go_move_reg_change",
71 "p2p_go_move_scm_peer_supports",
72 "p2p_go_move_scm_peer_does_not_support",
73 "p2p_go_move_scm_multi" ]
# Fragment of a helper -- presumably get_failed(vm), whose def line falls
# in a gap of this listing (callers below use get_failed(vm)). It folds
# every VM record's 'failed' test-name list into one accumulator list.
77 for i in range(num_servers):
78 failed += vm[i]['failed']
# Drain one VM subprocess's (non-blocking) stdout pipe and parse every
# complete line out of it. Recognized line prefixes -- READY / PASS /
# FAIL / NOT-FOUND / SKIP / START -- update the global run counters and
# the per-VM record dict. A trailing partial line is carried over in
# vm['pending'] until the rest of it arrives on a later read.
# Callers use the return value as a boolean "VM is ready for the next
# command" flag (see show_progress) -- the return statements themselves
# fall in gaps of this listing; TODO confirm against the full file.
81 def vm_read_stdout(vm, i):
82 global total_started, total_passed, total_failed, total_skipped
# Non-blocking read: may deliver a partial line; the surrounding error
# handling is not visible in this excerpt.
87 out = vm['proc'].stdout.read()
90 logger.debug("VM[%d] stdout.read[%s]" % (i, out))
# Prepend whatever was left unterminated from the previous read.
91 pending = vm['pending'] + out
94 pos = pending.find('\n')
97 line = pending[0:pos].rstrip()
98 pending = pending[(pos + 1):]
99 logger.debug("VM[%d] stdout full line[%s]" % (i, line))
100 if line.startswith("READY"):
102 elif line.startswith("PASS"):
105 elif line.startswith("FAIL"):
# Expected form "FAIL <name> ..."; a line missing the name field is
# only logged as incomplete (the length check is in a listing gap).
108 vals = line.split(' ')
110 logger.info("VM[%d] incomplete FAIL line: %s" % (i, line))
114 logger.debug("VM[%d] test case failed: %s" % (i, name))
115 vm['failed'].append(name)
116 elif line.startswith("NOT-FOUND"):
119 logger.info("VM[%d] test case not found" % i)
120 elif line.startswith("SKIP"):
123 elif line.startswith("START"):
# Until the first failure, record the order of started tests so the
# failing sequence can be replayed later (see the fail_seq replay
# output in the summary section of this file).
125 if len(vm['failed']) == 0:
126 vals = line.split(' ')
128 vm['fail_seq'].append(vals[1])
# Keep a full transcript of this VM's output for the combined log.
129 vm['out'] += line + '\n'
# Preserve any unterminated partial line for the next call.
131 vm['pending'] = pending
# Curses UI plus scheduler loop: draws one status row per VM and a totals
# row, feeds test names to each VM over its stdin pipe, watches for
# unexpected process exit and for "Kernel panic" in each VM's console
# log, and after the first full pass re-queues the first-run failures for
# a retry round. Runs until every VM has shut down.
# NOTE(review): the outer while-loop and several branch bodies fall in
# gaps of this numbered listing.
134 def show_progress(scr):
140 global first_run_failures
141 global total_started, total_passed, total_failed, total_skipped
143 total_tests = len(tests)
144 logger.info("Total tests: %d" % total_tests)
# Static screen layout: header, one row per VM, then the totals row.
147 scr.addstr(0, 0, "Parallel test execution status", curses.A_BOLD)
148 for i in range(0, num_servers):
149 scr.addstr(i + 1, 0, "VM %d:" % (i + 1), curses.A_BOLD)
150 scr.addstr(i + 1, 10, "starting VM")
151 scr.addstr(num_servers + 1, 0, "Total:", curses.A_BOLD)
152 scr.addstr(num_servers + 1, 20, "TOTAL={} STARTED=0 PASS=0 FAIL=0 SKIP=0".format(total_tests))
155 completed_first_pass = False
160 first_running = False
# First pass: poll every VM, harvest its output and hand out the next
# queued test case.
163 for i in range(0, num_servers):
164 if completed_first_pass:
166 if vm[i]['first_run_done']:
168 if not vm[i]['proc']:
# poll() returning non-None means the VM process exited; decide
# between a kernel panic and an unexpected exit from its console log.
170 if vm[i]['proc'].poll() is not None:
# Per-VM console log path: <dir>/<timestamp>.srv.<n>/console
174 log = '{}/{}.srv.{}/console'.format(dir, timestamp, i + 1)
175 with open(log, 'r') as f:
176 if "Kernel panic" in f.read():
177 scr.addstr("kernel panic")
178 logger.info("VM[%d] kernel panic" % i)
180 scr.addstr("unexpected exit")
181 logger.info("VM[%d] unexpected exit" % i)
188 err = vm[i]['proc'].stderr.read()
190 logger.debug("VM[%d] stderr.read[%s]" % (i, err))
# Truthy result => VM is ready for the next test case.
194 if vm_read_stdout(vm[i], i):
199 vm[i]['first_run_done'] = True
200 scr.addstr("completed first round")
201 logger.info("VM[%d] completed first round" % i)
# Dispatch the next test name to this VM over stdin.
205 vm[i]['proc'].stdin.write(name + '\n')
207 logger.debug("VM[%d] start test %s" % (i, name))
# No VM is running a first-round test any more: the first round is
# done; anything still queued indicates a scheduling bug.
209 if not first_running and not completed_first_pass:
210 logger.info("First round of testing completed")
212 logger.info("Unexpected test cases remaining from first round: " + str(tests))
213 raise Exception("Unexpected test cases remaining from first round")
214 completed_first_pass = True
# Queue every first-round failure for the retry round.
215 for name in get_failed(vm):
217 rerun_tests.append(name)
218 first_run_failures.append(name)
# Retry phase: feed rerun_tests to ready VMs and shut each VM down
# once the rerun queue has drained.
220 for i in range(num_servers):
221 if not completed_first_pass:
223 if not vm[i]['proc']:
225 if vm[i]['proc'].poll() is not None:
229 log = '{}/{}.srv.{}/console'.format(dir, timestamp, i + 1)
230 with open(log, 'r') as f:
231 if "Kernel panic" in f.read():
232 scr.addstr("kernel panic")
233 logger.info("VM[%d] kernel panic" % i)
235 scr.addstr("completed run")
236 logger.info("VM[%d] completed run" % i)
242 err = vm[i]['proc'].stderr.read()
244 logger.debug("VM[%d] stderr.read[%s]" % (i, err))
249 if vm[i]['first_run_done']:
250 vm[i]['first_run_done'] = False
253 ready = vm_read_stdout(vm[i], i)
# An empty command line tells the VM runner to shut down.
259 vm[i]['proc'].stdin.write('\n')
260 scr.addstr("shutting down")
261 logger.info("VM[%d] shutting down" % i)
# "(*)" marks a retried test case on the status row.
263 name = rerun_tests.pop(0)
264 vm[i]['proc'].stdin.write(name + '\n')
265 scr.addstr(name + "(*)")
266 logger.debug("VM[%d] start test %s (*)" % (i, name))
# Refresh the percentage and totals row.
272 scr.move(num_servers + 1, 10)
274 scr.addstr("{} %".format(int(100.0 * (total_passed + total_failed + total_skipped) / total_tests)))
275 scr.addstr(num_servers + 1, 20, "TOTAL={} STARTED={} PASS={} FAIL={} SKIP={}".format(total_tests, total_started, total_passed, total_failed, total_skipped))
276 failed = get_failed(vm)
278 scr.move(num_servers + 2, 0)
280 scr.addstr("Failed test cases: ")
# Show how many first-run failures are still queued for retry.
294 scr.addstr("(RETRY FAILED %d)" % len(rerun_tests))
297 elif first_run_failures:
298 scr.addstr("(RETRY FAILED)")
# Body of the top-level entry routine (its def line falls in a gap of
# this listing). Phases: defaults + argparse, optional code-coverage
# build, collecting the test list from run-tests.py, ordering/shuffling
# the tests, logging setup, launching the VMs with non-blocking pipes,
# running the curses progress loop, then writing logs and printing the
# run summary.
# NOTE(review): the bare print statements are Python 2 syntax; this file
# predates a Python 3 conversion.
315 global first_run_failures
316 global total_started, total_passed, total_failed, total_skipped
317 global rerun_failures
324 debug_level = logging.INFO
325 rerun_failures = True
# Single timestamp names every per-run log directory and file.
326 timestamp = int(time.time())
328 scriptsdir = os.path.dirname(os.path.realpath(sys.argv[0]))
330 p = argparse.ArgumentParser(description='run multiple testing VMs in parallel')
331 p.add_argument('num_servers', metavar='number of VMs', type=int, choices=range(1, 100),
332 help="number of VMs to start")
333 p.add_argument('-f', dest='testmodules', metavar='<test module>',
334 help='execute only tests from these test modules',
336 p.add_argument('-1', dest='no_retry', action='store_const', const=True, default=False,
337 help="don't retry failed tests automatically")
338 p.add_argument('--debug', dest='debug', action='store_const', const=True, default=False,
339 help="enable debug logging")
340 p.add_argument('--codecov', dest='codecov', action='store_const', const=True, default=False,
341 help="enable code coverage collection")
342 p.add_argument('--shuffle-tests', dest='shuffle', action='store_const', const=True, default=False,
343 help="shuffle test cases to randomize order")
344 p.add_argument('--short', dest='short', action='store_const', const=True,
346 help="only run short-duration test cases")
347 p.add_argument('--long', dest='long', action='store_const', const=True,
349 help="include long-duration test cases")
350 p.add_argument('--valgrind', dest='valgrind', action='store_const',
351 const=True, default=False,
352 help="run tests under valgrind")
353 p.add_argument('params', nargs='*')
354 args = p.parse_args()
355 num_servers = args.num_servers
356 rerun_failures = not args.no_retry
358 debug_level = logging.DEBUG
# Flags forwarded to each vm-run.sh invocation.
361 extra_args += [ '--valgrind' ]
363 extra_args += [ '--long' ]
# Optional coverage build: separate instrumented binaries plus the
# extra --codecov_dir argument for every VM.
365 print "Code coverage - build separate binaries"
366 logdir = "/tmp/hwsim-test-logs/" + str(timestamp)
368 subprocess.check_call([os.path.join(scriptsdir, 'build-codecov.sh'),
370 codecov_args = ['--codecov_dir', logdir]
376 first_run_failures = []
# Ask run-tests.py for the test case list; the name is the first
# whitespace-separated token of each output line.
381 cmd = [ os.path.join(os.path.dirname(scriptsdir), 'run-tests.py'),
385 cmd += args.testmodules
386 lst = subprocess.Popen(cmd, stdout=subprocess.PIPE)
387 for l in lst.stdout.readlines():
388 name = l.split(' ')[0]
391 sys.exit("No test cases selected")
393 dir = '/tmp/hwsim-test-logs'
# Either shuffle the whole test list or (for larger runs) front-load
# the known long-duration cases -- see the comment below.
400 from random import shuffle
402 elif num_servers > 2 and len(tests) > 100:
403 # Move test cases with long duration to the beginning as an
404 # optimization to avoid last part of the test execution running a long
405 # duration test case on a single VM while all other VMs have already
406 # completed their work.
412 tests = [t for t in tests if t not in long_tests]
# Logging: everything goes to parallel-vm.log at the selected level.
414 logger.setLevel(debug_level)
415 log_handler = logging.FileHandler('parallel-vm.log')
416 log_handler.setLevel(debug_level)
417 fmt = "%(asctime)s %(levelname)s %(message)s"
418 log_formatter = logging.Formatter(fmt)
419 log_handler.setFormatter(log_formatter)
420 logger.addHandler(log_handler)
# Launch one vm-run.sh per server, staggered by --delay i, each with a
# unique srv.<n> log extension and fully piped stdio.
423 for i in range(0, num_servers):
424 print("\rStarting virtual machine {}/{}".format(i + 1, num_servers)),
425 logger.info("Starting virtual machine {}/{}".format(i + 1, num_servers))
426 cmd = [os.path.join(scriptsdir, 'vm-run.sh'), '--delay', str(i),
427 '--timestamp', str(timestamp),
428 '--ext', 'srv.%d' % (i + 1),
429 '-i'] + codecov_args + extra_args
431 vm[i]['first_run_done'] = False
432 vm[i]['proc'] = subprocess.Popen(cmd,
433 stdin=subprocess.PIPE,
434 stdout=subprocess.PIPE,
435 stderr=subprocess.PIPE)
437 vm[i]['pending'] = ""
440 vm[i]['fail_seq'] = []
# Make both output pipes non-blocking so the UI loop never stalls on a
# read from an idle VM.
441 for stream in [ vm[i]['proc'].stdout, vm[i]['proc'].stderr ]:
443 fl = fcntl.fcntl(fd, fcntl.F_GETFL)
444 fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
# Run the interactive scheduler/progress loop under curses.
447 curses.wrapper(show_progress)
# Persist each VM's collected stdout/stderr transcript.
449 with open('{}/{}-parallel.log'.format(dir, timestamp), 'w') as f:
450 for i in range(0, num_servers):
451 f.write('VM {}\n{}\n{}\n'.format(i, vm[i]['out'], vm[i]['err']))
453 failed = get_failed(vm)
# Print a replay command per failing VM: the last ~30 started tests
# (fail_seq minus the skipped prefix) reproduce the failure order.
455 if first_run_failures:
456 print "To re-run same failure sequence(s):"
457 for i in range(0, num_servers):
458 if len(vm[i]['failed']) == 0:
460 print "./parallel-vm.py -1 1",
461 skip = len(vm[i]['fail_seq'])
462 skip -= min(skip, 30)
463 for t in vm[i]['fail_seq']:
469 print "Failed test cases:"
470 for f in first_run_failures:
472 logger.info("Failed: " + f)
# double_failed: removing one occurrence of each first-run failure
# from the full failure list leaves the cases that failed again on
# retry (its initial construction falls in a gap of this listing --
# TODO confirm against the full file).
476 double_failed.append(name)
477 for test in first_run_failures:
478 double_failed.remove(test)
479 if not rerun_failures:
481 elif failed and not double_failed:
482 print "All failed cases passed on retry"
483 logger.info("All failed cases passed on retry")
485 print "Failed even on retry:"
486 for f in double_failed:
488 logger.info("Failed on retry: " + f)
490 res = "TOTAL={} PASS={} FAIL={} SKIP={}".format(total_started,
496 print "Logs: " + dir + '/' + str(timestamp)
497 logger.info("Logs: " + dir + '/' + str(timestamp))
# Post-run diagnostics: dump any unparsed stdout remnants and scan each
# VM console log for kernel panics.
499 for i in range(0, num_servers):
500 if len(vm[i]['pending']) > 0:
501 logger.info("Unprocessed stdout from VM[%d]: '%s'" %
502 (i, vm[i]['pending']))
503 log = '{}/{}.srv.{}/console'.format(dir, timestamp, i + 1)
504 with open(log, 'r') as f:
505 if "Kernel panic" in f.read():
506 print "Kernel panic in " + log
507 logger.info("Kernel panic in " + log)
# Coverage post-processing: per-VM data, then a combined HTML report.
510 print "Code coverage - preparing report"
511 for i in range(num_servers):
512 subprocess.check_call([os.path.join(scriptsdir,
513 'process-codecov.sh'),
514 logdir + ".srv.%d" % (i + 1),
516 subprocess.check_call([os.path.join(scriptsdir, 'combine-codecov.sh'),
518 print "file://%s/index.html" % logdir
519 logger.info("Code coverage report: file://%s/index.html" % logdir)
# Final verdict for the log (exit-status lines fall in listing gaps).
521 if double_failed or (failed and not rerun_failures):
522 logger.info("Test run complete - failures found")
525 logger.info("Test run complete - failures found on first run; passed on retry")
527 logger.info("Test run complete - no failures")
# Standard script entry guard; the guarded call itself falls past the end
# of this excerpt.
530 if __name__ == "__main__":