Leaked source code of Windows Server 2003

package Test::Harness;

use 5.005_64;
use Exporter;
use Benchmark;
use Config;
use FileHandle;
use strict;

our($VERSION, $verbose, $switches, $have_devel_corestack, $curtest,
    $columns, @ISA, @EXPORT, @EXPORT_OK);

$have_devel_corestack = 0;

$VERSION = "1.1604";

$ENV{HARNESS_ACTIVE} = 1;

# Some experimental versions of OS/2 build have broken $?
my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};

my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};

my $tests_skipped = 0;
my $subtests_skipped = 0;

@ISA = ('Exporter');
@EXPORT = qw(&runtests);
@EXPORT_OK = qw($verbose $switches);

$verbose = 0;
$switches = "-w";

$columns = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;

sub globdir { opendir DIRH, shift; my @f = readdir DIRH; closedir DIRH; @f }
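
# runtests(@tests): run each named test script, parse its "1..M" plan and
# "ok N"/"not ok N" lines, print per-script and overall results, and return
# true only if every script passed.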
sub runtests {
    my(@tests) = @_;
    local($|) = 1;
    my($test,$te,$ok,$next,$max,$pct,$totbonus,@failed,%failedtests);
    my $totmax = 0;
    my $totok = 0;
    my $files = 0;
    my $bad = 0;
    my $good = 0;
    my $total = @tests;

    # pass -I flags to children
    my $old5lib = $ENV{PERL5LIB};

    # VMS has a 255-byte limit on the length of %ENV entries, so
    # toss the ones that involve perl_root, the install location
    # for VMS
    my $new5lib;
    if ($^O eq 'VMS') {
        $new5lib = join($Config{path_sep}, grep {!/perl_root/i;} @INC);
        $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;
    }
    else {
        $new5lib = join($Config{path_sep}, @INC);
    }

    local($ENV{'PERL5LIB'}) = $new5lib;

    my @dir_files = globdir $files_in_dir if defined $files_in_dir;
    my $t_start = new Benchmark;

    while ($test = shift(@tests)) {
        $te = $test;
        chop($te);
        if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./s; }
        my $blank = (' ' x 77);
        my $leader = "$te" . '.' x (20 - length($te));
        my $ml = "";
        $ml = "\r$blank\r$leader"
            if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $verbose;
        print $leader;

        my $fh = new FileHandle;
        $fh->open($test) or print "can't open $test. $!\n";
        my $first = <$fh>;
        my $s = $switches;
        $s .= " $ENV{'HARNESS_PERL_SWITCHES'}"
            if exists $ENV{'HARNESS_PERL_SWITCHES'};
        $s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
            if $first =~ /^#!.*\bperl.*-\w*T/;
        $fh->close or print "can't close $test. $!\n";

        my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
                      ? "./perl -I../lib ../utils/perlcc $test "
                        . "-r 2>> ./compilelog |"
                      : "$^X $s $test|";
        $cmd = "MCR $cmd" if $^O eq 'VMS';
        $fh->open($cmd) or print "can't run $test. $!\n";

        $ok = $next = $max = 0;
        @failed = ();
        my %todo = ();
        my $bonus = 0;
        my $skipped = 0;
        my $skip_reason;
        while (<$fh>) {
            if( $verbose ){
                print $_;
            }
            if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
                $max = $1;
                for (split(/\s+/, $2)) { $todo{$_} = 1; }
                $totmax += $max;
                $files++;
                $next = 1;
            } elsif (/^1\.\.([0-9]+)(\s*\#\s*[Ss]kip\S*(?>\s+)(.+))?/) {
                $max = $1;
                $totmax += $max;
                $files++;
                $next = 1;
                $skip_reason = $3 if not $max and defined $3;
            } elsif ($max && /^(not\s+)?ok\b/) {
                my $this = $next;
                if (/^not ok\s*(\d*)/){
                    $this = $1 if $1 > 0;
                    print "${ml}NOK $this" if $ml;
                    if (!$todo{$this}) {
                        push @failed, $this;
                    } else {
                        $ok++;
                        $totok++;
                    }
                } elsif (/^ok\s*(\d*)(\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?/) {
                    $this = $1 if $1 > 0;
                    print "${ml}ok $this/$max" if $ml;
                    $ok++;
                    $totok++;
                    $skipped++ if defined $2;
                    my $reason;
                    $reason = 'unknown reason' if defined $2;
                    $reason = $3 if defined $3;
                    if (defined $reason and defined $skip_reason) {
                        # print "was: '$skip_reason' new '$reason'\n";
                        $skip_reason = 'various reasons'
                            if $skip_reason ne $reason;
                    } elsif (defined $reason) {
                        $skip_reason = $reason;
                    }
                    $bonus++, $totbonus++ if $todo{$this};
                }
                if ($this > $next) {
                    # print "Test output counter mismatch [test $this]\n";
                    # no need to warn probably
                    push @failed, $next..$this-1;
                } elsif ($this < $next) {
                    # we have seen more "ok" lines than the number suggests
                    print "Confused test output: test $this answered after test ", $next-1, "\n";
                    $next = $this;
                }
                $next = $this + 1;
            }
        }

        $fh->close; # must close to reap child resource values
        my $wstatus = $ignore_exitcode ? 0 : $?; # Can trust $? ?
        my $estatus;
        $estatus = ($^O eq 'VMS'
                        ? eval 'use vmsish "status"; $estatus = $?'
                        : $wstatus >> 8);

        if ($wstatus) {
            my ($failed, $canon, $percent) = ('??', '??');
            printf "${ml}dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
                    $wstatus,$wstatus;
            print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
            if (corestatus($wstatus)) { # until we have a wait module
                if ($have_devel_corestack) {
                    Devel::CoreStack::stack($^X);
                } else {
                    print "\ttest program seems to have generated a core\n";
                }
            }
            $bad++;
            if ($max) {
                if ($next == $max + 1 and not @failed) {
                    print "\tafter all the subtests completed successfully\n";
                    $percent = 0;
                    $failed = 0; # But we do not set $canon!
                } else {
                    push @failed, $next..$max;
                    $failed = @failed;
                    (my $txt, $canon) = canonfailed($max,$skipped,@failed);
                    $percent = 100*(scalar @failed)/$max;
                    print "DIED. ",$txt;
                }
            }
            $failedtests{$test} = { canon => $canon, max => $max || '??',
                                    failed => $failed,
                                    name => $test, percent => $percent,
                                    estat => $estatus, wstat => $wstatus,
                                  };
        } elsif ($ok == $max && $next == $max+1) {
            if ($max and $skipped + $bonus) {
                my @msg;
                push(@msg, "$skipped/$max skipped: $skip_reason")
                    if $skipped;
                push(@msg, "$bonus/$max unexpectedly succeeded")
                    if $bonus;
                print "${ml}ok, ".join(', ', @msg)."\n";
            } elsif ($max) {
                print "${ml}ok\n";
            } elsif (defined $skip_reason) {
                print "skipped: $skip_reason\n";
                $tests_skipped++;
            } else {
                print "skipped test on this platform\n";
                $tests_skipped++;
            }
            $good++;
        } elsif ($max) {
            if ($next <= $max) {
                push @failed, $next..$max;
            }
            if (@failed) {
                my ($txt, $canon) = canonfailed($max,$skipped,@failed);
                print "${ml}$txt";
                $failedtests{$test} = { canon => $canon, max => $max,
                                        failed => scalar @failed,
                                        name => $test, percent => 100*(scalar @failed)/$max,
                                        estat => '', wstat => '',
                                      };
            } else {
                print "Don't know which tests failed: got $ok ok, expected $max\n";
                $failedtests{$test} = { canon => '??', max => $max,
                                        failed => '??',
                                        name => $test, percent => undef,
                                        estat => '', wstat => '',
                                      };
            }
            $bad++;
        } elsif ($next == 0) {
            print "FAILED before any test output arrived\n";
            $bad++;
            $failedtests{$test} = { canon => '??', max => '??',
                                    failed => '??',
                                    name => $test, percent => undef,
                                    estat => '', wstat => '',
                                  };
        }
        $subtests_skipped += $skipped;

        if (defined $files_in_dir) {
            my @new_dir_files = globdir $files_in_dir;
            if (@new_dir_files != @dir_files) {
                my %f;
                @f{@new_dir_files} = (1) x @new_dir_files;
                delete @f{@dir_files};
                my @f = sort keys %f;
                print "LEAKED FILES: @f\n";
                @dir_files = @new_dir_files;
            }
        }
    }

    my $t_total = timediff(new Benchmark, $t_start);

    if ($^O eq 'VMS') {
        if (defined $old5lib) {
            $ENV{PERL5LIB} = $old5lib;
        } else {
            delete $ENV{PERL5LIB};
        }
    }

    my $bonusmsg = '';
    $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
                 " UNEXPECTEDLY SUCCEEDED)")
        if $totbonus;

    if ($tests_skipped) {
        $bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
        if ($subtests_skipped) {
            $bonusmsg .= " and $subtests_skipped subtest"
                         . ($subtests_skipped != 1 ? 's' : '');
        }
        $bonusmsg .= ' skipped';
    }
    elsif ($subtests_skipped) {
        $bonusmsg .= ", $subtests_skipped subtest"
                     . ($subtests_skipped != 1 ? 's' : '')
                     . " skipped";
    }

    if ($bad == 0 && $totmax) {
        print "All tests successful$bonusmsg.\n";
    } elsif ($total==0){
        die "FAILED--no tests were run for some reason.\n";
    } elsif ($totmax==0) {
        my $blurb = $total==1 ? "script" : "scripts";
        die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
    } else {
        $pct = sprintf("%.2f", $good / $total * 100);
        my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
                             $totmax - $totok, $totmax, 100*$totok/$totmax;

        # Create formats
        # First, figure out max length of test names
        my $failed_str = "Failed Test";
        my $middle_str = " Status Wstat Total Fail Failed ";
        my $list_str = "List of Failed";

        my $max_namelen = length($failed_str);
        my $script;
        foreach $script (keys %failedtests) {
            $max_namelen =
                (length $failedtests{$script}->{name} > $max_namelen) ?
                 length $failedtests{$script}->{name} : $max_namelen;
        }

        my $list_len = $columns - length($middle_str) - $max_namelen;
        if ($list_len < length($list_str)) {
            $list_len = length($list_str);
            $max_namelen = $columns - length($middle_str) - $list_len;
            if ($max_namelen < length($failed_str)) {
                $max_namelen = length($failed_str);
                $columns = $max_namelen + length($middle_str) + $list_len;
            }
        }

        my $fmt_top = "format STDOUT_TOP =\n"
                      . sprintf("%-${max_namelen}s", $failed_str)
                      . $middle_str
                      . $list_str . "\n"
                      . "-" x $columns
                      . "\n.\n";
        my $fmt = "format STDOUT =\n"
                  . "@" . "<" x ($max_namelen - 1)
                  . " @>> @>>>> @>>>> @>>> ^##.##% "
                  . "^" . "<" x ($list_len - 1) . "\n"
                  . '{ $curtest->{name}, $curtest->{estat},'
                  . ' $curtest->{wstat}, $curtest->{max},'
                  . ' $curtest->{failed}, $curtest->{percent},'
                  . ' $curtest->{canon}'
                  . "\n}\n"
                  . "~~" . " " x ($columns - $list_len - 2) . "^"
                  . "<" x ($list_len - 1) . "\n"
                  . '$curtest->{canon}'
                  . "\n.\n";

        eval $fmt_top;
        die $@ if $@;
        eval $fmt;
        die $@ if $@;

        # Now write to formats
        for $script (sort keys %failedtests) {
            $curtest = $failedtests{$script};
            write;
        }
        if ($bad) {
            $bonusmsg =~ s/^,\s*//;
            print "$bonusmsg.\n" if $bonusmsg;
            die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
        }
    }
    printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));

    return ($bad == 0 && $totmax) ;
}
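
# Core-dump detection: corestatus($wstatus) guesses from the child's wait
# status whether it dumped core, and probes once for Devel::CoreStack so a
# stack trace can be printed by runtests() above.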
my $tried_devel_corestack;

sub corestatus {
    my($st) = @_;

    eval {require 'wait.ph'};
    my $ret = defined &WCOREDUMP ? WCOREDUMP($st) : $st & 0200;

    eval { require Devel::CoreStack; $have_devel_corestack++ }
        unless $tried_devel_corestack++;

    $ret;
}
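
# canonfailed($max, $skipped, @failed): collapse the list of failed test
# numbers into ranges (e.g. "3-5, 9") and build the human-readable failure
# summary; returns the summary text and the canonical range string.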
sub canonfailed ($@) {
    my($max,$skipped,@failed) = @_;
    my %seen;
    @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
    my $failed = @failed;
    my @result = ();
    my @canon = ();
    my $min;
    my $last = $min = shift @failed;
    my $canon;
    if (@failed) {
        for (@failed, $failed[-1]) { # don't forget the last one
            if ($_ > $last+1 || $_ == $last) {
                if ($min == $last) {
                    push @canon, $last;
                } else {
                    push @canon, "$min-$last";
                }
                $min = $_;
            }
            $last = $_;
        }
        local $" = ", ";
        push @result, "FAILED tests @canon\n";
        $canon = "@canon";
    } else {
        push @result, "FAILED test $last\n";
        $canon = $last;
    }

    push @result, "\tFailed $failed/$max tests, ";
    push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
    my $ender = 's' x ($skipped > 1);
    my $good = $max - $failed - $skipped;
    my $goodper = sprintf("%.2f",100*($good/$max));
    push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;
    push @result, "\n";
    my $txt = join "", @result;
    ($txt, $canon);
}

1;
__END__

=head1 NAME

Test::Harness - run perl standard test scripts with statistics

=head1 SYNOPSIS

    use Test::Harness;

    runtests(@tests);
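
A slightly fuller calling sketch (the F<t/*.t> file names below are only
placeholders, not something the module provides):

    use Test::Harness;

    # run every test script under t/; runtests() dies with a summary
    # if any of them fail
    my @tests = glob("t/*.t");
    runtests(@tests);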

=head1 DESCRIPTION

(By using the L<Test> module, you can write test scripts without
knowing the exact output this module expects. However, if you need to
know the specifics, read on!)

Perl test scripts print to standard output C<"ok N"> for each single
test, where C<N> is an increasing sequence of integers. The first line
output by a standard test script is C<"1..M">, with C<M> being the
number of tests that should be run within the test script.
Test::Harness::runtests(@tests) runs all the test scripts named as
arguments and checks standard output for the expected C<"ok N">
strings.

After all tests have been performed, runtests() prints some
performance statistics that are computed by the Benchmark module.
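
For illustration, a minimal test script following this convention (a made-up
example, not one shipped with the module) looks like this:

    #!/usr/bin/perl
    print "1..2\n";      # plan: two tests follow
    print "ok 1\n";      # first test passed
    print "ok 2\n";      # second test passed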

=head2 The test script output

Any output from the test script to standard error is ignored and passed
through, so it will be seen by the user. Lines written to standard
output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
runtests(). All other lines are discarded.

The test numbers after C<ok> may be omitted. In this case Test::Harness
temporarily maintains its own counter until the script supplies test
numbers again. So the following test script

    print <<END;
    1..6
    not ok
    ok
    not ok
    ok
    ok
    END

will generate

    FAILED tests 1, 3, 6
    Failed 3/6 tests, 50.00% okay

The global variable $Test::Harness::verbose is exportable and can be
used to let runtests() display the standard output of the script
without altering the behavior otherwise.

The global variable $Test::Harness::switches is exportable and can be
used to set perl command line options used for running the test
script(s). The default value is C<-w>.
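
A sketch of how these variables might be set from a driver script (the extra
C<-Ilib> switch is only an example):

    use Test::Harness qw(&runtests $verbose $switches);

    $verbose  = 1;           # echo each test script's standard output
    $switches = "-w -Ilib";  # replaces the default "-w"
    runtests(@ARGV);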

If the standard output line contains the substring C< # Skip> (with
variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
counted as a skipped test. If the whole test script succeeds, the
count of skipped tests is included in the generated output.
C<Test::Harness> reports the text after C< # Skip(whatever)> as a
reason for skipping. Similarly, one can include an explanation in a
C<1..0> line emitted if the test is skipped completely:

    1..0 # Skipped: no leverage found
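
An individual subtest can be marked as skipped in the same way; for example
(the reason text here is arbitrary):

    ok 13 # Skip no database available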

=head1 EXPORT

C<&runtests> is exported by Test::Harness by default.

=head1 DIAGNOSTICS

=over 4

=item C<All tests successful.\nFiles=%d, Tests=%d, %s>

If all tests are successful, some statistics about the performance are
printed.

=item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>

For any single script that has failing subtests, statistics like the
above are printed.

=item C<Test returned status %d (wstat %d)>

For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
and C<$?> are printed in a message similar to the above.

=item C<Failed 1 test, %.2f%% okay. %s>

=item C<Failed %d/%d tests, %.2f%% okay. %s>

If not all tests were successful, the script dies with one of the
above messages.

=back

=head1 ENVIRONMENT

Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
of child processes.

Setting C<HARNESS_NOTTY> to a true value forces it to behave as though
STDOUT were not a console. You may need to set this if you don't want
harness to output more frequent progress messages using carriage returns.
Some consoles may not handle carriage returns properly (which results
in a somewhat messy output).

Setting C<HARNESS_COMPILE_TEST> to a true value will make harness attempt
to compile the test using C<perlcc> before running it.

If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
will check after each test whether new files appeared in that directory,
and report them as

    LEAKED FILES: scr.tmp 0 my.db

If the directory name is relative, it is taken with respect to the current
directory at the moment runtests() was called. Putting an absolute path
into C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
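
Because C<HARNESS_FILELEAK_IN_DIR> is read when the module is loaded, a
driver script has to set it before loading Test::Harness; a sketch (the
directory name is only an example):

    BEGIN { $ENV{HARNESS_FILELEAK_IN_DIR} = "/tmp/testout" }
    use Test::Harness;

    runtests(@ARGV);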

The value of C<HARNESS_PERL_SWITCHES> will be added to the switches used
to invoke perl on each test. For example, setting
C<HARNESS_PERL_SWITCHES> to C<-W> will run all tests with all warnings
enabled.
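
This variable is consulted each time a test is started, so it can be set
from the driver script itself; a minimal sketch:

    use Test::Harness;

    $ENV{HARNESS_PERL_SWITCHES} = "-W";  # extra switches for every test
    runtests(@ARGV);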

If C<HARNESS_COLUMNS> is set, then this value will be used as the width
of the terminal. If it is not set, then it will default to C<COLUMNS>.
If that is not set either, it will default to 80. Note that users of
Bourne-sh-based shells will need to C<export COLUMNS> for this module
to use that variable.

Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
This allows the tests to determine if they are being executed through the
harness or by any other means.
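
A test script can therefore adjust its behaviour with a check along these
lines (just a sketch):

    if ($ENV{HARNESS_ACTIVE}) {
        # running under Test::Harness; keep extra chatter off STDOUT
    } else {
        print "# running standalone\n";
    }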

=head1 SEE ALSO

L<Test> for writing test scripts and also L<Benchmark> for the
underlying timing routines.

=head1 AUTHORS

Either Tim Bunce or Andreas Koenig, we don't know. What we know for
sure is that it was inspired by Larry Wall's TEST script that came
with perl distributions for ages. Numerous anonymous contributors
exist. The current maintainer is Andreas Koenig.

=head1 BUGS

Test::Harness uses $^X to determine the perl binary to run the tests
with. Test scripts running via the shebang (C<#!>) line may not be
portable because $^X is not consistent for shebang scripts across
platforms. This is no problem when Test::Harness is run with an
absolute path to the perl binary or when $^X can be found in the path.

=cut