package Test::Builder::Tester;

use strict;
our $VERSION = '1.302062';

use Test::Builder;
use Symbol;
use Carp;

=head1 NAME

Test::Builder::Tester - test test suites that have been built with Test::Builder

=head1 SYNOPSIS

    use Test::Builder::Tester tests => 1;
    use Test::More;

    test_out("not ok 1 - foo");
    test_fail(+1);
    fail("foo");
    test_test("fail works");

=head1 DESCRIPTION

A module that helps you test testing modules that are built with
L<Test::Builder>.

The testing system is designed to be used by performing a three-step
process for each test you wish to test.  This process starts with using
C<test_out> and C<test_err> in advance to declare what the test suite
you are testing will output with L<Test::Builder> to stdout and stderr.

You can then run the test(s) from your test suite that call
L<Test::Builder>.  At this point the output of L<Test::Builder> is
safely captured by L<Test::Builder::Tester> rather than being
interpreted as real test output.

The final stage is to call C<test_test>, which will simply compare what
you predeclared to what L<Test::Builder> actually output, and report the
results back with an "ok" or "not ok" (with debugging) to the normal
output.

=cut

####
# set up testing
####

my $t = Test::Builder->new;

###
# make us an exporter
###

use Exporter;
our @ISA = qw(Exporter);

our @EXPORT = qw(test_out test_err test_fail test_diag test_test line_num);

sub import {
    my $class = shift;
    my(@plan) = @_;

    my $caller = caller;

    $t->exported_to($caller);
    $t->plan(@plan);

    my @imports = ();
    foreach my $idx ( 0 .. $#plan ) {
        if( $plan[$idx] eq 'import' ) {
            @imports = @{ $plan[ $idx + 1 ] };
            last;
        }
    }

    __PACKAGE__->export_to_level( 1, __PACKAGE__, @imports );
}

###
# set up file handles
###

# create some private file handles
my $output_handle = gensym;
my $error_handle  = gensym;

# and tie them to this package
my $out = tie *$output_handle, "Test::Builder::Tester::Tie", "STDOUT";
my $err = tie *$error_handle,  "Test::Builder::Tester::Tie", "STDERR";

####
# exported functions
####

# for remembering that we're testing and where we're testing at
my $testing = 0;
my $testing_num;
my $original_is_passing;

# remembering where the file handles were originally connected
my $original_output_handle;
my $original_failure_handle;
my $original_todo_handle;

my $original_formatter;

my $original_harness_env;

# function that starts testing and redirects the filehandles for now
sub _start_testing {
    # Hack for things that conditioned on Test-Stream being loaded
    $INC{'Test/Stream.pm'} ||= 'fake' if $INC{'Test/Moose/More.pm'};

    # even if we're running under Test::Harness pretend we're not
    # for now.
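    # (Test::Harness advertises itself through the HARNESS_ACTIVE environment
    # variable; we stash its value here and restore it later in test_test().)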
    # This is needed so Test::Builder doesn't add extra spaces
    $original_harness_env = $ENV{HARNESS_ACTIVE} || 0;
    $ENV{HARNESS_ACTIVE} = 0;

    my $hub = $t->{Hub} || Test2::API::test2_stack->top;
    $original_formatter = $hub->format;
    unless ($original_formatter && $original_formatter->isa('Test::Builder::Formatter')) {
        my $fmt = Test::Builder::Formatter->new;
        $hub->format($fmt);
    }

    # remember what the handles were set to
    $original_output_handle  = $t->output();
    $original_failure_handle = $t->failure_output();
    $original_todo_handle    = $t->todo_output();

    # switch out to our own handles
    $t->output($output_handle);
    $t->failure_output($error_handle);
    $t->todo_output($output_handle);

    # clear the expected list
    $out->reset();
    $err->reset();

    # remember that we're testing
    $testing     = 1;
    $testing_num = $t->current_test;
    $t->current_test(0);
    $original_is_passing = $t->is_passing;
    $t->is_passing(1);

    # look, we shouldn't do the ending stuff
    $t->no_ending(1);
}

=head2 Functions

These are the six functions that are exported by default.

=over 4

=item test_out

=item test_err

Procedures for predeclaring the output that your test suite is
expected to produce until C<test_test> is called.  These procedures
automatically assume that each line terminates with "\n".  So

    test_out("ok 1","ok 2");

is the same as

    test_out("ok 1\nok 2");

which is even the same as

    test_out("ok 1");
    test_out("ok 2");

Once C<test_out> or C<test_err> (or C<test_fail> or C<test_diag>) have
been called, all further output from L<Test::Builder> will be captured
by L<Test::Builder::Tester>.  This means that you will not be able to
perform further tests on the normal output in the normal way until you
call C<test_test> (well, unless you manually meddle with the output
filehandles).

=cut

sub test_out {
    # do we need to do any setup?
    _start_testing() unless $testing;

    $out->expect(@_);
}

sub test_err {
    # do we need to do any setup?
    _start_testing() unless $testing;

    $err->expect(@_);
}

=item test_fail

Because the standard failure message that L<Test::Builder> produces
whenever a test fails will be a common occurrence in your test error
output, and because it has changed between Test::Builder versions,
rather than forcing you to call C<test_err> with the string all the
time like so

    test_err("# Failed test ($0 at line ".line_num(+1).")");

C<test_fail> exists as a convenience function that can be called
instead.  It takes one argument: the offset from the current line to
the line that causes the failure.

    test_fail(+1);

This means that the example in the synopsis could be rewritten
more simply as:

    test_out("not ok 1 - foo");
    test_fail(+1);
    fail("foo");
    test_test("fail works");
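
The offset simply points at whichever line produces the failure, so it
can skip past intervening lines; a minimal sketch:

    test_out("not ok 1 - foo");
    test_fail(+2);
    # an unrelated line in between
    fail("foo");
    test_test("fail works with a bigger offset");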

=cut

sub test_fail {
    # do we need to do any setup?
    _start_testing() unless $testing;

    # work out what line we should be on
    my( $package, $filename, $line ) = caller;
    $line = $line + ( shift() || 0 );    # prevent warnings

    # expect that on stderr
    $err->expect("# Failed test ($filename at line $line)");
}

=item test_diag

As most of the remaining expected output to the error stream will be
created by L<Test::Builder>'s C<diag> function, L<Test::Builder::Tester>
provides a convenience function C<test_diag> that you can use instead
of C<test_err>.

The C<test_diag> function prepends comment hashes and spacing to the
start and newlines to the end of the expected output passed to it and
adds it to the list of expected error output.  So, instead of writing

    test_err("# Couldn't open file");

you can write

    test_diag("Couldn't open file");

Remember that L<Test::Builder>'s diag function will not add newlines to
the end of output and test_diag will.  So to check

    Test::Builder->new->diag("foo\n","bar\n");

you would do

    test_diag("foo","bar")

without the newlines.

=cut

sub test_diag {
    # do we need to do any setup?
    _start_testing() unless $testing;

    # expect the same thing, but prepended with "# "
    local $_;
    $err->expect( map { "# $_" } @_ );
}

=item test_test

Actually performs the output check testing the tests, comparing the
data (with C<eq>) that we have captured from L<Test::Builder> against
what was declared with C<test_out> and C<test_err>.

This takes name/value pairs that affect how the test is run.

=over

=item title (synonym 'name', 'label')

The name of the test that will be displayed after the C<ok> or
C<not ok>.

=item skip_out

Setting this to a true value will cause the test to ignore whether the
output sent by the test to the output stream matches that declared with
C<test_out>.

=item skip_err

Setting this to a true value will cause the test to ignore whether the
output sent by the test to the error stream matches that declared with
C<test_err>.

=back

As a convenience, if only one argument is passed then this argument
is assumed to be the name of the test (as in the above examples).

Once C<test_test> has been run, test output will be redirected back to
the original filehandles that L<Test::Builder> was connected to
(probably STDOUT and STDERR), meaning any further tests you run will
function normally and cause success/errors for L<Test::Harness>.

=cut

sub test_test {
    # END the hack
    delete $INC{'Test/Stream.pm'}
        if $INC{'Test/Stream.pm'} && $INC{'Test/Stream.pm'} eq 'fake';

    # decode the arguments as described in the pod
    my $mess;
    my %args;
    if( @_ == 1 ) {
        $mess = shift;
    }
    else {
        %args = @_;
        $mess = $args{name}  if exists( $args{name} );
        $mess = $args{title} if exists( $args{title} );
        $mess = $args{label} if exists( $args{label} );
    }

    # er, are we testing?
    croak "Not testing. You must declare output with a test function first."
        unless $testing;

    my $hub = $t->{Hub} || Test2::API::test2_stack->top;
    $hub->format($original_formatter);

    # okay, reconnect the test suite back to the saved handles
    $t->output($original_output_handle);
    $t->failure_output($original_failure_handle);
    $t->todo_output($original_todo_handle);

    # restore the test no, etc, back to the original point
    $t->current_test($testing_num);
    $testing = 0;
    $t->is_passing($original_is_passing);

    # re-enable the original setting of the harness
    $ENV{HARNESS_ACTIVE} = $original_harness_env;

    # check the output we've stashed
    unless( $t->ok( ( $args{skip_out} || $out->check ) &&
                    ( $args{skip_err} || $err->check ), $mess )
    ) {
        # print out the diagnostic information about why this
        # test failed

        local $_;

        $t->diag( map { "$_\n" } $out->complaint )
            unless $args{skip_out} || $out->check;

        $t->diag( map { "$_\n" } $err->complaint )
            unless $args{skip_err} || $err->check;
    }
}

=item line_num

A utility function that returns the line number that the function was
called on.  You can pass it an offset which will be added to the
result.  This is very useful for working out the correct text of
diagnostic functions that contain line numbers.

Essentially this is the same as the C<__LINE__> macro, but the
C<line_num(+3)> idiom is arguably nicer.
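
For example, here is a sketch of the synopsis test written without the
C<test_fail> convenience, building the expected failure line by hand
(it simply recombines the examples shown above):

    test_out("not ok 1 - foo");
    test_err("# Failed test ($0 at line ".line_num(+1).")");
    fail("foo");
    test_test("fail works");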

=cut

sub line_num {
    my( $package, $filename, $line ) = caller;
    return $line + ( shift() || 0 );    # prevent warnings
}

=back

In addition to the six exported functions there exists one function
that can only be accessed with a fully qualified function call.

=over 4

=item color

When C<test_test> is called and the output that your tests generate
does not match that which you declared, C<test_test> will print out
debug information showing the two conflicting versions.  As this
output itself is debug information it can be confusing which part of
the output is from C<test_test> and which was the original output from
your original tests.  Also, it may be hard to spot things like
extraneous whitespace at the end of lines that may cause your test to
fail even though the output looks similar.

To assist you, C<test_test> can colour the background of the debug
information to disambiguate the different types of output.  The debug
output will have its background coloured green and red.  The green part
represents the text which is the same between the executed and actual
output, the red shows which part differs.

The C<color> function determines if colouring should occur or not.
Passing it a true or false value will enable or disable colouring
respectively, and the function called with no argument will return the
current setting.

To enable colouring from the command line, you can load the
L<Test::Builder::Tester::Color> module like so:

    perl -MTest::Builder::Tester::Color test.t

Or by C<use>ing the L<Test::Builder::Tester::Color> module directly in
your test script.

=cut

my $color;

sub color {
    $color = shift if @_;
    $color;
}

=back

=head1 BUGS

Test::Builder::Tester does not handle plans well.  It has never done
anything special with plans.  This means that plans from outside
Test::Builder::Tester will affect Test::Builder::Tester; worse, plans
issued while using Test::Builder::Tester will affect the overall
testing.  At this point there are no plans to fix this bug, as people
have come to depend on it, and Test::Builder::Tester is now discouraged
in favor of C<Test2::API::intercept()>.  See
L<https://github.com/Test-More/test-more/issues/667>

This module calls C<< Test::Builder->no_ending >>, turning off the
ending tests.  This is needed as otherwise it will trip out because
we've run more tests than we strictly should have, and it'll register
any failures we were testing for as real failures.

The color function doesn't work unless L<Term::ANSIColor> is
compatible with your terminal.  Additionally, L<Win32::Console::ANSI>
must be installed on Windows platforms for color output.

Bugs (and requests for new features) can be reported to the author
through GitHub:
L<https://github.com/Test-More/test-more/issues>

=head1 AUTHOR

Copyright Mark Fowler E<lt>mark@twoshortplanks.comE<gt> 2002, 2004.

Some code taken from L<Test::More> and L<Test::Catch>, written by
Michael G Schwern E<lt>schwern@pobox.comE<gt>.  Hence, those parts
Copyright Michael G Schwern 2001.  Used and distributed with
permission.

This program is free software; you can redistribute it
and/or modify it under the same terms as Perl itself.

=head1 MAINTAINERS

=over 4

=item Chad Granum E<lt>exodist@cpan.orgE<gt>

=back

=head1 NOTES

Thanks to Richard Clamp E<lt>richardc@unixbeard.netE<gt> for letting
me use his testing system to try this module out on.

=head1 SEE ALSO

L<Test::Builder>, L<Test::Builder::Tester::Color>, L<Test::More>.

=cut

1;

####################################################################
# Helper class that is used to remember expected and received data

package Test::Builder::Tester::Tie;

##
# add line(s) to be expected

sub expect {
    my $self = shift;

    my @checks = @_;
    foreach my $check (@checks) {
        $check = $self->_account_for_subtest($check);
        $check = $self->_translate_Failed_check($check);
        push @{ $self->{wanted} }, ref $check ? $check : "$check\n";
    }
}

sub _account_for_subtest {
    my( $self, $check ) = @_;

    my $hub = $t->{Stack}->top;
    my $nesting = $hub->isa('Test2::Hub::Subtest') ? $hub->nested : 0;
    return ref($check) ? $check : ('    ' x $nesting) . $check;
}

sub _translate_Failed_check {
    my( $self, $check ) = @_;

    if( $check =~ /\A(.*)# (Failed .*test) \((.*?) at line (\d+)\)\Z(?!\n)/ ) {
        $check = "/\Q$1\E#\\s+\Q$2\E.*?\\n?.*?\Qat $3\E line \Q$4\E.*\\n?/";
    }

    return $check;
}

##
# return true iff the expected data matches the got data

sub check {
    my $self = shift;

    # turn off warnings as these might be undef
    local $^W = 0;

    my @checks = @{ $self->{wanted} };
    my $got    = $self->{got};
    foreach my $check (@checks) {
        $check = "\Q$check\E" unless( $check =~ s,^/(.*)/$,$1, or ref $check );
        return 0 unless $got =~ s/^$check//;
    }

    return length $got == 0;
}

##
# a complaint message about the inputs not matching (to be
# used for debugging messages)

sub complaint {
    my $self   = shift;
    my $type   = $self->type;
    my $got    = $self->got;
    my $wanted = join '', @{ $self->wanted };

    # are we running in colour mode?
    if(Test::Builder::Tester::color) {
        # get color
        eval { require Term::ANSIColor };
        unless($@) {
            eval { require Win32::Console::ANSI } if 'MSWin32' eq $^O;    # support color on Windows platforms

            # colours
            my $green = Term::ANSIColor::color("black") . Term::ANSIColor::color("on_green");
            my $red   = Term::ANSIColor::color("black") . Term::ANSIColor::color("on_red");
            my $reset = Term::ANSIColor::color("reset");

            # work out where the two strings start to differ
            my $char = 0;
            $char++ while substr( $got, $char, 1 ) eq substr( $wanted, $char, 1 );

            # get the start string and the two end strings
            my $start     = $green . substr( $wanted, 0, $char );
            my $gotend    = $red . substr( $got,    $char ) . $reset;
            my $wantedend = $red . substr( $wanted, $char ) . $reset;

            # make the start turn green on and off
            $start =~ s/\n/$reset\n$green/g;

            # make the ends turn red on and off
            $gotend    =~ s/\n/$reset\n$red/g;
            $wantedend =~ s/\n/$reset\n$red/g;

            # rebuild the strings
            $got    = $start . $gotend;
            $wanted = $start . $wantedend;
        }
    }

    my @got    = split "\n", $got;
    my @wanted = split "\n", $wanted;

    $got    = "";
    $wanted = "";

    while (@got || @wanted) {
        my $g = shift @got    || "";
        my $w = shift @wanted || "";

        if ($g ne $w) {
            if($g =~ s/(\s+)$/ |> /g) {
                $g .= ($_ eq ' ' ? '_' : '\t') for split '', $1;
            }
            if($w =~ s/(\s+)$/ |> /g) {
                $w .= ($_ eq ' ' ? '_' : '\t') for split '', $1;
            }

            $g = "> $g";
            $w = "> $w";
        }
        else {
            $g = "  $g";
            $w = "  $w";
        }

        $got    = $got    ? "$got\n$g"    : $g;
        $wanted = $wanted ? "$wanted\n$w" : $w;
    }

    return "$type is:\n" .
           "$got\nnot:\n$wanted\nas expected";
}

##
# forget all expected and got data

sub reset {
    my $self = shift;
    %$self = (
        type   => $self->{type},
        got    => '',
        wanted => [],
    );
}

sub got {
    my $self = shift;
    return $self->{got};
}

sub wanted {
    my $self = shift;
    return $self->{wanted};
}

sub type {
    my $self = shift;
    return $self->{type};
}

###
# tie interface
###

sub PRINT {
    my $self = shift;
    $self->{got} .= join '', @_;
}

sub TIEHANDLE {
    my( $class, $type ) = @_;

    my $self = bless { type => $type }, $class;

    $self->reset;

    return $self;
}

sub READ     { }
sub READLINE { }
sub GETC     { }
sub FILENO   { }

1;