#!/bin/sh
#
# Runs tests for the GNUstep Testsuite
#
# Copyright (C) 2005-2011 Free Software Foundation, Inc.
#
# Written by: Alexander Malmberg
# Updates by: Richard Frith-Macdonald
#
# This package is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
#
# Usage: gnustep-tests [directory | test.m]
#
# Runs the tests in the specified directory (or those in the individual file)
# or all the tests in subdirectories of the current directory if no arguments
# are given.
# A summary is written to tests.sum, a log to tests.log, and a brief
# summary to stdout.
# The log and summary from the previous testrun are renamed to
# oldtests.log and oldtests.sum, available for comparison.

# Locate GNUstep-make; without it no tests can be built.  If the variable
# is unset we try gnustep-config, warn (but deliberately continue) when
# that fails too, so the user sees all the diagnostics in one run.
if test -z "$GNUSTEP_MAKEFILES"; then
  GNUSTEP_MAKEFILES=`gnustep-config --variable=GNUSTEP_MAKEFILES 2>/dev/null`
  if test -z "$GNUSTEP_MAKEFILES"; then
    echo "You need to have GNUstep-make installed and set up."
    echo "Did you remember to source GNUstep.sh?"
  else
    echo "You forgot to set your GNUSTEP_MAKEFILES environment variable."
    echo "Setting it to $GNUSTEP_MAKEFILES during this test run."
    export GNUSTEP_MAKEFILES
    . "$GNUSTEP_MAKEFILES/GNUstep.sh"
  fi
fi

# Default mode: build and run all tests.
GSTESTMODE=normal

# Argument checking
while test $# != 0
do
  gs_option=
  case $1 in
    --clean)
      GSTESTMODE=clean
      ;;
    --documentation)
      echo
      echo "$0: Script to run the GNUstep testsuite"
      echo "Usage: gnustep-tests [directory | test1.m [test2.m ...]]"
      echo "Runs the specified tests, or any in subdirectories of the"
      echo "current directory if no arguments are given."
      echo "Use 'gnustep-tests --help' for basic help."
      echo
      cat "$GNUSTEP_MAKEFILES/TestFramework/README"
      exit 0
      ;;
    --failfast)
      GSTESTMODE=failfast
      ;;
    --help | -h)
      echo
      echo "$0: Script to run the GNUstep testsuite"
      echo "Usage: gnustep-tests [directory | test.m]"
      echo "Runs the specified tests, or any in subdirectories of the"
      echo "current directory if no arguments are given."
      echo "Use 'gnustep-tests --documentation' for full details."
      echo "Use 'gnustep-tests --failfast' to stop at the first failure."
      echo "Use 'gnustep-tests --clean' to remove old logs and leftover files."
      echo
      echo "Interpreting the output"
      echo "-----------------------"
      echo "The summary output lists all test failures ... there should not"
      echo "be any.  If a test fails then either there is a problem in the"
      echo "software being tested, or a problem in the test itself.  Either"
      echo "way, you should try to fix the problem and provide a patch, or"
      echo "at least report it at: https://savannah.gnu.org/bugs/?group=gnustep"
      echo
      exit 0
      ;;
    --debug | -d)
      # ignore for backward compatibility.
      ;;
    *)
      break
      ;;
  esac
  shift
done
export GSTESTMODE

# Remember where we started and where the cumulative log/summary live.
GSTESTDIR=`pwd`
export GSTESTDIR
GSTESTLOG=$GSTESTDIR/tests.log
export GSTESTLOG
GSTESTSUM=$GSTESTDIR/tests.sum
export GSTESTSUM

# Find a usable make: prefer what gnustep-config reports, then gmake,
# then plain make (never empty, so $MAKE_CMD is always runnable).
if test ! "$MAKE_CMD"
then
  MAKE_CMD=`gnustep-config --variable=GNUMAKE`
  $MAKE_CMD --version > /dev/null 2>&1
  if test $? != 0
  then
    MAKE_CMD=gmake
    $MAKE_CMD --version > /dev/null 2>&1
    if test $? != 0
    then
      MAKE_CMD=make
    fi
  fi
fi
export MAKE_CMD

if test $# = 0
then
  echo "Checking for presence of test subdirectories ..."
fi

# Candidate test directories: every subdirectory of the current directory
# except CVS bookkeeping and build output.
TEMP=`echo *`
TESTS=
TESTDIRS=
for file in $TEMP
do
  if test -d "$file" -a "$file" != CVS -a "$file" != obj
  then
    TESTDIRS="$TESTDIRS $file"
  fi
done

# Explicit arguments override the automatic scan: a directory argument
# restricts the search, a file argument names a single test.
if test x"$1" != x
then
  if test -d "$1"
  then
    # Only find in the directories specified.
    TESTDIRS=$*
  else
    TESTDIRS=`dirname "$1"`
    TESTS=$1
  fi
fi

RUNCMD=$GNUSTEP_MAKEFILES/TestFramework/runtest.sh
RUNEXIT=0

# Function for platforms where grep can't search for multiple patterns.
# Emulates 'grep -e pat1 -e pat2 ...' for platforms whose grep can't
# search for multiple patterns: prints all lines of file $1 matching any
# of the remaining pattern arguments.  NB: uses the global variable 'f'
# (plain sh has no portable 'local').
extract()
{
  f=$1
  shift
  while test $# != 0
  do
    grep "$1" "$f"
    shift
  done
}

# Function for platforms where grep can't search for multiple patterns.
# Returns 0 (success) if file $1 contains a line matching any of the
# remaining pattern arguments, 1 otherwise.  Produces no output.
present()
{
  f=$1
  shift
  while test $# != 0
  do
    grep "$1" "$f" >/dev/null
    if test $? = "0"
    then
      return 0
    fi
    shift
  done
  return 1
}

# Builds/runs the single test named in $TESTFILE (unless cleaning),
# appends full output to $GSTESTLOG and a result summary to $GSTESTSUM,
# and echoes any failures to stdout.  Sets RUNEXIT to runtest.sh's exit
# status; callers use a non-zero value to stop (failfast/fatal error).
run_test_file ()
{
  if test "$GSTESTMODE" != "clean"
  then
    echo >> "$GSTESTLOG"
    echo "Testing $TESTFILE..." >> "$GSTESTLOG"
    echo >> "$GSTESTSUM"
    # Run the test. Log everything to a temporary file.
    # NOTE(review): $run_args is never set in this script; kept unquoted
    # so it vanishes when empty — presumably for use by wrappers.
    $RUNCMD $run_args "$TESTFILE" > "$GSTESTLOG.tmp" 2>&1
    RUNEXIT=$?
    if test "$RUNEXIT" != "0" -a "$RUNEXIT" != "99"
    then
      echo "Failed script: $TESTFILE" >> "$GSTESTLOG.tmp"
    fi
    # Add the information to the detailed log.
    cat "$GSTESTLOG.tmp" >> "$GSTESTLOG"
    # Extract the summary information and add it to the summary file.
    extract "$GSTESTLOG.tmp" "^Passed test:" "^Failed test:" "^Failed build:" "^Completed file:" "^Failed file:" "^Failed script:" "^Dashed hope:" "^Failed set:" "^Skipped set:" > "$GSTESTSUM.tmp"
    cat "$GSTESTSUM.tmp" >> "$GSTESTSUM"
    # If there were failures or unresolved tests then report them...
    if present "$GSTESTSUM.tmp" "^Failed script:" "^Failed build:" "^Failed file:" "^Failed set:" "^Failed test:"
    then
      echo
      echo "$TESTFILE:"
      extract "$GSTESTSUM.tmp" "^Failed script:" "^Failed build:" "^Failed file:" "^Failed set:" "^Failed test:"
    fi
  fi
}

# Replace the old files.
if test -f tests.log
then
  mv tests.log oldtests.log
fi
if test -f tests.sum
then
  mv tests.sum oldtests.sum
fi

# SUMD tracks the last directory containing tests; when only a single
# testsuite was run its own Summary.sh (if any) is used at the end.
SUMD=.
for TESTDIR in $TESTDIRS
do
  found=no
  # Get the names of all subdirectories containing source files,
  # ignoring any whose name starts with 'X' (disabled tests).
  SRCDIRS=`find $TESTDIR -name \*.m | sed -e 's;/[^/]*$;;' | sort -u | sed -e 's/\(^\| \)X[^ ]*//g'`
  if test x"$SRCDIRS" = x
  then
    continue
  fi
  SUMD=$TESTDIR
  for dir in $SRCDIRS
  do
    if test -f "$dir/IGNORE"
    then
      continue
    fi
    found=yes
    # Guard the cd: running (or worse, cleaning with rm -rf) in the
    # wrong directory would be destructive.
    cd "$dir" || continue
    if test "$GSTESTMODE" = "clean"
    then
      echo "--- Cleaning tests in $dir ---"
      rm -rf core obj GNUmakefile.tmp
      rm -f tests.tmp tests.sum.tmp tests.log.tmp
      rm -f tests.log tests.sum
      rm -f oldtests.log oldtests.sum
    else
      echo "--- Running tests in $dir ---"
      # Optional per-directory setup hook.
      if test -r ./Start.sh -a -x ./Start.sh
      then
        ./Start.sh
      fi
    fi
    # Unless specific tests were requested, run every .m file here,
    # again skipping names starting with 'X'.
    if test x"$TESTS" = x
    then
      TESTS=`echo *.m | sort | sed -e 's/\(^\| \)X[^ ]*//g'`
    fi
    # If there is a GNUmakefile.tests in the directory, run it first.
    if test -f GNUmakefile.tests
    then
      if test "$GSTESTMODE" = "clean"
      then
        $MAKE_CMD -f GNUmakefile.tests $MAKEFLAGS clean 2>&1
      else
        $MAKE_CMD -f GNUmakefile.tests $MAKEFLAGS debug=yes 2>&1
      fi
    fi
    # Now we process each test file in turn.
    for TESTFILE in $TESTS
    do
      run_test_file
      if test "$RUNEXIT" != "0"
      then
        break
      fi
    done
    TESTS=
    if test "$GSTESTMODE" != "clean"
    then
      # And perform the cleanup script.
      if test -r ./End.sh -a -x ./End.sh
      then
        ./End.sh
      fi
    fi
    cd "$GSTESTDIR"
    if test "$RUNEXIT" != "0"
    then
      break
    fi
  done
  if test "$found" = no
  then
    echo "No tests found in $TESTDIR"
  fi
  if test "$RUNEXIT" != "0"
  then
    break
  fi
done

if test "$GSTESTMODE" = "clean"
then
  # Clean mode: remove all generated files in the top directory too.
  rm -rf core obj GNUmakefile.tmp
  rm -f tests.tmp tests.sum.tmp tests.log.tmp
  rm -f tests.log tests.sum
  rm -f oldtests.log oldtests.sum
else
  # Make some stats.
  if test -r tests.sum
  then
    # Nasty pipeline of commands ...
    # Look for each type of test result, sort and count the results,
    # append 's' to each summary, then remove the trailing 's' from
    # any summary with only a single result so the output is pretty.
    # Sort the resulting lines by number of each status with the most
    # common (hopefully passes) output first.
    # NB. we omit the 'Completed file' tests as uninteresting ... users
    # generally only want to see the total pass count and any problems.
    extract tests.sum "^Passed test:" "^Failed test:" "^Failed script:" "^Failed build:" "^Failed file:" "^Dashed hope:" "^Failed set:" "^Skipped set:" |
      cut -d: -f1 | sort | uniq -c |
      sed -e 's/.*/&s/' |
      sed -e 's/^\([^0-9]*1[^0-9].*\)s$/\1/' |
      sort -n -b -r > tests.tmp
  else
    echo "No tests found." > tests.tmp
  fi
  # Append the stats to the summary file and show them on stdout.
  echo >> tests.sum
  cat tests.tmp >> tests.sum
  echo
  cat tests.tmp
  echo
fi

# In the case where we ran a single testsuite, we allow the Summary.sh
# script in that testsuite to generate our summary.
if test x"$TESTDIRS" = x"$SUMD" -a -r "$SUMD/Summary.sh" -a -x "$SUMD/Summary.sh"
then
  RUNCMD=$SUMD/Summary.sh
else
  RUNCMD=$GNUSTEP_MAKEFILES/TestFramework/Summary.sh
fi
"$RUNCMD"

# Delete the temporary files.
rm -f tests.tmp tests.sum.tmp tests.log.tmp