Mirror of https://github.com/gnustep/tools-make.git, synced 2025-04-23 22:33:28 +00:00
Try to improve usability ... friendlier names and more documentation.
git-svn-id: svn+ssh://svn.gna.org/svn/gnustep/tools/make/trunk@32142 72102866-910b-0410-8b05-ffd578937521
This commit is contained in:
parent fa221b249e
commit 5898fc0ac6
5 changed files with 71 additions and 34 deletions
ChangeLog

@@ -1,3 +1,12 @@
+2011-01-13 Richard Frith-Macdonald <rfm@gnu.org>
+
+        * TestFramework/Testing.h:
+        * TestFramework/gnustep-tests:
+        * TestFramework/runtest.sh:
+        * TestFramework/README:
+        Change test status reports to friendlier text.
+        Record file and line number for each test.
+
 2011-01-13 Richard Frith-Macdonald <rfm@gnu.org>
 
         * TestFramework/gnustep-tests: Add --clean option
TestFramework/README

@@ -72,8 +72,9 @@ fix the problem and provide a patch, or at least report it at:
 https://savannah.gnu.org/bugs/?group=gnustep"
 
 After the listing of any failures is a summary of counts of events:
-COMPLETED: The number of separate test files which were run.
-COMPILEFAIL: The number of separate test files which failed to run.
+COMPLETED: The number of separate test files which were run to completion.
+COMPILEFAIL: The number of separate test files which did not compile and run.
+CRASHED: The number of separate test files which failed while running.
 DASHED: The number of hopes dashed ... tests which did not pass, but
   which were not expected to pass (new code being worked on etc).
 FAIL: The number of individual tests failed
@@ -174,6 +175,28 @@ You may also specify a GNUmakefile.tests in a project level directory (ie one
 containing subdirectories of tests), and that makefile will be executed when
 you run tests on the project.
 
+NB. When you supply custom makefile fragments which build helper programs etc,
+please remember to add an 'after-clean::' target in the makefile to clean up
+your custom files when gnustep-tests is run with the --clean option.
+
+
+Ignoring aborted test files
+---------------------------
+
+When a test file crashes while running, or terminates with some sort of
+failure status (eg the main() function returns a non-zero value) the framework
+treats the test file as having 'aborted' ... it assumes that the program
+crashed during the tests and the tests did not complete.
+
+On rare occasions you might actually want a test program to abort this way
+and have it treated as normal completion. In order to do this you simply
+create an additional file with the same name as the test program and a
+file extension of '.abort'.
+eg. If myTest.m is expected to crash, you would create myTest.m.abort to have
+that crash treated as a normal test completion.
+
+
+
 Ignoring directories
 --------------------
 
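For illustration, a test file expected to terminate abnormally might look like the hypothetical myTest.m below (the tested expression is illustrative; only the .abort companion-file convention comes from the README text above):

#import <Foundation/Foundation.h>
#import "Testing.h"
#include <stdlib.h>

int main()
{
  NSAutoreleasePool  *arp = [NSAutoreleasePool new];

  PASS(1 == 1, "a test which runs before the deliberate crash")

  /* Terminate with a failure status.  Because a companion file named
   * myTest.m.abort exists alongside this file, the framework treats the
   * abnormal exit as a normal 'Completed file:' rather than an
   * 'Aborted file:'.
   */
  abort();
}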
TestFramework/Testing.h

@@ -75,17 +75,17 @@ static void pass(int testPassed, const char *format, ...)
   va_start(args, format);
   if (testPassed)
     {
-      fprintf(stderr, "PASS: ");
+      fprintf(stderr, "Passed test: ");
       testPassed = YES;
     }
   else if (YES == testHopeful)
     {
-      fprintf(stderr, "DASHED: ");
+      fprintf(stderr, "Dashed hope: ");
       testPassed = NO;
     }
   else
     {
-      fprintf(stderr, "FAIL: ");
+      fprintf(stderr, "Failed test: ");
       testPassed = NO;
     }
   vfprintf(stderr, format, args);
@@ -105,7 +105,7 @@ static void unresolved(const char *format, ...)
 {
   va_list args;
   va_start(args, format);
-  fprintf(stderr, "UNRESOLVED: ");
+  fprintf(stderr, "Unresolved set: ");
   vfprintf(stderr, format, args);
   fprintf(stderr, "\n");
   va_end(args);
@@ -120,7 +120,7 @@ static void unsupported(const char *format, ...)
 {
   va_list args;
   va_start(args, format);
-  fprintf(stderr, "UNSUPPORTED: ");
+  fprintf(stderr, "Unsupported set: ");
   vfprintf(stderr, format, args);
   fprintf(stderr, "\n");
   va_end(args);
@@ -133,7 +133,7 @@ static void unsupported(const char *format, ...)
  * Otherwise, the test passes.
  * Basically equivalent to pass() but with exception handling.
  */
-#define PASS(expression, args...) \
+#define PASS(expression, format, ...) \
   NS_DURING \
     { \
       int _cond; \
@@ -141,11 +141,11 @@ static void unsupported(const char *format, ...)
       [[NSGarbageCollector defaultCollector] collectExhaustively]; \
       _cond = (int) expression; \
       [[NSGarbageCollector defaultCollector] collectExhaustively]; \
-      pass(_cond, ## args); \
+      pass(_cond, " %s:%d ... " format, __FILE__, __LINE__, ## __VA_ARGS__); \
     } \
   NS_HANDLER \
     testRaised = [localException retain]; \
-    pass(0, ## args); \
+    pass(0, " %s:%d ... " format, __FILE__, __LINE__, ## __VA_ARGS__); \
     printf("%s: %s", [[testRaised name] UTF8String], \
       [[testRaised description] UTF8String]); \
   NS_ENDHANDLER
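As a usage sketch (a hypothetical test file; the tested expression is illustrative), the macro is invoked exactly as before, but each report now carries the source location automatically:

#import <Foundation/Foundation.h>
#import "Testing.h"

int main()
{
  NSAutoreleasePool  *arp = [NSAutoreleasePool new];
  NSArray            *a = [NSArray arrayWithObject: @"x"];

  /* On success this logs something like:
   *   Passed test:  myTest.m:11 ... array contains one object
   */
  PASS([a count] == 1, "array contains one object")

  [arp release];
  return 0;
}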
@@ -161,7 +161,7 @@ static void unsupported(const char *format, ...)
  * the string representation of both values is logged so that you
  * can get a better idea of what went wrong.
  */
-#define PASS_EQUAL(expression, expect, args...) \
+#define PASS_EQUAL(expression, expect, format, ...) \
   NS_DURING \
     { \
       int _cond; \
@@ -171,7 +171,7 @@ static void unsupported(const char *format, ...)
       _obj = ( expression );\
       _cond = _obj == expect || [_obj isEqual: expect]; \
       [[NSGarbageCollector defaultCollector] collectExhaustively]; \
-      pass(_cond, ## args); \
+      pass(_cond, " %s:%d ... " format, __FILE__, __LINE__, ## __VA_ARGS__); \
       if (0 == _cond) \
         { \
           NSString *s = [_obj description]; \
@@ -191,7 +191,7 @@ static void unsupported(const char *format, ...)
     } \
   NS_HANDLER \
     testRaised = [localException retain]; \
-    pass(0, ## args); \
+    pass(0, " %s:%d ... " format, __FILE__, __LINE__, ## __VA_ARGS__); \
     printf("%s: %s", [[testRaised name] UTF8String], \
       [[testRaised description] UTF8String]); \
   NS_ENDHANDLER
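A sketch of typical use (hypothetical values; NSString's comparison semantics do the work):

#import <Foundation/Foundation.h>
#import "Testing.h"

int main()
{
  NSAutoreleasePool  *arp = [NSAutoreleasePool new];

  /* Passes if the two values are identical pointers or -isEqual: returns
   * YES; on failure the descriptions of both values are logged.
   */
  PASS_EQUAL([@"hello" uppercaseString], @"HELLO",
    "uppercaseString gives the expected value")

  [arp release];
  return 0;
}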
@@ -203,15 +203,16 @@ static void unsupported(const char *format, ...)
  * You can supply nil for expectedExceptionName if you don't care about the
  * type of exception.
  */
-#define PASS_EXCEPTION(code, expectedExceptionName, args...) \
+#define PASS_EXCEPTION(code, expectedExceptionName, format, ...) \
   NS_DURING \
     id _tmp = testRaised; testRaised = nil; [_tmp release]; \
     { code; } \
-    pass(0, ## args); \
+    pass(0, " %s:%d ... " format, __FILE__, __LINE__, ## __VA_ARGS__); \
   NS_HANDLER \
     testRaised = [localException retain]; \
     pass((expectedExceptionName == nil \
-      || [[testRaised name] isEqual: expectedExceptionName]), ## args); \
+      || [[testRaised name] isEqual: expectedExceptionName]), \
+      " %s:%d ... " format, __FILE__, __LINE__, ## __VA_ARGS__); \
     if (NO == [expectedExceptionName isEqual: [testRaised name]]) \
       fprintf(stderr, "Expected '%s' and got '%s'\n", \
         [expectedExceptionName UTF8String], \
@@ -222,14 +223,14 @@ static void unsupported(const char *format, ...)
  * code to run to completion without an exception being thrown, but you don't
  * have a particular expression to be checked.
  */
-#define PASS_RUNS(code, args...) \
+#define PASS_RUNS(code, format, ...) \
   NS_DURING \
     id _tmp = testRaised; testRaised = nil; [_tmp release]; \
     { code; } \
-    pass(1, ## args); \
+    pass(1, " %s:%d ... " format, __FILE__, __LINE__, ## __VA_ARGS__); \
   NS_HANDLER \
     testRaised = [localException retain]; \
-    pass(0, ## args); \
+    pass(0, " %s:%d ... " format, __FILE__, __LINE__, ## __VA_ARGS__); \
     printf("%s: %s", [[testRaised name] UTF8String], \
       [[testRaised description] UTF8String]); \
   NS_ENDHANDLER
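A sketch of typical use (hypothetical statements):

#import <Foundation/Foundation.h>
#import "Testing.h"

int main()
{
  NSAutoreleasePool  *arp = [NSAutoreleasePool new];
  NSMutableArray     *a = [NSMutableArray array];

  /* Passes so long as the statements complete without raising. */
  PASS_RUNS([a addObject: @"x"]; [a removeAllObjects];,
    "array mutation runs without raising")

  [arp release];
  return 0;
}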
@@ -267,7 +268,7 @@ static void unsupported(const char *format, ...)
  * a printf style format string and variable arguments to print a message
  * describing the set.
  */
-#define END_SET(desc, args...) \
+#define END_SET(format, ...) \
   } \
   [_setPool release]; \
   NS_HANDLER \
@@ -278,13 +279,13 @@ static void unsupported(const char *format, ...)
         [[localException reason] UTF8String], \
         [[[localException userInfo] description] UTF8String]); \
     } \
-  unresolved(desc, ## args); \
+  unresolved(" %s:%d ... " format, __FILE__, __LINE__, ## __VA_ARGS__); \
   NS_ENDHANDLER \
   testHopeful = save_hopeful; \
   } \
 else \
   { \
-    unsupported(desc, ## args); \
+    unsupported(" %s:%d ... " format, __FILE__, __LINE__, ## __VA_ARGS__); \
   }
 
 /* The NEED macro takes a test macro as an argument and breaks out of a set
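A sketch of a test set (hypothetical; this assumes START_SET takes a condition saying whether the set is supported, as the unsupported() branch of END_SET suggests — check Testing.h for the exact signature):

#import <Foundation/Foundation.h>
#import "Testing.h"

int main()
{
  NSAutoreleasePool  *arp = [NSAutoreleasePool new];

  START_SET(YES)                      /* assumed signature */
    PASS(1 + 1 == 2, "simple arithmetic")
  END_SET("arithmetic set")           /* location is appended automatically */

  [arp release];
  return 0;
}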
TestFramework/gnustep-tests

@@ -81,7 +81,8 @@ do
   echo
   echo "After the listing of any failures is a summary of counts of events:"
   echo "COMPLETED: The number of separate test files which were run."
-  echo "COMPILEFAIL: The number of separate test files which failed to run."
+  echo "COMPILEFAIL: The number of separate test files which did not start."
+  echo "ABORTED: The number of separate test files which failed to run."
   echo "DASHED: The number of hopes dashed ... tests which did not"
   echo "        pass, but which were not expected to pass (new code"
   echo "        being worked on etc)."
@@ -161,14 +162,14 @@ run_test_file ()
   cat $CWD/tests.tmp >> $CWD/tests.log
 
   # Extract the summary information and add it to the summary file.
-  grep "^\(PASS\|FAIL\|COMPILEFAIL\|COMPLETED\|DASHED\|UNRESOLVED\|UNSUPPORTED\)" $CWD/tests.tmp > $CWD/tests.sum.tmp
+  grep "^\(Passed test\|Failed test\|Uncompiled file\|Completed file\|Aborted file\|Dashed hope\|Unresolved set\|Unsupported set\):" $CWD/tests.tmp > $CWD/tests.sum.tmp
   cat $CWD/tests.sum.tmp >> $CWD/tests.sum
 
   # If there were failures or unresolved tests then report them...
-  if grep -L "^\(COMPILEFAIL\|FAIL\|UNRESOLVED\)" $CWD/tests.sum.tmp > /dev/null; then
+  if grep -L "^\(Uncompiled file\|Aborted file\|Unresolved set\|Failed test\):" $CWD/tests.sum.tmp > /dev/null; then
     echo
     echo $TESTFILE:
-    grep "^\(COMPILEFAIL\|FAIL\|UNRESOLVED\)" $CWD/tests.sum.tmp
+    grep "^\(Uncompiled file\|Aborted file\|Unresolved set\|Failed test\):" $CWD/tests.sum.tmp
   fi
 }
 
@@ -193,7 +194,6 @@ if [ x"$TESTDIRS" = x ]
 then
   if [ x"$GSCLEAN" = xYES ]
   then
-    rm -f GNUmakefile
     rm -rf obj
   else
     # Run specific individual test files.
@@ -213,10 +213,9 @@ else
       then
         if [ -f GNUmakefile.tests ]
         then
-          $MAKE_CMD -f GNUmakefile.tests $MAKEFLAGS clean 2>&1
+          $MAKE_CMD -f GNUmakefile.tests $MAKEFLAGS clean 2>&1 >> $CWD/tests.log
         fi
       fi
-      rm -f GNUmakefile
       rm -rf obj
       cd $CWD
     else
@@ -244,12 +243,17 @@ fi
 
 if [ x"$GSCLEAN" = xYES ]
 then
   rm -f tests.log
   rm -f tests.sum
 else
+  # Make some stats.
   if [ -r tests.sum ]
   then
-    grep "^[A-Z]*:" tests.sum | cut -d: -f1 | sort | uniq -c > tests.tmp
+    # Nasty pipeline of commands ...
+    # Look for each type of test result, sort and count the results,
+    # append 's' to each summary, then remove the trailing 's' from
+    # any summary with only a single result so the output is pretty.
+    grep "^\(Passed test\|Failed test\|Uncompiled file\|Completed file\|Aborted file\|Dashed hope\|Unresolved set\|Unsupported set\):" tests.sum | cut -d: -f1 | sort | uniq -c | sed -e 's/.*/&s/' | sed -e 's/^\([^0-9]*1[^0-9].*\)s$/\1/'> tests.tmp
   else
     echo "No tests found." > tests.tmp
   fi
TestFramework/runtest.sh

@@ -138,7 +138,7 @@ then
   $MAKE_CMD $MAKEFLAGS messages=yes debug=yes 2>&1
   if [ $? != 0 ]
   then
-    echo COMPILEFAIL: $1 >&2
+    echo "Uncompiled file: $1" >&2
   else
     # We want aggressive memory checking.
 
@@ -161,12 +161,12 @@ then
   then
     if [ -r $NAME.abort ]
     then
-      echo COMPLETED: $1 >&2
+      echo "Completed file: $1" >&2
     else
-      echo FAIL: $1 CRASHED without running all tests!!>&2
+      echo "Aborted file: $1 aborted without running all tests!" >&2
     fi
   else
-    echo COMPLETED: $1 >&2
+    echo "Completed file: $1" >&2
   fi
 fi
 