improve SWI compat

parent 8421af32e8
commit 36a0023fbf

3	.gitmodules (vendored)
@@ -25,3 +25,6 @@
 [submodule "packages/semweb"]
 	path = packages/semweb
 	url = ssh://vsc@yap.git.sourceforge.net/gitroot/yap/semweb
+[submodule "packages/plunit"]
+	path = packages/plunit
+	url = ssh://vsc@yap.git.sourceforge.net/gitroot/yap/plunit
121	C/c_interface.c
@@ -1427,105 +1427,106 @@ execute_cargs_back(PredEntry *pe, CPredicate exec_code, struct foreign_context *
   case 1:
     {
      CBPredicate1 code1 = (CBPredicate1)exec_code;
-     return ((code1)(Yap_InitSlot(Deref(ARG1) PASS_REGS),
+     return ((code1)(&B->cp_a1-LCL0,
                      ctx));
     }
   case 2:
     {
      CBPredicate2 code2 = (CBPredicate2)exec_code;
-     return ((code2)(Yap_InitSlot(Deref(ARG1) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG2) PASS_REGS),
+     return ((code2)(&B->cp_a1-LCL0,
+                     &B->cp_a2-LCL0,
                      ctx));
     }
   case 3:
     {
      CBPredicate3 code3 = (CBPredicate3)exec_code;
-     return ((code3)(Yap_InitSlot(Deref(ARG1) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG2) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG3) PASS_REGS),
+     return ((code3)(&B->cp_a1-LCL0,
+                     &B->cp_a2-LCL0,
+                     &B->cp_a3-LCL0,
                      ctx));
     }
   case 4:
     {
      CBPredicate4 code4 = (CBPredicate4)exec_code;
-     return ((code4)(Yap_InitSlot(Deref(ARG1) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG2) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG3) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG4) PASS_REGS),
+     return ((code4)(&B->cp_a1-LCL0,
+                     &B->cp_a2-LCL0,
+                     &B->cp_a3-LCL0,
+                     &B->cp_a4-LCL0,
                      ctx));
     }
   case 5:
     {
      CBPredicate5 code5 = (CBPredicate5)exec_code;
-     return ((code5)(Yap_InitSlot(Deref(ARG1) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG2) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG3) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG4) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG5) PASS_REGS), ctx));
+     return ((code5)(&B->cp_a1-LCL0,
+                     &B->cp_a2-LCL0,
+                     &B->cp_a3-LCL0,
+                     &B->cp_a4-LCL0,
+                     &B->cp_a5-LCL0,
+                     ctx));
     }
   case 6:
     {
      CBPredicate6 code6 = (CBPredicate6)exec_code;
-     return ((code6)(Yap_InitSlot(Deref(ARG1) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG2) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG3) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG4) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG5) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG6) PASS_REGS),
+     return ((code6)(&B->cp_a1-LCL0,
+                     &B->cp_a2-LCL0,
+                     &B->cp_a3-LCL0,
+                     &B->cp_a4-LCL0,
+                     &B->cp_a5-LCL0,
+                     &B->cp_a6-LCL0,
                      ctx));
     }
   case 7:
     {
      CBPredicate7 code7 = (CBPredicate7)exec_code;
-     return ((code7)(Yap_InitSlot(Deref(ARG1) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG2) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG3) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG4) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG5) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG6) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG7) PASS_REGS),
+     return ((code7)(&B->cp_a1-LCL0,
+                     &B->cp_a2-LCL0,
+                     &B->cp_a3-LCL0,
+                     &B->cp_a4-LCL0,
+                     &B->cp_a5-LCL0,
+                     &B->cp_a6-LCL0,
+                     &B->cp_a7-LCL0,
                      ctx));
     }
   case 8:
     {
      CBPredicate8 code8 = (CBPredicate8)exec_code;
-     return ((code8)(Yap_InitSlot(Deref(ARG1) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG2) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG3) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG4) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG5) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG6) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG7) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG8) PASS_REGS),
+     return ((code8)(&B->cp_a1-LCL0,
+                     &B->cp_a2-LCL0,
+                     &B->cp_a3-LCL0,
+                     &B->cp_a4-LCL0,
+                     &B->cp_a5-LCL0,
+                     &B->cp_a6-LCL0,
+                     &B->cp_a7-LCL0,
+                     &B->cp_a8-LCL0,
                      ctx));
     }
   case 9:
     {
      CBPredicate9 code9 = (CBPredicate9)exec_code;
-     return ((code9)(Yap_InitSlot(Deref(ARG1) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG2) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG3) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG4) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG5) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG6) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG7) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG8) PASS_REGS),
-                     Yap_InitSlot(Deref(ARG9) PASS_REGS),
+     return ((code9)(&B->cp_a1-LCL0,
+                     &B->cp_a2-LCL0,
+                     &B->cp_a3-LCL0,
+                     &B->cp_a4-LCL0,
+                     &B->cp_a5-LCL0,
+                     &B->cp_a6-LCL0,
+                     &B->cp_a7-LCL0,
+                     &B->cp_a8-LCL0,
+                     &B->cp_a9-LCL0,
                      ctx));
     }
   case 10:
     {
      CBPredicate10 code10 = (CBPredicate10)exec_code;
-     return ((code10)(Yap_InitSlot(Deref(ARG1) PASS_REGS),
-                      Yap_InitSlot(Deref(ARG2) PASS_REGS),
-                      Yap_InitSlot(Deref(ARG3) PASS_REGS),
-                      Yap_InitSlot(Deref(ARG4) PASS_REGS),
-                      Yap_InitSlot(Deref(ARG5) PASS_REGS),
-                      Yap_InitSlot(Deref(ARG6) PASS_REGS),
-                      Yap_InitSlot(Deref(ARG7) PASS_REGS),
-                      Yap_InitSlot(Deref(ARG8) PASS_REGS),
-                      Yap_InitSlot(Deref(ARG9) PASS_REGS),
-                      Yap_InitSlot(Deref(ARG10) PASS_REGS),
+     return ((code10)(&B->cp_a1-LCL0,
+                      &B->cp_a2-LCL0,
+                      &B->cp_a3-LCL0,
+                      &B->cp_a4-LCL0,
+                      &B->cp_a5-LCL0,
+                      &B->cp_a6-LCL0,
+                      &B->cp_a7-LCL0,
+                      &B->cp_a8-LCL0,
+                      &B->cp_a9-LCL0,
+                      &B->cp_a10-LCL0,
                       ctx));
     }
   default:
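The switch above dispatches SWI-style backtrackable (nondeterministic) foreign predicates by arity; after this change the term references handed to the handler are derived from the choice point's saved argument cells (&B->cp_aN - LCL0, an offset from the local-stack base) rather than from freshly created slots. For context, a minimal handler of the kind being dispatched here, written against the standard SWI-Prolog foreign interface, could look like the sketch below. The predicate natural_below/1 and its behaviour are purely illustrative and are not part of this commit or of YAP.

#include <SWI-Prolog.h>

/* natural_below(?N): enumerate 0..9 on backtracking (illustrative only). */
static foreign_t
natural_below(term_t t, control_t handle)
{ intptr_t n;

  switch( PL_foreign_control(handle) )
  { case PL_FIRST_CALL:
      n = 0;
      break;
    case PL_REDO:
      n = PL_foreign_context(handle);   /* state stashed by PL_retry() below */
      break;
    case PL_PRUNED:                     /* choice point cut away: no cleanup */
    default:
      PL_succeed;
  }

  if ( n >= 10 || !PL_unify_integer(t, n) )
    PL_fail;

  PL_retry(n+1);                        /* succeed and ask to be re-called   */
}

install_t
install_natural_below(void)
{ PL_register_foreign("natural_below", 1, natural_below,
                      PL_FA_NONDETERMINISTIC);
}

Loaded as a foreign library, the goal natural_below(X) would then yield X = 0, 1, ..., 9 on backtracking.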
@@ -1595,7 +1596,7 @@ YAP_ExecuteFirst(PredEntry *pe, CPredicate exec_code)
   if (pe->PredFlags & CArgsPredFlag) {
     val = execute_cargs_back(pe, exec_code, ctx PASS_REGS);
   } else {
-    val = ((codev)((&ARG1)-LCL0,0,ctx));
+    val = ((codev)(B->cp_args-LCL0,0,ctx));
   }
   /* make sure we clean up the frames left by the user */
   while (execution != oexec)
@@ -1710,7 +1711,7 @@ YAP_ExecuteNext(PredEntry *pe, CPredicate exec_code)
   if (pe->PredFlags & CArgsPredFlag) {
     val = execute_cargs_back(pe, exec_code, ctx PASS_REGS);
   } else {
-    val = ((codev)((&ARG1)-LCL0,0,ctx));
+    val = ((codev)(B->cp_args-LCL0,0,ctx));
  }
   /* make sure we clean up the frames left by the user */
   while (execution != oexec)
@@ -1732,12 +1733,10 @@ YAP_ExecuteNext(PredEntry *pe, CPredicate exec_code)
     } else if (val == 1) { /* TRUE */
       cut_succeed();
     } else {
-      /*
-      if ((val & REDO_PTR) == REDO_PTR)
+      if ((val & REDO_PTR) == REDO_PTR)
 	ctx->context = (int *)(val & ~REDO_PTR);
-      else
+      else
 	ctx->context = (int *)((val & ~REDO_PTR)>>FRG_REDO_BITS);
-      */
     }
     return TRUE;
   } else {
@@ -952,6 +952,8 @@ typedef struct choicept {
 #define cp_a6 cp_args[5]
 #define cp_a7 cp_args[6]
 #define cp_a8 cp_args[7]
+#define cp_a9 cp_args[8]
+#define cp_a10 cp_args[9]
 #define EXTRA_CBACK_ARG(Arity,Offset) B->cp_args[(Arity)+(Offset)-1]
 #else
 /* Otherwise, we need a very dirty trick to access the arguments */
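The two added macros extend the existing aliasing scheme to arities 9 and 10: cp_aN is simply a fixed name for a slot of cp_args[], which is what lets the new execute_cargs_back() code above take the address of a saved argument directly. A self-contained toy illustration of the same macro pattern (this is not YAP's real choicept structure, just the idiom):

#include <stdio.h>

typedef unsigned long CELL;              /* stand-in for YAP's CELL        */

struct toy_choicept {                    /* toy struct, not YAP's choicept */
  CELL cp_args[10];
};
#define cp_a9  cp_args[8]                /* same aliasing as in the diff   */
#define cp_a10 cp_args[9]

int main(void)
{ struct toy_choicept b = { { 0 } };

  b.cp_a9  = 9;                          /* writes b.cp_args[8] */
  b.cp_a10 = 10;                         /* writes b.cp_args[9] */
  printf("%lu %lu\n", b.cp_args[8], b.cp_args[9]);   /* prints: 9 10 */
  return 0;
}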
@@ -5,7 +5,6 @@

:- module(system, [concat_atom/2,
		   concat_atom/3,
		   setenv/2,
		   read_clause/1,
		   string/1,
		   chdir/1,
@@ -154,8 +153,6 @@ concat_atom(List, New) :-
	atomic_concat(List, New).

setenv(X,Y) :- unix(putenv(X,Y)).

read_clause(X,Y) :-
	read_term(X,Y,[singetons(warning)]).
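Hypothetical queries exercising the two compatibility predicates shown in this hunk (illustrative only, not part of the commit):

?- setenv('PLUNIT_TMP', '/tmp').      % goes through unix(putenv(...))
?- read_clause(user_input, Clause).   % read_term/3, warning on singletons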
@@ -2621,25 +2621,15 @@ PL_eval_expression_to_int64_ex(term_t t, int64_t *val)
 }

 foreign_t
-_PL_retry(intptr_t n)
+_PL_retry(intptr_t v)
 {
-  CACHE_REGS
-  /* first we need to get the pointer to the predicate */
-  PredEntry *pe = B->cp_ap->u.OtapFs.p;
-  struct foreign_context *ctx = (struct foreign_context *)(&EXTRA_CBACK_ARG(pe->ArityOfPE,1));
-  ctx->context = n;
-  return LCL0-(CELL *)ctx;
+  return (((uintptr_t)(v)<<FRG_REDO_BITS)|REDO_INT);
 }

 foreign_t
 _PL_retry_address(void *addr)
 {
-  CACHE_REGS
-  /* first we need to get the pointer to the predicate */
-  PredEntry *pe = B->cp_ap->u.OtapFs.p;
-  struct foreign_context *ctx = (struct foreign_context *)(&EXTRA_CBACK_ARG(pe->ArityOfPE,1));
-  ctx->context = (intptr_t)addr;
-  return LCL0-(CELL *)ctx;
+  return (((uintptr_t)(addr))|REDO_PTR);
 }

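With this change _PL_retry() and _PL_retry_address() return SWI-style tagged values instead of storing the context in the choice point, which is what the re-enabled REDO_PTR/REDO_INT decoding in YAP_ExecuteNext() above consumes. A self-contained sketch of that round trip; the constant values below are illustrative stand-ins, only the bit manipulation mirrors the code in the diff:

#include <stdio.h>
#include <stdint.h>

#define FRG_REDO_BITS 2      /* assumed: low bits reserved for the tag      */
#define REDO_INT      0x02   /* assumed: "context is a small integer"       */
#define REDO_PTR      0x03   /* assumed: "context is an aligned pointer"    */

static uintptr_t retry_int(intptr_t v)   /* shaped like the new _PL_retry() */
{ return (((uintptr_t)v) << FRG_REDO_BITS) | REDO_INT;
}

int main(void)
{ uintptr_t rc = retry_int(42);

  /* Decoding as in the re-enabled branch of YAP_ExecuteNext(): */
  if ( (rc & REDO_PTR) == REDO_PTR )
    printf("pointer context: %p\n", (void *)(rc & ~(uintptr_t)REDO_PTR));
  else
    printf("integer context: %ld\n",
           (long)((rc & ~(uintptr_t)REDO_PTR) >> FRG_REDO_BITS));
  return 0;                              /* prints: integer context: 42     */
}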
@@ -1 +1 @@
-Subproject commit 613a7199e420a734717f2887c17b4fc3ef30f67e
+Subproject commit 220431a82d88734dfcde0201655e9f67adfdd4a1
@@ -1 +1 @@
-Subproject commit b9631d198164fbeebda627ed440ce2a36be84914
+Subproject commit 449c7a945cba6452634222592eef25f845d3e677
1	packages/plunit (Submodule)
@@ -0,0 +1 @@
+Subproject commit 52469bccc1cc81d06e47a3a781128689f4658542
@ -1,172 +0,0 @@
|
||||
[Jul 29 2009]
|
||||
|
||||
* ENHANCED: System and library modules now use =system= as their parent module rather than =user=.
|
||||
[Jun 15 2009]
|
||||
|
||||
* DOC: Item#483: Broken link in PlUnit. Ulrich Neumerkel.
|
||||
|
||||
[May 14 2009]
|
||||
|
||||
* DOC: module specifier, typos
|
||||
|
||||
[Mar 19 2009]
|
||||
|
||||
* CLEANUP: Removed all trailing whitespace from all source-files. This avoids many GIT (merge) warnings.
|
||||
[Mar 12 2009]
|
||||
|
||||
* MODIFIED: Renamed concat_atom/2,3 into atomic_list_concat/2,3 for compatibility. Moved the old predicates into library(backcomp). It is adviced to update your code, but 99% of the code will remain working because the old definitions are autoloaded.
|
||||
[Feb 11 2009]
|
||||
|
||||
* CLEANUP: Module declaration code
|
||||
[Sep 11 2008]
|
||||
|
||||
* PORT: Add AC_PREREQ to configure.h for systems that auto-select autoconf
|
||||
versions. After tip by Ulrich Neumerkel.
|
||||
|
||||
[Aug 11 2008]
|
||||
|
||||
* INSTALL: Remove all configure files from the git repository
|
||||
|
||||
[Jul 10 2008]
|
||||
|
||||
* FIXED: PlUnit: Perform goal_expansion/2 on setup, condition and cleanup options.
|
||||
Ivo Swartjes.
|
||||
|
||||
[Jun 26 2008]
|
||||
|
||||
* ADDED: forall option to unit tests that allows running the same test on
|
||||
different data sets.
|
||||
|
||||
* FIXED: wrong handling of gloabl option sto(false)
|
||||
[May 31 2008]
|
||||
|
||||
* FIXED: Properly support tests with condition and setup
|
||||
|
||||
[May 14 2008]
|
||||
|
||||
* ENHANCED: PlUnit: Test options in set_test_options/1; fix documentation.
|
||||
|
||||
[May 9 2008]
|
||||
|
||||
* ADDED: Allow specifying sto mode for an entire test set
|
||||
|
||||
[May 1 2008]
|
||||
|
||||
* DOC: plunit and non termination
|
||||
|
||||
[Apr 28 2008]
|
||||
|
||||
* ADDED: flag tests using `fixme'. Suggested under the name `desired' by
|
||||
Ulrich Neumerkel. Let us see how this works out in practice!
|
||||
|
||||
[Apr 24 2008]
|
||||
|
||||
* ENHANCED: When interrupting a thread, indicate which one.
|
||||
|
||||
* ADDED: PlUnit: running_tests/0 to find what test is running.
|
||||
|
||||
* FIXED: wrong dynamic declaration in plunit.pl
|
||||
|
||||
[Mar 11 2008]
|
||||
|
||||
* ENHANCED: Provide file/line info for errors in setup and condition
|
||||
|
||||
[Feb 4 2008]
|
||||
|
||||
* FIXED: STO independent of timings, Item#316
|
||||
|
||||
[Jan 31 2008]
|
||||
|
||||
* DOC: formatting
|
||||
|
||||
[Jan 30 2008]
|
||||
|
||||
* ENHANCED: PlUnit: run_tests(Unit) raise error on non-existing unit.
|
||||
Item#305, Ulrich Neumerkel.
|
||||
|
||||
* ENHANCED: PlUnit: reporting STO and setting STO checking options
|
||||
|
||||
[Jan 29 2008]
|
||||
|
||||
* CLEANUP: PlUnit: some reorganisation of the test loop, preparing for further
|
||||
enhancements.
|
||||
|
||||
[Jan 28 2008]
|
||||
|
||||
* ADDED: subsumes/2 and subsumes_chk/2 as built-in predicates.
|
||||
|
||||
* ENHANCED: Really load no tests if not asked to do and provide better feedback
|
||||
if no tests where loaded or all tests passed.
|
||||
|
||||
* FIXED: DCG: cut transparency, output unification, preserve erroneous non-terminals
|
||||
* ENHANCED: DCG: more tests
|
||||
* FIXED: PlUnit version confusion
|
||||
|
||||
[2008-01-26]
|
||||
|
||||
* FIXED: portability for SICStus 3.12
|
||||
* ENHANCED: new option sto
|
||||
|
||||
[Jan 14 2008]
|
||||
|
||||
* FIXED: Non-determinism in run_tests/0 if there are blocked tests.
|
||||
|
||||
[Dec 21 2007]
|
||||
|
||||
* FIXED: PlUnit printing of wrong answers if there are attributes. Ulrich
|
||||
Neumerkel, item#242
|
||||
Oct 30, 2007
|
||||
|
||||
* ENHANCED: Report error or failure in cleanup handler (Mark van Assem)
|
||||
* ENHANCED: Do not report '% All tests passed' if no tests are
|
||||
executed after a make/0.
|
||||
|
||||
Sep 21, 2007
|
||||
|
||||
* ENHANCED: Verify test-set and test options.
|
||||
|
||||
* ADDED: Allow for Var == Value as shorthand for true(Var == Value).
|
||||
|
||||
Sep 20, 2007
|
||||
|
||||
* ENHANCED: Message on wrong error
|
||||
|
||||
* DOCS: Fixed typos. Ulrich Neumerkel.
|
||||
|
||||
* FIXED: avoid a loop if a test throws an error holding a cyclic term.
|
||||
|
||||
* MODIFIED: errors are now verified using subsumes_chk/2 instead of =@=.
|
||||
|
||||
* PORT: =@= is now correctly bound to variant/2 for SICStus.
|
||||
|
||||
Aug 1, 2007
|
||||
|
||||
* FIXED: Ran failing tests twice due to unwanted choicepoint.
|
||||
|
||||
Jul 30, 2007
|
||||
|
||||
* ADDED: allow for error(Error) as alias for throws(error(Error, _)).
|
||||
|
||||
Mar 28, 2007
|
||||
|
||||
* ADDED: option silent(Bool) to force completely silent operation.
|
||||
|
||||
Mar 6, 2007
|
||||
|
||||
* Allow for single option without list
|
||||
|
||||
Nov 23, 2006
|
||||
|
||||
* Force start of line after ....
|
||||
|
||||
Oct 22, 2006
|
||||
|
||||
* Allow tracing body of tests. Suggested by Lukas Degener.
|
||||
|
||||
Oct 17, 2006
|
||||
|
||||
* Portability with SICStus 3.12.
|
||||
|
||||
Oct 13, 2006
|
||||
|
||||
* Better message for tests that succeeded but should have failed
|
@ -1,137 +0,0 @@
|
||||
################################################################
|
||||
# SWI-Prolog `PlDoc' package
|
||||
# Author: Jan Wielemaker. wielemak@science.uva.nl
|
||||
# Copyright: GPL (see COPYING or www.gnu.org
|
||||
################################################################
|
||||
|
||||
.SUFFIXES: .tex .dvi .doc .pl
|
||||
|
||||
SHELL=@SHELL@
|
||||
|
||||
ifeq (@PROLOG_SYSTEM@,yap)
|
||||
|
||||
prefix = @prefix@
|
||||
exec_prefix = @exec_prefix@
|
||||
ROOTDIR = $(prefix)
|
||||
EROOTDIR = @exec_prefix@
|
||||
|
||||
srcdir=@srcdir@
|
||||
|
||||
BINDIR = $(EROOTDIR)/bin
|
||||
LIBDIR=@libdir@
|
||||
YAPLIBDIR=@libdir@/Yap
|
||||
SHAREDIR=$(EROOTDIR)/share/Yap
|
||||
|
||||
PL=@INSTALL_ENV@ $(DESTDIR)$(BINDIR)/yap $(DESTDIR)$(YAPLIBDIR)/startup.yss
|
||||
LN_S=@LN_S@
|
||||
EXDIR=$(LIBDIR)/examples/plunit
|
||||
|
||||
INSTALLDIR=$(SHAREDIR)
|
||||
|
||||
else # SWI
|
||||
|
||||
srcdir=.
|
||||
|
||||
PLBASE=@PLBASE@
|
||||
PLARCH=@PLARCH@
|
||||
PL=@PL@
|
||||
XPCEBASE=$(PLBASE)/xpce
|
||||
PKGDOC=$(PLBASE)/doc/packages
|
||||
PCEHOME=../xpce
|
||||
LIBDIR=$(PLBASE)/library
|
||||
EXDIR=$(PKGDOC)/examples/plunit
|
||||
|
||||
INSTALLDIR=$(LIBDIR)
|
||||
|
||||
endif
|
||||
|
||||
DESTDIR=
|
||||
|
||||
DOCTOTEX=$(PCEHOME)/bin/doc2tex
|
||||
PLTOTEX=$(PCEHOME)/bin/pl2tex
|
||||
vLATEX=latex
|
||||
DOC=plunit
|
||||
TEX=$(DOC).tex
|
||||
DVI=$(DOC).dvi
|
||||
PDF=$(DOC).pdf
|
||||
HTML=$(DOC).html
|
||||
|
||||
INSTALL=@INSTALL@
|
||||
INSTALL_PROGRAM=@INSTALL_PROGRAM@
|
||||
INSTALL_DATA=@INSTALL_DATA@
|
||||
|
||||
LIBPL= $(srcdir)/plunit.pl $(srcdir)/test_wizard.pl $(srcdir)/test_cover.pl
|
||||
LIBALL= $(LIBPL) $(SUPPORT)
|
||||
EXAMPLES= $(srcdir)/read.pl $(srcdir)/simple.pl
|
||||
|
||||
all:
|
||||
@echo "Nothing to do for this package"
|
||||
|
||||
install: $(LIBPL)
|
||||
mkdir -p $(DESTDIR)$(INSTALLDIR)
|
||||
$(INSTALL_DATA) $(LIBALL) $(DESTDIR)$(INSTALLDIR)
|
||||
$(PL) -f none -g make -t halt
|
||||
|
||||
ln-install: $(LIBPL)
|
||||
mkdir -p $(DESTDIR)$(INSTALLDIR)
|
||||
for f in $(LIBALL); do \
|
||||
rm -f $(DESTDIR)$(INSTALLDIR)/$$f; \
|
||||
ln -s `pwd`/$$f $(DESTDIR)$(INSTALLDIR); \
|
||||
done
|
||||
$(PL) -f none -g make -t halt
|
||||
|
||||
rpm-install: install
|
||||
|
||||
pdf-install: install-examples
|
||||
$(INSTALL_DATA) $(DOC).pdf $(DESTDIR)$(PKGDOC)
|
||||
|
||||
html-install: install-examples
|
||||
$(INSTALL_DATA) $(DOC).html $(DESTDIR)$(PKGDOC)
|
||||
|
||||
install-examples::
|
||||
mkdir -p $(DESTDIR)$(EXDIR)
|
||||
(cd examples && $(INSTALL_DATA) $(EXAMPLES) $(DESTDIR)$(EXDIR))
|
||||
|
||||
uninstall::
|
||||
rm -f $(DESTDIR)$(INSTALLDIR)/plunit.pl
|
||||
$(PL) -f none -g make -t halt
|
||||
|
||||
check::
|
||||
true
|
||||
|
||||
################################################################
|
||||
# Documentation
|
||||
################################################################
|
||||
|
||||
doc: $(PDF) $(HTML)
|
||||
pdf: $(PDF)
|
||||
html: $(HTML)
|
||||
|
||||
$(HTML): $(TEX)
|
||||
latex2html $(DOC)
|
||||
mv html/index.html $@
|
||||
rmdir html
|
||||
|
||||
$(PDF): $(TEX)
|
||||
../../man/runtex --pdf $(DOC)
|
||||
|
||||
$(TEX): $(DOCTOTEX)
|
||||
|
||||
.doc.tex:
|
||||
$(DOCTOTEX) $*.doc > $*.tex
|
||||
.pl.tex:
|
||||
$(PLTOTEX) $*.pl > $*.tex
|
||||
|
||||
################################################################
|
||||
# Clean
|
||||
################################################################
|
||||
|
||||
clean:
|
||||
rm -f *~ *% config.log
|
||||
|
||||
# rm -f $(TEX)
|
||||
# ../../man/runtex --clean $(DOC)
|
||||
# rm -rf html
|
||||
|
||||
distclean: clean
|
||||
rm -f $(TARGETS) config.cache config.status Makefile
|
@ -1,46 +0,0 @@
|
||||
Prolog Unit Tests
|
||||
=================
|
||||
|
||||
|
||||
---++ TBD
|
||||
|
||||
* Add options to test units
|
||||
- Condition, setup, cleanup, blocked [OK]
|
||||
* Run the tests on make, etc. [OK]
|
||||
* Report generation options
|
||||
|
||||
---++ What to do with loaded tests?
|
||||
|
||||
* Keep them around
|
||||
* Remove them
|
||||
* Flag module as `volatile', so it is not saved.
|
||||
|
||||
---++ Distinguish user/system/library tests
|
||||
|
||||
---++ Deal with seperate test-files:
|
||||
|
||||
* load_test_files(+Options)
|
||||
|
||||
---++ Volatile modules?
|
||||
|
||||
---++ Conversion of system tests:
|
||||
|
||||
- test.pl internals [OK]
|
||||
- scripts called from test.pl [OK]
|
||||
- tests for packages
|
||||
|
||||
---++ Run tests concurrently?
|
||||
|
||||
+ Tests applications for concurrent execution
|
||||
+ Saves time, notably on tests doing networking, timeout, etc.
|
||||
- Might be more complicated to understand
|
||||
|
||||
---++ Test wizard
|
||||
|
||||
* Write tests to .plt files
|
||||
* Merge with existing content of .plt files
|
||||
- Read with comments and write back?
|
||||
|
||||
---++ Coverage analysis
|
||||
|
||||
* See cover.pl
|
@ -1,27 +0,0 @@
|
||||
|
||||
:- use_module(library(plunit)).
|
||||
|
||||
:- begin_tests(read).
|
||||
|
||||
:- use_module(library(system)).
|
||||
|
||||
:- use_module(library(readutil)).
|
||||
|
||||
create_file(Tmp) :-
|
||||
tmp_file(plunit, Tmp),
|
||||
open(Tmp, write, Out),
|
||||
write(Out, 'hello(World).\n'),
|
||||
close(Out).
|
||||
|
||||
test(read, [ setup(create_file(Tmp)),
|
||||
cleanup(delete_file(Tmp))
|
||||
]) :-
|
||||
read_file_to_terms(Tmp, Terms, []),
|
||||
Terms =@= [hello(_)].
|
||||
|
||||
:- end_tests(read).
|
||||
|
||||
:- trace,run_tests.
|
||||
|
||||
|
||||
|
@ -1,37 +0,0 @@
|
||||
:- module(ex_simple, []).
|
||||
|
||||
:- use_module(library(plunit)).
|
||||
|
||||
:- begin_tests(lists).
|
||||
|
||||
test(true) :-
|
||||
true.
|
||||
|
||||
test(fail) :-
|
||||
\+ fail.
|
||||
|
||||
test(fail, [fail]) :-
|
||||
fail.
|
||||
|
||||
test(member) :-
|
||||
member(a, [a]), !.
|
||||
|
||||
test(member, [nondet]) :-
|
||||
member(_, [a]).
|
||||
|
||||
test(member, [true(X == a)]) :-
|
||||
member(X, [a]), !.
|
||||
|
||||
test(member, [all(V == [a,b,c])]) :-
|
||||
member(V, [a,b,c]).
|
||||
|
||||
test(append) :-
|
||||
append("aap", "noot", X),
|
||||
X == "aapnoot".
|
||||
|
||||
:- end_tests(lists).
|
||||
|
||||
:- run_tests.
|
||||
|
||||
:- run_tests(lists:member).
|
||||
|
@ -1,671 +0,0 @@
|
||||
\documentclass[11pt]{article}
|
||||
\usepackage{times}
|
||||
\usepackage{pl}
|
||||
\usepackage{html}
|
||||
\usepackage{plpage}
|
||||
\sloppy
|
||||
\makeindex
|
||||
|
||||
\onefile
|
||||
\htmloutput{html} % Output directory
|
||||
\htmlmainfile{index} % Main document file
|
||||
\bodycolor{white} % Page colour
|
||||
|
||||
\renewcommand{\runningtitle}{Prolog Unit Tests}
|
||||
|
||||
\begin{document}
|
||||
|
||||
\title{Prolog Unit Tests}
|
||||
\author{Jan Wielemaker \\
|
||||
HCS, \\
|
||||
University of Amsterdam \\
|
||||
The Netherlands \\
|
||||
E-mail: \email{wielemak@science.uva.nl}}
|
||||
|
||||
\maketitle
|
||||
|
||||
\begin{abstract}
|
||||
This document describes a Prolog unit-test framework. This framework was
|
||||
initially developed for \url[SWI-Prolog]{http://www.swi-prolog.org}. The
|
||||
current version also runs on \url[SICStus
|
||||
Prolog]{http://www.sics.se/sicstus/}, providing a portable testing
|
||||
framework. See \secref{sicstus}.
|
||||
\end{abstract}
|
||||
|
||||
\pagebreak
|
||||
\tableofcontents
|
||||
|
||||
\vfill
|
||||
\vfill
|
||||
|
||||
\newpage
|
||||
|
||||
\section{Introduction}
|
||||
\label{sec:intro}
|
||||
|
||||
There is really no excuse not to write tests!
|
||||
|
||||
Automatic testing of software during development is probably the most
|
||||
important Quality Assurance measure. Tests can validate the final
|
||||
system, which is nice for your users. However, most (Prolog) developers
|
||||
forget that it is not just a burden during development.
|
||||
|
||||
\begin{itemize}
|
||||
\item Tests document how the code is supposed to be used.
|
||||
\item Tests can validate claims you make on the Prolog
|
||||
implementation. Writing a test makes the claim
|
||||
explicit.
|
||||
\item Tests avoid big applications saying `No' after
|
||||
modifications. This saves time during development,
|
||||
and it saves \emph{a lot} of time if you must return
|
||||
to the application a few years later or you must
|
||||
modify and debug someone else's application.
|
||||
\end{itemize}
|
||||
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{A Unit Test box}
|
||||
\label{sec:unitbox}
|
||||
|
||||
Tests are written in pure Prolog and enclosed within the directives
|
||||
begin_tests/1,2 and end_tests/1. They can be embedded inside a normal
|
||||
source module, or be placed in a separate test-file that loads the files
|
||||
to be tested. Code inside a test box is normal Prolog code. The
|
||||
entry points are defined by rules using the head \term{test}{Name} or
|
||||
\term{test}{Name, Options}, where \arg{Name} is a ground term and
|
||||
\arg{Options} is a list describing additional properties of the test.
|
||||
Here is a very simple example:
|
||||
|
||||
\begin{code}
|
||||
:- begin_tests(lists).
|
||||
:- use_module(library(lists)).
|
||||
|
||||
test(reverse) :-
|
||||
reverse([a,b], [b,a]).
|
||||
|
||||
:- end_tests(lists).
|
||||
\end{code}
|
||||
|
||||
The optional second argument of the test-head defines additional processing
|
||||
options. Defined options are:
|
||||
|
||||
\begin{description}
|
||||
\termitem{blocked}{+Reason:atom}
|
||||
The test is currently disabled. Tests are flagged as blocked if they
|
||||
cannot be run for some reason. E.g.\ they crash Prolog, they rely on
|
||||
some service that is not available, they take too much resources, etc.
|
||||
Tests that fail but do not crash, etc.\ should be flagged using
|
||||
\term{fixme}{Fixme}.
|
||||
|
||||
\termitem{fixme}{+Reason:atom}
|
||||
Similar to \term{blocked}{Reason}, but the test is executed anyway. If
|
||||
it fails, a \const{-} is printed instead of the \const{.} character. If
|
||||
it passes a \const{+} and if it passes with a choicepoint, \const{!}.
|
||||
A summary is printed at the end of the test run and the goal
|
||||
\term{test_report}{fixme} can be used to get details.
|
||||
|
||||
\termitem{condition}{:Goal}
|
||||
Pre-condition for running the test. If the condition fails
|
||||
the test is skipped. The condition can be used as an alternative
|
||||
to the \const{setup} option. The only difference is that failure
|
||||
of a condition skips the test and is considered an error when using
|
||||
the \const{setup} option.
|
||||
|
||||
\termitem{cleanup}{:Goal}
|
||||
\arg{Goal} is always called after completion of the test-body,
|
||||
regardless of whether it fails, succeeds or throws an exception. This
|
||||
option or call_cleanup/2 must be used by tests that require side-effects
|
||||
that must be reverted after the test completes. \arg{Goal} may share
|
||||
variables with the test body.
|
||||
|
||||
\begin{code}
|
||||
create_file(Tmp) :-
|
||||
tmp_file(plunit, Tmp),
|
||||
open(Tmp, write, Out),
|
||||
write(Out, 'hello(World).\n'),
|
||||
close(Out).
|
||||
|
||||
test(read, [ setup(create_file(Tmp)),
|
||||
cleanup(delete_file(Tmp))
|
||||
]) :-
|
||||
read_file_to_terms(Tmp, Terms, []),
|
||||
Term = hello(_).
|
||||
\end{code}
|
||||
|
||||
\termitem{setup}{:Goal}
|
||||
\arg{Goal} is run before the test-body. Typically used together with
|
||||
the \const{cleanup} option to create and destroy the required execution
|
||||
environment.
|
||||
|
||||
\termitem{forall}{:Generator}
|
||||
Run the same test for each solution of \arg{Generator}. Each run invokes
|
||||
the setup and cleanup handlers. This can be used to run the same test
|
||||
with different inputs. If an error occurs, the test is reported as
|
||||
\mbox{\texttt{name (forall bindings = } <vars> \texttt{)}}, where
|
||||
<vars> indicates the bindings of variables in \arg{Generator}.
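For illustration (this example is not part of the original text), a test that
is run once for every element of a list can be written as:

\begin{code}
test(abs, [forall(member(X, [-2, 0, 7]))]) :-
        Y is abs(X),
        Y >= 0.
\end{code}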
|
||||
|
||||
\termitem{true}{AnswerTerm Cmp Value}
|
||||
Body must succeed deterministically. AnswerTerm is compared to Value
|
||||
using the comparison operator \arg{Cmp}. \arg{Cmp} is typically one of
|
||||
=/2, ==/2, =:=/2 or =@=/2,%
|
||||
\footnote{The =@= predicate (denoted \emph{structural equivalence})
|
||||
is the same as variant/2 in SICStus.}
|
||||
but any test can be used. This is the same as inserting the test at the
|
||||
end of the conjunction, but it allows the test engine to distinguish
|
||||
between failure of copy_term/2 and producing the wrong value. Multiple
|
||||
variables must be combined in an arbitrary compound term. E.g.\
|
||||
\verb$A1-A2 == v1-v2$
|
||||
|
||||
\begin{code}
|
||||
test(copy, [ true(Copy =@= hello(X,X))
|
||||
]) :-
|
||||
copy_term(hello(Y,Y), Copy).
|
||||
\end{code}
|
||||
|
||||
\termitem{AnswerTerm Cmp Value}
|
||||
Equivalent to \term{true}{AnswerTerm Cmp Value} if \arg{Cmp} is one
|
||||
of the comparison operators given above.
|
||||
|
||||
\termitem{fail}{}
|
||||
Body must fail.
|
||||
|
||||
\termitem{throws}{Error}
|
||||
Body must throw \arg{Error}. The error is verified using
|
||||
\term{subsumes_chk}{Error, Generated}. I.e.\ the generated error
|
||||
must be more specific than the specified \arg{Error}.
|
||||
|
||||
\termitem{error}{Error}
|
||||
Body must throw \term{error}{Error, _Context}. See \const{throws}
|
||||
for details.
|
||||
|
||||
\termitem{all}{AnswerTerm Cmp Instances}
|
||||
Similar to \term{true}{AnswerTerm Cmp Values}, but used for non-deterministic
|
||||
predicates. Each element is compared using \arg{Cmp}. Order matters. For
|
||||
example:
|
||||
|
||||
\begin{code}
|
||||
test(or, all(X == [1,2])) :-
|
||||
( X = 1 ; X = 2 ).
|
||||
\end{code}
|
||||
|
||||
\termitem{set}{AnswerTerm Cmp Instances}
|
||||
Similar to \term{all}{AnswerTerm Cmp Instances}, but ignores order and
|
||||
duplicates with respect to \arg{Cmp} in the comparison. Each element is
|
||||
compared using \arg{Cmp}.
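For illustration (not in the original text):

\begin{code}
test(or, set(X == [1,2])) :-
        ( X = 1 ; X = 2 ; X = 2 ).
\end{code}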
|
||||
|
||||
\termitem{nondet}{}
|
||||
If this keyword appears in the option list, non-deterministic success
|
||||
of the body is not considered an error.
|
||||
|
||||
\termitem{sto}{Terms}
|
||||
Declares that executing body is subject to occurs-check (STO). The
|
||||
test is executed with \arg{Terms}. \arg{Terms} is either
|
||||
\const{rational_trees} or \const{finite_trees}. STO programs are not
|
||||
portable between different kinds of terms. Only programs \emph{not}
|
||||
subject to occurs-check (NSTO) are portable\footnote{See 7.3.3 of
|
||||
ISO/IEC 13211-1 PROLOG: Part 1 - General Core, for a detailed
|
||||
discussion of STO and NSTO}. Fortunately, most practical programs are
|
||||
NSTO. Writing tests that are STO is still useful to ensure the
|
||||
robustness of a predicate. In case sto4 and sto5 below, an infinite
|
||||
list (a rational tree) is created prior to calling the actual
|
||||
predicate. Ideally, such cases produce a type error or fail silently.
|
||||
|
||||
\begin{code}
|
||||
test(sto1, [sto(rational_trees)]) :-
|
||||
X=s(X).
|
||||
test(sto2, [sto(finite_trees),fail]) :-
|
||||
X=s(X).
|
||||
test(sto3, [sto(rational_trees), fail]) :-
|
||||
X=s(X), fail.
|
||||
test(sto4, [sto(rational_trees),error(type_error(list,L))]) :-
|
||||
L = [_|L], length(L,_).
|
||||
test(sto5, [sto(rational_trees),fail]) :-
|
||||
L = [_|L], length(L,3).
|
||||
\end{code}
|
||||
|
||||
Programs that depend on STO cases tend to be inefficient, even
|
||||
incorrect, are hard to understand and debug, and terminate poorly. It
|
||||
is therefore advisable to avoid STO programs whenever possible.
|
||||
|
||||
SWI's Prolog flag \prologflag{occurs_check} must not be modified
|
||||
within plunit tests.
|
||||
|
||||
\end{description}
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\subsection{Test Unit options}
|
||||
\label{sec:unitoptions}
|
||||
|
||||
\begin{description}
|
||||
\predicate{begin_tests}{1}{+Name}
|
||||
Start named test-unit. Same as \verb$begin_tests(Name, [])$.
|
||||
|
||||
\predicate{begin_tests}{2}{+Name, +Options}
|
||||
Start named test-unit with options. Options provide conditional
|
||||
processing, setup and cleanup similar to individual tests (second
|
||||
argument of test/2 rules).
|
||||
|
||||
Defined options are:
|
||||
\begin{description}
|
||||
\termitem{blocked}{+Reason}
|
||||
Test-unit has been blocked for the given \arg{Reason}.
|
||||
|
||||
\termitem{condition}{:Goal}
|
||||
Executed before executing any of the tests. If \arg{Goal} fails,
|
||||
the test of this unit is skipped.
|
||||
|
||||
\termitem{setup}{:Goal}
|
||||
Executed before executing any of the tests.
|
||||
|
||||
\termitem{cleanup}{:Goal}
|
||||
Executed after completion of all tests in the unit.
|
||||
|
||||
\termitem{sto}{+Terms}
|
||||
Specify default for subject-to-occurs-check mode. See \secref{unitbox}
|
||||
for details on the sto option.
|
||||
\end{description}
|
||||
\end{description}
|
||||
|
||||
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\subsection{Writing the test body}
|
||||
\label{sec:testbody}
|
||||
|
||||
The test-body is ordinary Prolog code. Without any options, the body
|
||||
must be designed to succeed \emph{deterministically}. Any other result
|
||||
is considered a failure. One of the options \const{fail}, \const{true},
|
||||
\const{throws}, \const{all} or \const{set} can be used to specify a
|
||||
different expected result. See \secref{unitbox} for details. In this
|
||||
section we illustrate typical test-scenarios by testing SWI-Prolog
|
||||
built-in and library predicates.
|
||||
|
||||
\subsubsection{Testing deterministic predicates}
|
||||
\label{sec:testdet}
|
||||
|
||||
Deterministic predicates are predicates that must succeed exactly once
|
||||
and, for well behaved predicates, leave no choicepoints. Typically they
|
||||
have zero or more input- and zero or more output arguments. The test
|
||||
goal supplies proper values for the input arguments and verifies the
|
||||
output arguments. Verification can use test-options or be explicit in
|
||||
the body. The tests in the example below are equivalent.
|
||||
|
||||
\begin{code}
|
||||
test(add) :-
|
||||
A is 1 + 2,
|
||||
A =:= 3.
|
||||
|
||||
test(add, [true(A =:= 3)]) :-
|
||||
A is 1 + 2.
|
||||
\end{code}
|
||||
|
||||
The test engine verifies that the test-body does not leave a
|
||||
choicepoint. We illustrate that using the test below:
|
||||
|
||||
\begin{code}
|
||||
test(member) :-
|
||||
member(b, [a,b,c]).
|
||||
\end{code}
|
||||
|
||||
Although this test succeeds, member/2 leaves a choicepoint which is
|
||||
reported by the test subsystem. To make the test silent, use one of
|
||||
the alternatives below.
|
||||
|
||||
\begin{code}
|
||||
test(member) :-
|
||||
member(b, [a,b,c]), !.
|
||||
|
||||
test(member, [nondet]) :-
|
||||
member(b, [a,b,c]).
|
||||
\end{code}
|
||||
|
||||
\subsubsection{Testing semi-deterministic predicates}
|
||||
\label{sec:testsemidet}
|
||||
|
||||
Semi-deterministic predicates are predicates that either fail or succeed
|
||||
exactly once and, for well behaved predicates, leave no choicepoints.
|
||||
Testing such predicates is the same as testing deterministic
|
||||
predicates. Negative tests must be specified using the option
|
||||
\const{fail} or by negating the body using \verb$\+/1$.
|
||||
|
||||
\begin{code}
|
||||
test(is_set) :-
|
||||
\+ is_set([a,a]).
|
||||
|
||||
test(is_set, [fail]) :-
|
||||
is_set([a,a]).
|
||||
\end{code}
|
||||
|
||||
|
||||
\subsubsection{Testing non-deterministic predicates}
|
||||
\label{sec:testnondet}
|
||||
|
||||
Non-deterministic predicates succeed zero or more times. Their results
|
||||
are tested either using findall/3 or setof/3 followed by a value-check
|
||||
or using the \const{all} or \const{set} options. The following are
|
||||
equivalent tests:
|
||||
|
||||
\begin{code}
|
||||
test(member) :-
|
||||
findall(X, member(X, [a,b,c]), Xs),
|
||||
Xs == [a,b,c].
|
||||
|
||||
test(member, all(X == [a,b,c])) :-
|
||||
member(X, [a,b,c]).
|
||||
\end{code}
|
||||
|
||||
\subsubsection{Testing error conditions}
|
||||
\label{sec:testerror}
|
||||
|
||||
Error-conditions are tested using the option \term{throws}{Error} or
|
||||
by wrapping the test in a catch/3. The following tests are equivalent:
|
||||
|
||||
\begin{code}
|
||||
test(div0) :-
|
||||
catch(A is 1/0, error(E, _), true),
|
||||
E =@= evaluation_error(zero_divisor).
|
||||
|
||||
test(div0, [error(evaluation_error(zero_divisor))]) :-
|
||||
A is 1/0.
|
||||
\end{code}
|
||||
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Using separate test files}
|
||||
\label{sec:testfiles}
|
||||
|
||||
Test-units can be embedded in normal Prolog source-files. Alternatively,
|
||||
tests for a source-file can be placed in another file alongside the file
|
||||
to be tested. Test files use the extension \fileext{plt}. The predicate
|
||||
load_test_files/1 can load all files that are related to source-files
|
||||
loaded into the current project.
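For illustration (not in the original text; \file{map.pl} is a hypothetical
file name), if \file{map.pl} is accompanied by \file{map.plt}:

\begin{code}
?- [map].
?- load_test_files([]).
?- run_tests.
\end{code}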
|
||||
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Running the test-suite}
|
||||
\label{sec:running}
|
||||
|
||||
At any time, the tests can be executed by loading the program and
|
||||
running run_tests/0 or run_tests(+Unit).
|
||||
|
||||
\begin{description}
|
||||
\predicate{run_tests}{0}{}
|
||||
Run all test-units.
|
||||
|
||||
\predicate{run_tests}{1}{+Spec}
|
||||
Run only the specified tests. \arg{Spec} can be a list to run multiple
|
||||
tests. A single specification is either the name of a test unit or
|
||||
a term <Unit>:<Tests>, running only the specified test. <Tests> is
|
||||
either the name of a test or a list of names. Running particular
|
||||
tests is particularly useful for tracing a test:%
|
||||
\footnote{Unfortunately the body of the test is called through
|
||||
meta-calling, so it cannot be traced. The called user-code can be traced
|
||||
normally though.}
|
||||
|
||||
\begin{code}
|
||||
?- gtrace, run_tests(lists:member).
|
||||
\end{code}
|
||||
\end{description}
|
||||
|
||||
To identify nonterminating tests, interrupt the looping process with
|
||||
\emph{Control-C}. The test name and location will be displayed.
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Tests and production systems}
|
||||
\label{sec:state}
|
||||
|
||||
Most applications do not want the test-suite to end up in the
|
||||
final application. There are several ways to achieve this. One
|
||||
is to place all tests in separate files and not to load the tests
|
||||
when creating the production environment. Alternatively, use the
|
||||
directive below before loading the application.
|
||||
|
||||
\begin{code}
|
||||
:- set_test_options([load(never)]).
|
||||
\end{code}
|
||||
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Controlling the test suite}
|
||||
\label{sec:options}
|
||||
|
||||
\begin{description}
|
||||
\predicate{set_test_options}{1}{+Options}
|
||||
Defined options are:
|
||||
|
||||
\begin{description}
|
||||
\termitem{load}{+Load}
|
||||
Determines whether or not tests are loaded. When \const{never},
|
||||
everything between begin_tests/1 and end_tests/1 is simply ignored.
|
||||
When \const{always}, tests are always loaded. Finally, when using
|
||||
the default value \const{normal}, tests are loaded if the code is
|
||||
not compiled with optimisation turned on.
|
||||
|
||||
\termitem{run}{+Run}
|
||||
Specifies when tests are run. Using \const{manual}, tests can only be
|
||||
run using run_tests/0 or run_tests/1. Using \const{make}, tests will be
|
||||
run for reloaded files, but not for files loaded the first time. Using
|
||||
\const{make(all)} make/0 will run all test-suites, not only those that
|
||||
belong to files that are reloaded.
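For illustration (not in the original text), running the tests of re-loaded
files on every make/0:

\begin{code}
:- set_test_options([run(make)]).
\end{code}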
|
||||
|
||||
\termitem{silent}{+Bool}
|
||||
When \const{true} (default is \const{false}), send informational
|
||||
messages using the `silent' level. In practice this means there
|
||||
is no output except for errors.
|
||||
|
||||
\termitem{sto}{+Bool}
|
||||
When \const{true} (default \const{false}), assume tests are not subject
|
||||
to occurs check (non-STO) and verify this if the Prolog implementation
|
||||
supports testing this.
|
||||
\end{description}
|
||||
|
||||
\predicate{load_test_files}{1}{+Options}
|
||||
Load \fileext{plt} test-files that belong to the currently loaded
|
||||
sources.
|
||||
|
||||
\predicate{running_tests}{0}{}
|
||||
Print all currently running tests to the terminal. It can be used
|
||||
to find running thread in multi-threaded test operation or find the
|
||||
currently running test if a test appears to be blocking.
|
||||
|
||||
\predicate{test_report}{1}{+What}
|
||||
Print report on the executed tests. \arg{What} defines the type
|
||||
of report. Currently this only supports \const{fixme}, providing
|
||||
details on how the fixme-flagged tests proceeded.
|
||||
\end{description}
|
||||
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Auto-generating tests}
|
||||
\label{sec:wizard}
|
||||
|
||||
Prolog is an interactive environment. Where users of non-interactive
|
||||
systems tend to write tests as code, Prolog developers tend to run
|
||||
queries interactively during development. This interactive testing is
|
||||
generally faster, but the disadvantage is that the tests are lost at the
|
||||
end of the session. The test-wizard tries to combine the advantages. It
|
||||
collects toplevel queries and saves them to a specified file. Later,
|
||||
it extracts these queries from the file and locates the predicates that
|
||||
are tested by the queries. It runs the query and creates a test clause
|
||||
from the query.
|
||||
|
||||
Auto-generating test cases is experimentally supported through the
|
||||
library \pllib{test_wizard}. We briefly introduce the functionality
|
||||
using examples. First step is to log the queries into a file. This is
|
||||
accomplished with the commands below. \file{Queries.pl} is the name in
|
||||
which to store all queries. The user can choose any filename for this
|
||||
purpose. Multiple Prolog instances can share the same name, as data
|
||||
is appended to this file and write is properly locked to avoid file
|
||||
corruption.
|
||||
|
||||
\begin{code}
|
||||
:- use_module(library(test_wizard)).
|
||||
:- set_prolog_flag(log_query_file, 'Queries.pl').
|
||||
\end{code}
|
||||
|
||||
Next, we will illustrate using the library by testing the predicates
|
||||
from library \pllib{lists}. To generate test cases we just make calls
|
||||
on the terminal. Note that all queries are recorded and the system will
|
||||
select the appropriate ones when generating the test unit for a
|
||||
particular module.
|
||||
|
||||
\begin{code}
|
||||
?- member(b, [a,b]).
|
||||
Yes
|
||||
?- reverse([a,b], [b|A]).
|
||||
A = [a] ;
|
||||
No
|
||||
\end{code}
|
||||
|
||||
Now we can generate the test-cases for the module list using
|
||||
make_tests/3:
|
||||
|
||||
\begin{code}
|
||||
?- make_tests(lists, 'Queries.pl', current_output).
|
||||
:- begin_tests(lists).
|
||||
|
||||
test(member, [nondet]) :-
|
||||
member(b, [a, b]).
|
||||
test(reverse, [true(A==[a])]) :-
|
||||
reverse([a, b], [b|A]).
|
||||
|
||||
:- end_tests(lists).
|
||||
\end{code}
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Coverage analysis}
|
||||
\label{sec:cover}
|
||||
|
||||
An important aspect of tests is to know which parts of the program
|
||||
are used (\emph{covered}) by the tests. An experimental analysis
|
||||
is provided by the library \pllib{test_cover}.
|
||||
|
||||
\begin{description}
|
||||
\predicate{show_coverage}{1}{:Goal}
|
||||
Run \arg{Goal} and write a report on which percentage of the clauses in
|
||||
each file are used by the program and which percentage of the clauses
|
||||
always fail.
|
||||
\end{description}
|
||||
|
||||
We illustrate this here using CHAT, a natural language question and
|
||||
answer application by David H.D. Warren and Fernando C.N. Pereira.
|
||||
|
||||
\begin{code}
|
||||
1 ?- show_coverage(test_chat).
|
||||
Chat Natural Language Question Answering Test
|
||||
...
|
||||
|
||||
==================================================================
|
||||
Coverage by File
|
||||
==================================================================
|
||||
File Clauses %Cov %Fail
|
||||
==================================================================
|
||||
/staff/jan/lib/prolog/chat/xgrun.pl 5 100.0 0.0
|
||||
/staff/jan/lib/prolog/chat/newg.pl 186 89.2 18.3
|
||||
/staff/jan/lib/prolog/chat/clotab.pl 28 89.3 0.0
|
||||
/staff/jan/lib/prolog/chat/newdic.pl 275 35.6 0.0
|
||||
/staff/jan/lib/prolog/chat/slots.pl 128 74.2 1.6
|
||||
/staff/jan/lib/prolog/chat/scopes.pl 132 70.5 3.0
|
||||
/staff/jan/lib/prolog/chat/templa.pl 67 55.2 1.5
|
||||
/staff/jan/lib/prolog/chat/qplan.pl 106 75.5 0.9
|
||||
/staff/jan/lib/prolog/chat/talkr.pl 60 20.0 1.7
|
||||
/staff/jan/lib/prolog/chat/ndtabl.pl 42 59.5 0.0
|
||||
/staff/jan/lib/prolog/chat/aggreg.pl 47 48.9 2.1
|
||||
/staff/jan/lib/prolog/chat/world0.pl 131 71.8 1.5
|
||||
/staff/jan/lib/prolog/chat/rivers.pl 41 100.0 0.0
|
||||
/staff/jan/lib/prolog/chat/cities.pl 76 43.4 0.0
|
||||
/staff/jan/lib/prolog/chat/countr.pl 156 100.0 0.0
|
||||
/staff/jan/lib/prolog/chat/contai.pl 334 100.0 0.0
|
||||
/staff/jan/lib/prolog/chat/border.pl 857 98.6 0.0
|
||||
/staff/jan/lib/prolog/chat/chattop.pl 139 43.9 0.7
|
||||
==================================================================
|
||||
\end{code}
|
||||
|
||||
Using \verb$?- show_coverage(run_tests).$, this library currently only
|
||||
shows some rough quality measure for test-suite. Later versions should
|
||||
provide a report to the developer identifying which clauses are covered,
|
||||
not covered and always failed.
|
||||
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Portability of the test-suite}
|
||||
\label{sec:porting}
|
||||
|
||||
One of the reasons to have tests is to simplify migrating code between
|
||||
Prolog implementations. Unfortunately creating a portable test-suite
|
||||
implies a poor integration into the development environment. Luckily,
|
||||
the specification of the test-system proposed here can be ported quite
|
||||
easily to most Prolog systems sufficiently compatible to SWI-Prolog to
|
||||
consider porting your application. Most important is to have support for
|
||||
term_expansion/2.
|
||||
|
||||
In the current system, test units are compiled into sub-modules of the
|
||||
module in which they appear. Few Prolog systems allow for sub-modules
|
||||
and therefore ports may have to fall-back to inject the code in the
|
||||
surrounding module. This implies that support predicates used inside
|
||||
the test unit should not conflict with predicates of the module being
|
||||
tested.
|
||||
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\subsection{PlUnit on SICStus}
|
||||
\label{sec:sicstus}
|
||||
|
||||
The directory of \file{plunit.pl} and \file{swi.pl} must be in the
|
||||
\const{library} search-path. With PLUNITDIR replaced accordingly,
|
||||
add the following into your \file{.sicstusrc} or \file{sicstus.ini}.
|
||||
|
||||
\begin{code}
|
||||
:- set_prolog_flag(language, iso). % for maximal compatibility
|
||||
library_directory('PLUNITDIR').
|
||||
\end{code}
|
||||
|
||||
The current version runs under SICStus 3. Open issues:
|
||||
|
||||
\begin{itemize}
|
||||
|
||||
\item Some messages are unformatted because SICStus 3 reports
|
||||
all ISO errors as instantiation errors.
|
||||
|
||||
\item Only \file{plunit.pl}. Both coverage analysis and the test
|
||||
generation wizard currently require SWI-Prolog.
|
||||
|
||||
\item The \const{load} option \const{normal} is the same as \const{always}.
|
||||
Use \exam{set_test_options(load, never)} to avoid loading the
|
||||
test suites.
|
||||
|
||||
\item The \const{run} option is not supported.
|
||||
|
||||
\item Tests are loaded into the enclosing module instead of a separate
|
||||
test module. This means that predicates in the test module must
|
||||
not conflict with the enclosing module, nor with other test
|
||||
modules loaded into the same module.
|
||||
\end{itemize}
|
||||
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Motivation of choices}
|
||||
\label{sec:motivation}
|
||||
|
||||
\subsection*{Easy to understand and flexible}
|
||||
|
||||
There are two approaches for testing. In one extreme the tests are
|
||||
written using declarations dealing with setup, cleanup, running and
|
||||
testing the result. In the other extreme a test is simply a Prolog goal
|
||||
that is supposed to succeed. We have chosen to allow for any mixture of
|
||||
these approaches. Written down as test/1 we opt for the simple
|
||||
succeeding goal approach. Using options to the test the user can choose
|
||||
for a more declarative specification. The user can mix both approaches.
|
||||
|
||||
The body of the test appears at the position of a clause-body. This
|
||||
simplifies identification of the test body and ensures proper layout and
|
||||
colouring support from the editor without the need for explicit support
|
||||
of the unit test module. Only clauses of test/1 and test/2 may be marked
|
||||
as non-called in environments that perform cross-referencing.
|
||||
|
||||
%\subsection*{Well integrated}
|
||||
|
||||
\printindex
|
||||
|
||||
\end{document}
|
||||
|
(File diff suppressed because it is too large.)
@ -1,251 +0,0 @@
|
||||
/* $Id$
|
||||
|
||||
Part of SWI-Prolog
|
||||
|
||||
Author: Jan Wielemaker
|
||||
E-mail: wielemak@science.uva.nl
|
||||
WWW: http://www.swi-prolog.org
|
||||
Copyright (C): 1985-2006, University of Amsterdam
|
||||
|
||||
This program is free software; you can redistribute it and/or
|
||||
modify it under the terms of the GNU General Public License
|
||||
as published by the Free Software Foundation; either version 2
|
||||
of the License, or (at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public
|
||||
License along with this library; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
|
||||
As a special exception, if you link this library with other files,
|
||||
compiled with a Free Software compiler, to produce an executable, this
|
||||
library does not by itself cause the resulting executable to be covered
|
||||
by the GNU General Public License. This exception does not however
|
||||
invalidate any other reasons why the executable file might be covered by
|
||||
the GNU General Public License.
|
||||
*/
|
||||
|
||||
:- module(prolog_cover,
|
||||
[ show_coverage/1, % :Goal
|
||||
covered_clauses/4 % +Goal, -Result, -Succeeded, -Failed
|
||||
]).
|
||||
:- use_module(library(ordsets)).
|
||||
|
||||
:- set_prolog_flag(generate_debug_info, false).
|
||||
|
||||
/** <module> Clause cover analysis
|
||||
|
||||
The purpose of this module is to find which part of the program has been
|
||||
used by a certain goal. Usage is defined in terms of clauses that have
|
||||
fired, separated into clauses that succeeded at least once and clauses
|
||||
that failed on each occasion.
|
||||
|
||||
This module relies on the SWI-Prolog tracer hooks. It modifies these
|
||||
hooks and collects the results, after which it restores the debugging
|
||||
environment. This has some limitations:
|
||||
|
||||
* The performance degrades significantly (about 10 times)
|
||||
* It is not possible to use the debugger using coverage analysis
|
||||
* The cover analysis tool is currently not thread-safe.
|
||||
|
||||
The result is represented as a list of clause-references. As the
|
||||
references to clauses of dynamic predicates cannot be guaranteed, these
|
||||
are omitted from the result.
|
||||
|
||||
@bug Relies heavily on SWI-Prolog internals. We have considered using
|
||||
a meta-interpreter for this purpose, but it is nearly impossible
|
||||
to do 100% complete meta-interpretation of Prolog. Example
|
||||
problem areas include handling cuts in control-structures
|
||||
and calls from non-interpreted meta-predicates.
|
||||
*/
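% Illustrative usage (not part of the original file): run a goal under the
% analyser and inspect the raw clause sets.
%
%   ?- covered_clauses(run_tests, Result, Succeeded, Failed).
%
% Result is one of =true=, =fail= or error(E); Succeeded and Failed are
% ordered sets of clause references (see covered_clauses/4 below).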
|
||||
|
||||
|
||||
:- dynamic
|
||||
entered/1, % clauses entered
|
||||
exited/1. % clauses completed
|
||||
|
||||
:- module_transparent
|
||||
covering/1,
|
||||
covering/4.
|
||||
|
||||
%% show_coverage(Goal)
|
||||
%
|
||||
% Report on coverage by Goal
|
||||
|
||||
show_coverage(Goal) :-
|
||||
covered_clauses(Goal, Result, Succeeded, Failed),
|
||||
file_coverage(Succeeded, Failed),
|
||||
return(Result).
|
||||
|
||||
return(true).
|
||||
return(fail) :- !, fail.
|
||||
return(error(E)) :-
|
||||
throw(E).
|
||||
|
||||
%% covered_clauses(:Goal, -Result, -Succeeded, -Failed) is det.
|
||||
%
|
||||
% Run Goal as once/1. Unify Result with one of =true=, =fail= or
|
||||
% error(Error).
|
||||
%
|
||||
% @param Succeeded Ordered set of succeeded clauses
|
||||
% @param Failed Ordered set of clauses that are entered but
|
||||
% never succeeded.
|
||||
|
||||
covered_clauses(Goal, Result, Succeeded, Failed) :-
|
||||
asserta(user:prolog_trace_interception(Port, Frame, _, continue) :-
|
||||
prolog_cover:assert_cover(Port, Frame), Ref),
|
||||
port_mask([unify,exit], Mask),
|
||||
'$visible'(V, Mask),
|
||||
'$leash'(L, Mask),
|
||||
trace,
|
||||
call_with_result(Goal, Result),
|
||||
set_prolog_flag(debug, false),
|
||||
covered(Ref, V, L, Succeeded, Failed).
|
||||
|
||||
%% call_with_result(:Goal, -Result) is det.
|
||||
%
|
||||
% Run Goal as once/1. Unify Result with one of =true=, =fail= or
|
||||
% error(Error).
|
||||
|
||||
call_with_result(Goal, Result) :-
|
||||
( catch(Goal, E, true)
|
||||
-> ( var(E)
|
||||
-> Result = true
|
||||
; Result = error(E)
|
||||
)
|
||||
; Result = false
|
||||
).
|
||||
|
||||
port_mask([], 0).
|
||||
port_mask([H|T], Mask) :-
|
||||
port_mask(T, M0),
|
||||
'$syspreds':'$port_bit'(H, Bit), % Private stuff
|
||||
Mask is M0 \/ Bit.
|
||||
|
||||
%% assert_cover(+Port, +Frame) is det.
|
||||
%
|
||||
% Assert coverage of the current clause. We monitor two ports: the
|
||||
% _unify_ port to see which clauses we entered, and the _exit_
|
||||
% port to see which completed successfully.
|
||||
|
||||
assert_cover(unify, Frame) :-
|
||||
running_static_pred(Frame),
|
||||
prolog_frame_attribute(Frame, clause, Cl), !,
|
||||
assert_entered(Cl).
|
||||
assert_cover(exit, Frame) :-
|
||||
running_static_pred(Frame),
|
||||
prolog_frame_attribute(Frame, clause, Cl), !,
|
||||
assert_exited(Cl).
|
||||
assert_cover(_, _).
|
||||
|
||||
%% running_static_pred(+Frame) is semidet.
|
||||
%
|
||||
% True if Frame is not running a dynamic predicate.
|
||||
|
||||
running_static_pred(Frame) :-
|
||||
prolog_frame_attribute(Frame, goal, Goal),
|
||||
\+ predicate_property(Goal, dynamic).
|
||||
|
||||
%% assert_entered(+Ref) is det.
|
||||
%% assert_exited(+Ref) is det.
|
||||
%
|
||||
% Add Ref to the set of entered or exited clauses.
|
||||
|
||||
assert_entered(Cl) :-
|
||||
entered(Cl), !.
|
||||
assert_entered(Cl) :-
|
||||
assert(entered(Cl)).
|
||||
|
||||
assert_exited(Cl) :-
|
||||
exited(Cl), !.
|
||||
assert_exited(Cl) :-
|
||||
assert(exited(Cl)).
|
||||
|
||||
%% covered(+Ref, +VisibleMask, +LeashMask, -Succeeded, -Failed) is det.
|
||||
%
|
||||
% Restore state and collect failed and succeeded clauses.
|
||||
|
||||
covered(Ref, V, L, Succeeded, Failed) :-
|
||||
'$visible'(_, V),
|
||||
'$leash'(_, L),
|
||||
erase(Ref),
|
||||
findall(Cl, (entered(Cl), \+exited(Cl)), Failed0),
|
||||
findall(Cl, retract(exited(Cl)), Succeeded0),
|
||||
retractall(entered(Cl)),
|
||||
sort(Failed0, Failed),
|
||||
sort(Succeeded0, Succeeded).
|
||||
|
||||
|
||||
/*******************************
|
||||
* REPORTING *
|
||||
*******************************/
|
||||
|
||||
%% file_coverage(+Succeeded, +Failed) is det.
|
||||
%
|
||||
% Write a report on the clauses covered organised by file to
|
||||
% current output.
|
||||
|
||||
file_coverage(Succeeded, Failed) :-
|
||||
format('~N~n~`=t~78|~n'),
|
||||
format('~tCoverage by File~t~78|~n'),
|
||||
format('~`=t~78|~n'),
|
||||
format('~w~t~w~64|~t~w~72|~t~w~78|~n',
|
||||
['File', 'Clauses', '%Cov', '%Fail']),
|
||||
format('~`=t~78|~n'),
|
||||
forall(source_file(File),
|
||||
file_coverage(File, Succeeded, Failed)),
|
||||
format('~`=t~78|~n').
|
||||
|
||||
file_coverage(File, Succeeded, Failed) :-
|
||||
findall(Cl, clause_source(Cl, File, _), Clauses),
|
||||
sort(Clauses, All),
|
||||
( ord_intersect(All, Succeeded)
|
||||
-> true
|
||||
; ord_intersect(All, Failed)
|
||||
), !,
|
||||
ord_intersection(All, Failed, FailedInFile),
|
||||
ord_intersection(All, Succeeded, SucceededInFile),
|
||||
ord_subtract(All, SucceededInFile, UnCov1),
|
||||
ord_subtract(UnCov1, FailedInFile, Uncovered),
|
||||
length(All, AC),
|
||||
length(Uncovered, UC),
|
||||
length(FailedInFile, FC),
|
||||
CP is 100-100*UC/AC,
|
||||
FCP is 100*FC/AC,
|
||||
summary(File, 56, SFile),
|
||||
format('~w~t ~D~64| ~t~1f~72| ~t~1f~78|~n', [SFile, AC, CP, FCP]).
|
||||
file_coverage(_,_,_).
|
||||
|
||||
|
||||
summary(Atom, MaxLen, Summary) :-
|
||||
atom_length(Atom, Len),
|
||||
( Len < MaxLen
|
||||
-> Summary = Atom
|
||||
; SLen is MaxLen - 5,
|
||||
sub_atom(Atom, _, SLen, 0, End),
|
||||
atom_concat('...', End, Summary)
|
||||
).

%%  clause_source(+Clause, -File, -Line) is det.
%%  clause_source(-Clause, +File, -Line) is det.

clause_source(Clause, File, Line) :-
    nonvar(Clause), !,
    clause_property(Clause, file(File)),
    clause_property(Clause, line_count(Line)).
clause_source(Clause, File, Line) :-
    source_file(Pred, File),
    \+ predicate_property(Pred, multifile),
    nth_clause(Pred, _Index, Clause),
    clause_property(Clause, line_count(Line)).
clause_source(Clause, File, Line) :-
    Pred = _:_,
    predicate_property(Pred, multifile),
    nth_clause(Pred, _Index, Clause),
    clause_property(Clause, file(File)),
    clause_property(Clause, line_count(Line)).
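%   Illustrative query (added for exposition; assumes a loaded predicate
%   such as lists:append/3):
%
%       ?- nth_clause(lists:append(_, _, _), 1, Ref),
%          clause_source(Ref, File, Line).
%
%   binds File to the file defining that clause and Line to its line
%   number, via clause_property/2.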
@ -1,211 +0,0 @@
:- module(test_wizard,
          [ make_tests/3,               % +Module, +File, +Out
            make_test/3                 % +Callable, -Module, -Test
          ]).
:- use_module(library(time)).
:- use_module(library(lists)).
:- use_module(library(listing)).
:- use_module(library(readutil)).

/** <module> Test Generation Wizard

Tasks

        * Accumulate user queries
        * Suggest tests from user queries
*/

setting(max_time(5)).


                 /*******************************
                 *        UNIT GENERATION       *
                 *******************************/

%%  make_tests(+Module, +File, +Out) is det.
%
%   Create tests from queries stored in File and write the tests for
%   Module to the stream Out.

make_tests(Module, File, Out) :-
    read_file_to_terms(File, Queries, []),
    findall(Test, ( member(Q, Queries),
                    make_test(Q, Module, Test)), Tests),
    (   Tests == []
    ->  true
    ;   format(Out, ':- begin_tests(~q).~n~n', [Module]),
        maplist(portray_clause(Out), Tests),
        format(Out, '~n:- end_tests(~q).~n', [Module])
    ).
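%   Illustrative use (hypothetical file names, added for exposition):
%
%       ?- open('test_lists.pl', write, Out),
%          make_tests(lists, 'queries.log', Out),
%          close(Out).
%
%   reads the logged queries from queries.log and writes a plunit test
%   unit for module lists to test_lists.pl.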


                 /*******************************
                 *        TEST GENERATION       *
                 *******************************/

%%  make_test(+Query:callable, -Module, -Test:term) is det.
%
%   Generate a test from a query. Test is returned as a clause of
%   test/1 or test/2 to be inserted between begin_tests and
%   end_tests.

make_test(Query0, Module, (test(Name, Options) :- Query)) :-
    find_test_module(Query0, Module, Query),
    pred_name(Query, Name),
    setting(max_time(Max)),
    test_result(Module:Query, Max, Options).
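%   For example (hypothetical query, added for exposition), a logged
%   query lists:msort([b,a], X) may yield a test clause such as
%
%       test(msort, [true(X == [a, b])]) :- msort([b, a], X).
%
%   provided the goal succeeds deterministically within the time limit.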

%%  find_test_module(+QuerySpec, ?Module, -Query).
%
%   Find module to test from a query. Note that it is very common
%   for toplevel usage to rely on SWI-Prolog's DWIM.
%
%   @tbd    What if multiple modules match? We can select the
%           local one or ask the user.

find_test_module(Var, _, _) :-
    var(Var), !, fail.
find_test_module(M:Query, M0, Query) :- !,
    M0 = M.
find_test_module(Query, M, Query) :-
    current_predicate(_, M:Query),
    \+ predicate_property(M:Query, imported_from(_M2)).
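%   For example (added for exposition):
%
%       ?- find_test_module(lists:msort([b,a], X), M, Q).
%       M = lists,
%       Q = msort([b,a], X).
%
%   An unqualified query is instead resolved to the module that defines
%   the predicate rather than one merely importing it.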

%%  pred_name(+Callable, -Name) is det.
%
%   Suggest a name for the test. In the plunit framework the name
%   need not be unique, so we simply take the predicate name.

pred_name(Callable, Name) :-
    strip_module(Callable, _, Term),
    functor(Term, Name, _Arity).
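%   For example (added for exposition):
%
%       ?- pred_name(lists:msort([b,a], X), Name).
%       Name = msort.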

%%  test_result(+Callable, +Maxtime, -Result) is det.
%
%   Try running goal and get meaningful results. Results are:
%
%       * true(Templ == Var)
%       * fail
%       * all(Templ == Bindings)
%       * throws(Error)
%       * timeout

test_result(Callable, Maxtime, Result) :-
    term_variables(Callable, Vars),
    make_template(Vars, Templ),
    catch(call_with_time_limit(Maxtime,
                               findall(Templ-Det,
                                       call_test(Callable, Det),
                                       Bindings)),
          E, true),
    (   var(E)
    ->  success(Bindings, Templ, Result)
    ;   error(E, Result)
    ).

%%  success(+Bindings, +Templ, -Result) is det.
%
%   Create test-results from non-error cases.

success([], _, [fail]) :- !.
success([[]-true], _, []) :- !.
success([S1-true], Templ, [ true(Templ == S1) ]) :- !.
success([[]-false], _, [ nondet ]) :- !.
success([S1-false], Templ, [ true(Templ == S1), nondet ]) :- !.
success(ListDet, Templ, [all(Templ == List)]) :-
    strip_det(ListDet, List).

strip_det([], []).
strip_det([H-_|T0], [H|T]) :-
    strip_det(T0, T).
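%   For example (added for exposition): a goal that succeeds twice with
%   template X bound to a and then b gives Bindings = [a-false, b-true],
%   which the last success/3 clause turns into [all(X == [a, b])].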

%%  error(+ErrorTerm, -Result)

error(Error0, [throws(Error)]) :-
    generalise_error(Error0, Error).


generalise_error(error(Formal, _), error(Formal, _)) :- !.
generalise_error(Term, Term).


%%  make_template(+Vars, -Template) is det.
%
%   Make a nice looking template

make_template([], []) :- !.
make_template([One], One) :- !.
make_template([One, Two], One-Two) :- !.
make_template(List, Vars) :-
    Vars =.. [v|List].
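%   For example (added for exposition): one variable is used as-is, two
%   become a pair, and more are wrapped in a v/N term:
%
%       ?- make_template([X, Y, Z], T).
%       T = v(X, Y, Z).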

%%  call_test(:Goal, -Det) is nondet.
%
%   True if Goal succeeded. Det is unified to =true= if Goal left
%   no choicepoints and =false= otherwise.

call_test(Goal, Det) :-
    Goal,
    deterministic(Det).


                 /*******************************
                 *            COLLECT           *
                 *******************************/

/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Collect toplevel queries if the Prolog flag log_query_file points to the
name of a writeable file. The file is opened in append-mode for
exclusive write to allow for concurrent operation from multiple Prolog
systems using the same logfile.

The file is written in UTF-8 encoding and using ignore_ops(true) to
ensure it can be read.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

:- multifile
    user:message_hook/3.

user:message_hook(toplevel_goal(Goal0, Bindings), _Level, _Lines) :-
    open_query_log(Out),
    bind_vars(Bindings),
    clean_goal(Goal0, Goal),
    call_cleanup(format(Out, '~W.~n', [Goal, [ numbervars(true),
                                               quoted(true),
                                               ignore_ops(true)
                                             ]]), close(Out)),
    fail.

clean_goal(Var, _) :-
    var(Var), !, fail.
clean_goal(user:Goal, Goal) :- !.
clean_goal(Goal, Goal).

bind_vars([]).
bind_vars([Name=Var|T]) :-
    Var = '$VAR'(Name),
    bind_vars(T).
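%   For example (added for exposition): with log_query_file set, a
%   toplevel query such as "X is 1+2" is appended to the log roughly as
%
%       is(X,+(1,2)).
%
%   because bind_vars/1 turns each binding into a '$VAR'/1 term so that
%   numbervars(true) prints the original variable names, while
%   ignore_ops(true) writes operators in canonical functional notation.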

open_query_log(Out) :-
    current_prolog_flag(log_query_file, File),
    exists_file(File), !,
    open(File, append, Out,
         [ encoding(utf8),
           lock(write)
         ]).
open_query_log(Out) :-
    current_prolog_flag(log_query_file, File),
    access_file(File, write), !,
    open(File, write, Out,
         [ encoding(utf8),
           lock(write),
           bom(true)
         ]),
    format(Out,
           '/* SWI-Prolog query log. This file contains all syntactically\n \
correct queries issued in this directory. It is used by the\n \
test wizard to generate unit tests.\n\
*/~n~n', []).


@ -1 +1 @@
Subproject commit 4ce1e49b493a8571a2159660195adcf5f353af79
Subproject commit 82c00310523e08d1bd314374aed335e4b511c823
@ -1 +1 @@
Subproject commit 3a23a5129738fa08327922bda35bb5458191aaa5
Subproject commit 28338edf66fc4618518fa4be9bf6e28bc81a7aaf
@ -1045,3 +1045,4 @@ make.
'$fetch_stream_alias'(OldStream,Alias) :-
    stream_property(OldStream, alias(Alias)), !.

'$require'(_Ps, _M).
@ -48,6 +48,7 @@
'$directive'(parallel).
'$directive'(public(_)).
'$directive'(op(_,_,_)).
'$directive'(require(_)).
'$directive'(set_prolog_flag(_,_)).
'$directive'(reconsult(_)).
'$directive'(reexport(_)).
@ -104,6 +105,8 @@
    '$module_transparent'(P, M).
'$exec_directive'(noprofile(P), _, M) :-
    '$noprofile'(P, M).
'$exec_directive'(require(Ps), _, M) :-
    '$require'(Ps, M).
'$exec_directive'(dynamic(P), _, M) :-
    '$dynamic'(P, M).
'$exec_directive'(thread_local(P), _, M) :-

@ -301,6 +301,9 @@ putenv(Na,Val) :-
getenv(Na,Val) :-
    '$getenv'(Na,Val).

setenv(Na,Val) :-
    '$putenv'(Na,Val).

%%% Saving and restoring a computation

save(A) :- save(A,_).