new CPLint and ProbLog versions.

Vítor Santos Costa 2011-09-15 15:49:06 +01:00
parent 91791f8e3d
commit cdd33b8c1a
9 changed files with 1516 additions and 739 deletions

View File

@ -2,8 +2,8 @@
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% $Date: 2011-04-08 19:30:08 +0200 (Fri, 08 Apr 2011) $
% $Revision: 5887 $
% $Date: 2011-09-02 11:23:22 +0200 (Fri, 02 Sep 2011) $
% $Revision: 6475 $
%
% This file is part of ProbLog
% http://dtai.cs.kuleuven.be/problog
@ -229,7 +229,10 @@
problog_kbest/4,
problog_kbest_save/6,
problog_max/3,
problog_kbest_explanations/3,
problog_exact/3,
problog_all_explanations/2,
problog_all_explanations_unsorted/2,
problog_exact_save/5,
problog_montecarlo/3,
problog_dnf_sampling/3,
@ -302,7 +305,7 @@
:- yap_flag(unknown,error).
% general yap modules
:- use_module(library(lists), [append/3,member/2,memberchk/2,reverse/2,select/3,nth1/3,nth1/4,nth0/4]).
:- use_module(library(lists), [append/3,member/2,memberchk/2,reverse/2,select/3,nth1/3,nth1/4,nth0/4,sum_list/2]).
:- use_module(library(terms), [variable_in_term/2,variant/2] ).
:- use_module(library(random), [random/1]).
:- use_module(library(system), [tmpnam/1,shell/2,delete_file/1,delete_file/2]).
@ -1274,6 +1277,8 @@ print_ad_intern((A1;B1),[A2|B2],Mass,Handle) :-
print_ad_intern(_::Fact,[],Mass,Handle) :-
P2 is 1.0 - Mass,
format(Handle,'~10f :: ~q',[P2,Fact]).
print_ad_intern(P::A1,[A2],Mass,Handle) :-
once(print_ad_intern_one(P::A1,A2,Mass,_NewMass,Handle)).
print_ad_intern_one(_::Fact,_::AuxFact,Mass,NewMass,Handle) :-
% ask problog to get the fact_id
once(probabilistic_fact(P,AuxFact,_FactID)),
@ -2099,6 +2104,40 @@ init_problog_low(Threshold) :-
nb_setval(problog_completed_proofs, Trie_Completed_Proofs),
init_problog(Threshold).
% generalizing problog_max to return all explanations, sorted by non-increasing probability
problog_all_explanations(Goal,Expl) :-
problog_all_explanations_unsorted(Goal,Unsorted),
keysort(Unsorted,Decreasing),
reverse(Decreasing,Expl).
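% Hypothetical usage sketch (program and numbers are illustrative only):
% given 0.6::e(a,b). and 0.3::e(b,c). and a path/2 predicate defined over
% e/2, a call such as
%   ?- problog_all_explanations(path(a,c), Expl).
% is expected to bind Expl to probability-explanation pairs like
% [0.18-[e(a,b),e(b,c)]], in non-increasing order of probability.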
problog_all_explanations_unsorted(Goal, _) :-
init_problog_low(0.0),
problog_control(off, up),
timer_start(sld_time),
problog_call(Goal),
add_solution,
fail.
problog_all_explanations_unsorted(_,Expl) :-
timer_stop(sld_time,SLD_Time),
problog_var_set(sld_time, SLD_Time),
nb_getval(problog_completed_proofs, Trie_Completed_Proofs),
explanations_from_trie(Trie_Completed_Proofs,Expl).
% catch base cases
explanations_from_trie(Trie,[]) :-
empty_ptree(Trie),!.
explanations_from_trie(Trie,[1.0-[]]) :-
traverse_ptree(Trie,[true]),!.
explanations_from_trie(Trie_Completed_Proofs,Expl) :-
findall(Prob-Facts,
(traverse_ptree(Trie_Completed_Proofs,L),
findall(P,(member(A,L),get_fact_log_probability(A,P)),Ps),
sum_list(Ps,LS),
Prob is exp(LS),
get_fact_list(L,Facts)
),Expl).
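% Each proof stored in the trie is a list of fact IDs; its probability is
% the product of the facts' probabilities, computed above as the exp of the
% sum of their log probabilities, e.g. facts with probabilities 0.5 and 0.4
% give exp(log 0.5 + log 0.4) = 0.2.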
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% approximate inference: bounds by iterative deepening up to interval width Delta
% problog_delta(+Goal,+Delta,-LowerBound,-UpperBound,-Status)
@ -2351,6 +2390,15 @@ problog_kbest(Goal, K, Prob, Status) :-
eval_dnf(Trie_Completed_Proofs,Prob,Status),
delete_ptree(Trie_Completed_Proofs).
% generalizes problog_max to return the k best explanations
problog_kbest_explanations(Goal, K, Explanations) :-
problog_flag(first_threshold,InitT),
init_problog_kbest(InitT),
problog_control(off,up),
problog_kbest_id(Goal, K),
retract(current_kbest(_,ListFound,_NumFound)),
to_external_format_with_reverse(ListFound,Explanations).
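% Hypothetical usage sketch, reusing the illustrative program assumed for
% problog_all_explanations/2 above:
%   ?- problog_kbest_explanations(path(a,c), 1, Expl).
% keeps only the most probable explanation, Expl = [0.18-[e(a,b),e(b,c)]].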
problog_real_kbest(Goal, K, Prob, Status) :-
problog_flag(first_threshold,InitT),
init_problog_kbest(InitT),
@ -2463,6 +2511,15 @@ take_k_best(In,K,OutOf,Out) :-
take_k_best(R,K,OutOf2,Out)
).
to_external_format_with_reverse(Intern,Extern) :-
to_external_format_with_reverse(Intern,[],Extern).
to_external_format_with_reverse([],Extern,Extern).
to_external_format_with_reverse([LogP-FactIDs|Intern],Acc,Extern) :-
Prob is exp(LogP),
( FactIDs = [_|_] -> get_fact_list(FactIDs, Facts);
Facts = FactIDs),
to_external_format_with_reverse(Intern,[Prob-Facts|Acc],Extern).
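% Illustrative values: an internal list in ascending log-probability order
% such as [-0.7-IDsA, -0.2-IDsB] becomes [0.82-FactsB, 0.50-FactsA]
% (exp(-0.2) ~ 0.82, exp(-0.7) ~ 0.50); the accumulator reverses the order
% while the fact IDs are mapped to the corresponding facts.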
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% exact probability
% problog_exact(+Goal,-Prob,-Status)
@ -3073,6 +3130,7 @@ problog_bdd_forest(Goals) :-
unrequire(keep_ground_ids),
reset_non_ground_facts,
bdd_par_file(BDDParFile),
% format('Vars: ~w~n',[Vars]),
tell(BDDParFile),
bdd_vars_script(Vars),
flush_output, % isn't this called by told/0?
@ -3089,8 +3147,8 @@ problog_bdd_forest(Goals) :-
problog_bdd_forest_supported :- build_trie_supported.
% Iterate over all Goals, write BDD scripts and collect variables used.
write_bdd_forest([],VarsTot,VarsTot,_).
write_bdd_forest([Goal|Rest],VarsAcc,VarsTot,N):-
write_bdd_forest([],AtomsTot,AtomsTot,_).
write_bdd_forest([Goal|Rest],AtomsAcc,AtomsTot,N) :-
build_trie(Goal, Trie),
write_nth_bdd_struct_script(N, Trie, Vars),
(problog_flag(verbose, true)->
@ -3100,9 +3158,15 @@ write_bdd_forest([Goal|Rest],VarsAcc,VarsTot,N):-
),
delete_ptree(Trie),
N2 is N+1,
list_to_ord_set(Vars,VarsSet),
ord_union(VarsAcc,VarsSet,VarsAcc2),
once(write_bdd_forest(Rest,VarsAcc2,VarsTot,N2)).
% map 'not id' to id in Vars
findall(ID,member((not ID),Vars),NegativeAtoms),
findall(ID,(member(ID,Vars),ID \= (not _)),PositiveAtoms),
% format('PositiveAtoms: ~w~n',[PositiveAtoms]),
% format('NegativeAtoms: ~w~n',[NegativeAtoms]),
append(PositiveAtoms,NegativeAtoms,Atoms),
list_to_ord_set(Atoms,AtomsSet),
ord_union(AtomsAcc,AtomsSet,AtomsAcc2),
once(write_bdd_forest(Rest,AtomsAcc2,AtomsTot,N2)).
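% Illustrative example: if Vars is [3, not 5, 7], then PositiveAtoms is
% [3,7], NegativeAtoms is [5], and the ordered set built from [3,7,5] is
% merged into the accumulator, so a negated and a plain occurrence of the
% same ID are counted only once.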
% Write files
write_nth_bdd_struct_script(N,Trie,Vars) :-

View File

@ -2,8 +2,8 @@
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% $Date: 2011-04-26 15:48:52 +0200 (Tue, 26 Apr 2011) $
% $Revision: 6371 $
% $Date: 2011-08-19 13:13:56 +0200 (Fri, 19 Aug 2011) $
% $Revision: 6471 $
%
% This file is part of ProbLog
% http://dtai.cs.kuleuven.be/problog
@ -210,15 +210,15 @@
]).
% general yap modules
:- use_module(library(lists),[reverse/2,member/2,memberchk/2,append/3]).
:- use_module(library(lists),[member/2,append/3]).
:- use_module(flags).
:- style_check(all).
:- yap_flag(unknown,error).
:- discontiguous user:(<--)/2, problog:(<--)/2.
:- discontiguous user:myclause/1, problog:myclause/1. % notation of ADs in LFI-ProbLog
:- op( 550, yfx, :: ).
% for annotated disjunctions
@ -230,48 +230,70 @@
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
term_expansion_intern_ad( (Head<--Body),Module,Mode,Result) :-
problog_flag(ad_cpl_semantics,AD_CPL_Semantics),
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% term_expansion_intern_ad( +AD, +Module, +Mode, -ListOfAtoms)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
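% Illustrative sketch of the compilation (fact names follow
% create_mws_atom/5; numbers are rounded): with ID 1 and no CPL body
% variables, the AD
%   0.3::a ; 0.4::b <-- c.
% yields the aux facts 0.3::mvs_fact_1_1 and 0.571429::mvs_fact_1_2, where
% the second probability is rescaled to 0.4/(1-0.3), and the aux clauses
%   a :- c, mvs_fact_1_1.
%   b :- c, problog_not(mvs_fact_1_1), mvs_fact_1_2.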
:- bb_put(ad_converter_unique_id,1).
term_expansion_intern_ad((Head<--Body), Module, Mode, [user:ad_intern((Head<--Body),ID,Aux_Facts)|Result]) :-
% the internal ID for the annotated disjunction
bb_get(ad_converter_unique_id,ID),
ID2 is ID+1,
bb_put(ad_converter_unique_id,ID2),
% if CPL semantics is on we need to add all body variables to the
% auxiliary probabilistic facts to ensure that each grounding
% of an AD "triggers" a new CP event
(
proper_tunable_annotated_disjunction(Head)
->
compile_tunable_annotated_disjunction(Head,Body,Facts,Bodies,ID,AD_CPL_Semantics,Mode);
(
proper_annotated_disjunction(Head,Sum_of_P_in_Head)
->
compile_annotated_disjunction(Head,Body,Facts,Bodies,ID,AD_CPL_Semantics,Mode,Sum_of_P_in_Head);
throw(error(invalid_annotated_disjunction,(Head<--Body)))
)
problog_flag(ad_cpl_semantics,true) ->
term_variables(Body,Body_Vars)
;
Body_Vars=[]
),
% construct the auxiliary facts we need to represent the AD
(
% if it's a tunable AD, create tunable auxiliary facts
proper_tunable_ad_head(Head) ->
create_tunable_ad_aux_facts(Head,Body_Vars,ID,1,Aux_Facts)
;
% if it's a regular AD, create auxiliary facts
proper_ad_head(Head,0.0) ->
create_ad_aux_facts(Head,Body_Vars,ID,1,0.0,Aux_Facts)
;
% neither of the two, so complain
throw(error(invalid_annotated_disjunction,(Head<--Body)))
),
% call term_expansion for the aux facts; this has the same effect
% as if the user had defined the facts in the original file
findall(problog:Atom,(
member(F,Facts),
member(F,Aux_Facts),
once(problog:term_expansion_intern(F,Module,Atom))
),Result_Atoms),
% construct the auxiliary clauses
create_aux_bodies(Head,Body_Vars,Body,ID,1,Aux_Facts,Mode,Aux_Clauses),
(
Mode==lfi_learning
->
findall(Module:myclause(H,B),member((H:-B),Bodies),Result_Bodies);
findall(Module:B,member(B,Bodies),Result_Bodies)
Mode==lfi_learning ->
findall(Module:myclause(H,B),member((H:-B),Aux_Clauses),Result,Result_Atoms)
;
findall(Module:B,member(B,Aux_Clauses),Result,Result_Atoms)
),
append(Result_Atoms,Result_Bodies,Result),
problog_flag(show_ad_compilation,Show_AD_compilation),
(
Show_AD_compilation==true
problog_flag(show_ad_compilation,true)
->
(
format('Compiling the annotated disjunction~n ~q~ninto the following code~n',[(Head<--Body)]),
format('================================================~n',[]),
forall(member(F,Facts),format(' ~q.~n',[F])),
forall(member(F,Aux_Facts),format(' ~q.~n',[F])),
format(' - - - - - - - - - - - - - - - - - - - - - - ~n',[]),
forall(member(B,Bodies),format(' ~q.~n',[B])),
forall(member(B,Aux_Clauses),format(' ~q.~n',[B])),
format('================================================~2n',[])
);
true
@ -283,167 +305,155 @@ term_expansion_intern_ad( (Head<--Body),_,_) :-
fail.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% proper_ad_head(+Head, +Acc)
%
% this predicate succeeds if Head is a valid disjunction
% of probabilistic facts as used in the head of an AD
% in particular, it checks that all probabilities are
% valid and the sum does not exceed 1.0
%
% it will throw an exception if any of the probabilities P in
% P::A
% cannot be evaluated using is/2
%
% ?- proper_ad_head( 0.1::a, 0.1).
% yes
% ?- proper_ad_head( (0.1::a,0.8::b), 0.1).
% no
% ?- proper_ad_head( (0.1::a;0.8::b), 0.1).
% yes
% ?- proper_ad_head( (0.1::a;0.8::b;0.2::c), 0.1).
% no
% ?- proper_ad_head( (0.1::a;0.4::true), 0.1).
% no
% ?- ad_converter:proper_ad_head( (1/2::a;0.4::foo(X)), 0.1).
% true
% ?- ad_converter:proper_ad_head( (goo::a;0.4::foo(X)), 0.1).
% ERROR at clause 2 of ad_converter:proper_ad_head/2 !!
% TYPE ERROR
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
get_next_unique_id(ID) :-
proper_ad_head( P :: A, Acc) :-
P>=0.0,
P+Acc=<1.0,
\+ var(A),
\+ system_predicate(_,A),
once((atom(A);compound(A))).
proper_ad_head((P :: A;T),Acc) :-
\+ var(A),
\+ system_predicate(_,A),
once((atom(A);compound(A))),
P>=0.0,
Acc2 is P+Acc,
proper_ad_head(T,Acc2).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% proper_tunable_ad_head(+Head)
%
% this predicate succeeds if Head is a valid disjunction of
% tunable probabilistic facts as used in the head of an AD
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
proper_tunable_ad_head( t(_)::A ) :-
\+ var(A),
\+ system_predicate(_,A),
once((atom(A);compound(A))).
proper_tunable_ad_head( ( t(_)::A ;T) ) :-
\+ var(A),
\+ system_predicate(_,A),
once((atom(A);compound(A))),
proper_tunable_ad_head(T).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% create_mws_atom(+Atom,+Body_Vars,+ID,+Pos,-A2)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
create_mws_atom(A,Body_Vars,ID,Pos,A2) :-
A =.. [_F|Args],
append(Args,Body_Vars,Args2),
atomic_concat([mvs_fact_,ID,'_',Pos],F2),
A2 =.. [F2|Args2].
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% create_ad_aux_facts(+Head,+Vars,+ID,+POS,+Acc,-Facts)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
create_ad_aux_facts(P::_, _, _, _, Acc, []) :-
% if the probabilities in the head of the AD
% sum up to 1.0, drop the last aux fact
abs(Acc+P-1.0) < 0.0000001,
!.
create_ad_aux_facts(P::Atom, Body_Vars, ID, Pos, Acc, [P1::ProbFact]) :-
create_mws_atom(Atom,Body_Vars,ID,Pos,ProbFact),
(
bb_get(mvs_unique_id,ID)
->
true;
ID=1
),
ID2 is ID+1,
bb_put(mvs_unique_id,ID2).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
proper_annotated_disjunction(AD,Sum) :-
proper_annotated_disjunction(AD,0.0,Sum),
Sum=<1.
proper_annotated_disjunction( P :: _, OldSum,NewSum) :-
% evaluate P
P2 is P,
P2>=0,
P2=<1,
NewSum is OldSum+P.
proper_annotated_disjunction((X;Y),OldSum,Sum) :-
proper_annotated_disjunction(X,OldSum,NewSum),
proper_annotated_disjunction(Y,NewSum,Sum).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
proper_tunable_annotated_disjunction( t(_) :: _).
proper_tunable_annotated_disjunction((X;Y)) :-
proper_tunable_annotated_disjunction(X),
proper_tunable_annotated_disjunction(Y).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
compile_tunable_annotated_disjunction(Head,Body,Facts2,Bodies2,Extra_ID,AD_CPL_Semantics,Mode) :-
get_next_unique_id(Extra_ID),
(
AD_CPL_Semantics==true
->
term_variables(Body,Body_Vars);
Body_Vars=[]
),
convert_a_tunable(Head,Extra_ID,[],Facts0,Body_Vars),
problog_flag(ad_sumto1_learning,AD_SumTo1_Learning),
(
AD_SumTo1_Learning==true
->
Facts0=[_|Facts1];
Facts1=Facts0
),
reverse(Facts1,Facts2),
convert_b(Head,Body,_NewBody,Extra_ID,[],Bodies,Body_Vars,Mode,Facts2),
reverse(Bodies,Bodies2).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
compile_annotated_disjunction(Head,Body,Facts2,Bodies2,Extra_ID,AD_CPL_Semantics,Mode,ProbSum) :-
get_next_unique_id(Extra_ID),
(
AD_CPL_Semantics==true
->
term_variables(Body,Body_Vars);
Body_Vars=[]
),
convert_a(Head,0.0,_Acc,Extra_ID,[],Facts0,Body_Vars),
(
abs(ProbSum-1.0) < 0.0000001
->
Facts0=[_|Facts1];
Facts1=Facts0
),
reverse(Facts1,Facts2),
convert_b(Head,Body,_NewBody,Extra_ID,[],Bodies,Body_Vars,Mode,Facts2),
reverse(Bodies,Bodies2).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
convert_a((X;Y),OldAcc,Acc,Extra_ID,OldFacts,Facts,Body_Vars) :-
convert_a(X,OldAcc,NewAcc,Extra_ID,OldFacts,NewFacts,Body_Vars),
convert_a(Y,NewAcc,Acc,Extra_ID,NewFacts,Facts,Body_Vars).
convert_a(P::Atom,OldAcc,NewAcc,Extra_ID,OldFacts,[P1::ProbFact|OldFacts],Body_Vars) :-
Atom =.. [Functor|AllArguments],
append(AllArguments,Body_Vars,NewAllArguments),
length(AllArguments,Arity),
atomic_concat([mvs_fact_,Functor,'_',Arity,'_',Extra_ID],NewAtom),
ProbFact =.. [NewAtom|NewAllArguments],
(
(P=:=0; OldAcc=:=0)
->
P1 is P;
P1 is min(P/(1-OldAcc),1.0)
),
NewAcc is OldAcc+P.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
convert_a_tunable((X;Y),Extra_ID,OldFacts,Facts,Body_Vars) :-
convert_a_tunable(X,Extra_ID,OldFacts,NewFacts,Body_Vars),
convert_a_tunable(Y,Extra_ID,NewFacts,Facts,Body_Vars).
convert_a_tunable(t(_)::Atom,Extra_ID,OldFacts,[t(_)::ProbFact|OldFacts],Body_Vars) :-
Atom =.. [Functor|AllArguments],
append(AllArguments,Body_Vars,NewAllArguments),
length(AllArguments,Arity),
atomic_concat([mvs_fact_,Functor,'_',Arity,'_',Extra_ID],NewAtom),
ProbFact =.. [NewAtom|NewAllArguments].
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
convert_b((X;Y),OldBody,Body,ExtraID,OldBodies,Bodies,Body_Vars,Mode,Facts) :-
convert_b(X,OldBody,NewBody,ExtraID,OldBodies,NewBodies,Body_Vars,Mode,Facts),
convert_b(Y,NewBody,Body,ExtraID,NewBodies,Bodies,Body_Vars,Mode,Facts).
convert_b(_::Atom,OldBody,NewBody,Extra_ID,OldBodies,[(Atom:-ThisBody)|OldBodies],Body_Vars,Mode,Facts) :-
Atom =.. [Functor|AllArguments],
append(AllArguments,Body_Vars,NewAllArguments),
length(AllArguments,Arity),
atomic_concat([mvs_fact_,Functor,'_',Arity,'_',Extra_ID],NewFunctor),
ProbFact =.. [NewFunctor|NewAllArguments],
(
memberchk(_::ProbFact,Facts)
->
tuple_append(OldBody,ProbFact,ThisBody);
ThisBody=OldBody
),
(
Mode==lfi_learning
->
tuple_append(OldBody,\+ProbFact,NewBody);
tuple_append(OldBody,problog_not(ProbFact),NewBody)
(P=:=0; Acc=:=0)->
P1 is P
;
P1 is min(P/(1-Acc),1.0)
).
create_ad_aux_facts((P::Atom;T), Body_Vars, ID, Pos, Acc, [P1::ProbFact|T2]) :-
create_mws_atom(Atom,Body_Vars,ID,Pos,ProbFact),
(
(P=:=0; Acc=:=0)->
P1 is P
;
P1 is min(P/(1-Acc),1.0)
),
Acc2 is Acc+P,
Pos2 is Pos+1,
create_ad_aux_facts(T,Body_Vars,ID,Pos2,Acc2,T2).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
create_tunable_ad_aux_facts(t(_)::_,_,_,Pos,[]) :-
Pos>1,
problog_flag(ad_sumto1_learning,true),
!.
create_tunable_ad_aux_facts(t(_)::Atom,Body_Vars,ID,Pos,[t(_)::ProbFact]) :-
create_mws_atom(Atom,Body_Vars,ID,Pos,ProbFact).
create_tunable_ad_aux_facts((t(_)::Atom;T),Body_Vars,ID,Pos,[t(_)::ProbFact|T2]) :-
create_mws_atom(Atom,Body_Vars,ID,Pos,ProbFact),
Pos2 is Pos+1,
create_tunable_ad_aux_facts(T,Body_Vars,ID,Pos2,T2).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
create_aux_bodies(_::Atom, Body_Vars, Body, ID, Pos, Aux_Facts , _, [(Atom:-Body2)]) :-
create_mws_atom(Atom,Body_Vars,ID,Pos,ProbFact),
(
member(_::ProbFact,Aux_Facts)->
tuple_append(Body,ProbFact,Body2)
;
Body2=Body
).
create_aux_bodies((_::Atom; T), Body_Vars, Body, ID, Pos, Aux_Facts , Mode, [(Atom:-Body2)|T2]) :-
create_mws_atom(Atom,Body_Vars,ID,Pos,ProbFact),
tuple_append(Body,ProbFact,Body2),
(
Mode==lfi_learning ->
tuple_append(Body,\+ProbFact,Body3)
;
tuple_append(Body,problog_not(ProbFact),Body3)
),
Pos2 is Pos+1,
create_aux_bodies(T,Body_Vars,Body3,ID,Pos2,Aux_Facts,Mode,T2).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%

View File

@ -70,7 +70,8 @@ CPLINT_PROGRAMS= \
$(CPLINT_SRCDIR)/approx/params.pl \
$(CPLINT_SRCDIR)/approx/parsing.pl \
$(CPLINT_SRCDIR)/approx/tptreefile.pl \
$(CPLINT_SRCDIR)/approx/utility.pl
$(CPLINT_SRCDIR)/approx/utility.pl \
$(CPLINT_SRCDIR)/approx/tptree_lpad.pl
CPLINT_SEMANTICS_PROGRAMS= \
$(CPLINT_SRCDIR)/semlpadsld.pl \
@ -143,6 +144,10 @@ CPLINT_DOCS=\
$(CPLINT_DOCDIR)/manual0x.png \
$(CPLINT_DOCDIR)/Makefile
CPLINT_LEARNING_PROGRAMS=\
$(CPLINT_SRCDIR)/em \
$(CPLINT_SRCDIR)/rib
all: $(SOBJS)
@ -171,6 +176,8 @@ install: all
for h in $(CPLINT_SEMANTICS_PROGRAMS); do $(INSTALL_DATA) $$h $(DESTDIR)$(SHAREDIR); done
$(INSTALL_PROGRAM) $(SOBJS) $(DESTDIR)$(YAPLIBDIR)
$(INSTALL_PROGRAM) approx/simplecuddLPADs/LPADBDD $(SHAREDIR)
cp -R $(CPLINT_LEARNING_PROGRAMS) $(DESTDIR)$(SHAREDIR)/cplint
installcheck:
for h in ${CPLINT_TEST_PROGRAMS}; do echo "t. halt." | $(BINDIR)/yap -l $$h; done

View File

@ -9,6 +9,7 @@
.cmr-12{font-size:120%;}
.cmtt-10{font-family: monospace;}
.cmtt-10{font-family: monospace;}
.cmbx-10{ font-weight: bold;}
.cmti-10{ font-style: italic;}
p.noindent { text-indent: 0em }
td p.noindent { text-indent: 0em; margin-top:0em; }

File diff suppressed because it is too large

Binary file not shown.

View File

@ -1,5 +1,5 @@
\ifnum\pdfoutput>0 % pdflatex compilation
\documentclass[a4paper,12pt]{article}
\documentclass[a4paper,10pt]{article}
\usepackage[pdftex]{graphicx}
\DeclareGraphicsExtensions{.pdf,.png,.jpg}
\RequirePackage[hyperindex]{hyperref}
@ -7,16 +7,16 @@
\documentclass{article}
\usepackage{graphicx}
\DeclareGraphicsExtensions{.png, .gif, .jpg}
\newcommand{\url}[1]{\Link[#1]{}{} #1 \EndLink}
\newcommand{\href}[2]{\Link[#1]{}{} #2 \EndLink}
\newcommand{\hypertarget}[2]{\Link[]{}{#1} #2 \EndLink}
\newcommand{\hyperlink}[2]{\Link[]{#1}{} #2 \EndLink}
\newcommand{\url}[1]{\Link[#1]{}{} #1 \EndLink}
\fi
\begin{document}
\title{\texttt{cplint} Version 2.0 Manual}
\title{\texttt{cplint} Manual}
\author{Fabrizio Riguzzi\\
@ -28,69 +28,41 @@ fabrizio.riguzzi@unife.it}
\section{Introduction}
\texttt{cplint} is a suite of programs for reasoning with LPADs \cite{VenVer03-TR,VenVer04-ICLP04-IC} and CP-logic programs \cite{VenDenBru-JELIA06,CP-logic-unp}.
It consists of three Prolog modules for answering queries using goal-oriented procedures plus
three
Prolog modules for answering queries using the definition of the semantics of LPADs and CP-logic.
The modules for answering queries using goal-oriented procedures are \texttt{lpadsld.pl}, \texttt{lpad.pl} and
\texttt{cpl.pl}:
\begin{itemize}
\item \texttt{lpadsld.pl}: computes the probability of a query using the top-down procedure described in
\cite{Rig-AIIA07-IC} and \cite{Rig-RCRA07-IC}. It is based on SLDNF resolution and is an adaptation of the interpreter for ProbLog \cite{DBLP:conf/ijcai/RaedtKT07}.
It was proved correct \cite{Rig-RCRA07-IC} with respect to the semantics of LPADs for range restricted acyclic programs \cite{DBLP:journals/ngc/AptB91} without function symbols.
It is also able to deal with extensions of LPADs and CP-logic: the clause bodies can contain \texttt{setof} and \texttt{bagof}, the probabilities in the head may depend on variables in the body and it is possible to specify a uniform distribution in the head with reference to a \texttt{setof} or \texttt{bagof} operator. These extended features have been introduced in order to represent CLP(BN) \cite{SanPagQaz03-UAI-IC} programs and PRM models \cite{Getoor+al:JMLR02}:
\texttt{setof} and \texttt{bagof} allow expressing the dependency of an attribute on an aggregate function of another attribute, as in CLP(BN) and PRM, while the possibility of specifying a uniform distribution allows the use of the reference uncertainty feature of PRM.
\item \texttt{lpad.pl}: computes the probability of a query using a top-down procedure based on SLG resolution \cite{DBLP:journals/jacm/ChenW96}. As a consequence, it works for any sound LPADs, i.e., any LPAD such that each of its instances has a two valued well founded model.
\item \texttt{cpl.pl}: computes the probability of a query using a top-down procedure based on SLG resolution and moreover checks that the CP-logic program is valid, i.e., that it has at least an execution model.
\end{itemize}
The modules for answering queries using the definition of the semantics of LPADs and CP-logic are \texttt{semlpadsld.pl}, \texttt{semlpad.pl} and
\texttt{semcpl.pl}:
\begin{itemize}
\item \texttt{semlpadsld.pl}: given an LPAD $P$, it generates all the instances of $P$. The probability of a query $Q$ is computed by identifying all the instances where $Q$ is derivable by SLDNF resolution.
\item \texttt{semlpad.pl}: given an LPAD $P$, it generates all the instances of $P$. The probability of a query $Q$ is computed by identifying all the instances where $Q$ is derivable by SLG resolution.
\item \texttt{semcpl.pl}: given an LPAD $P$, it builds an execution model of $P$, i.e., a probabilistic process that satisfies the principles of universal causation, sufficient causation, independent causation, no deus ex machina events and temporal precedence. It uses the definition of the semantics given in \cite{CP-logic-unp}.
\end{itemize}
%For program with function symbols, the semantics of LPADs and CP-logic are not defined. However, the interpreter accepts programs with function symbols and, if it does not go into a loop, it returns an answer. What is the meaning of this answer is subject of current study.
\texttt{cplint} is a suite of programs for reasoning with ICL \cite{DBLP:journals/ai/Poole97}, LPADs \cite{VenVer03-TR,VenVer04-ICLP04-IC} and CP-logic programs \cite{VenDenBru-JELIA06,DBLP:journals/tplp/VennekensDB09}. It contains programs both for inference and learning.
\section{Installation}
\texttt{cplint} is distributed in source code in the git version of Yap. It includes Prolog and C files. Download it by following the instruction in \url{http://www.ncc.up.pt/~vsc/Yap/downloads.html}.
\texttt{cplint} is distributed in source code in the CVS version of Yap. It includes Prolog and C files. Download it by following the instruction in \url{http://www.ncc.up.pt/~vsc/Yap/downloads.html}.
\texttt{cplint} requires cudd and glib-2.0.
You can download cudd from \url{http://vlsi.colorado.edu/~fabio/CUDD/}.
\texttt{cplint} requires \href{http://vlsi.colorado.edu/~fabio/CUDD/}{CUDD} and glib-2.0.
You can download CUDD from \url{ftp://vlsi.colorado.edu/pub/cudd-2.4.2.tar.gz}.
You can download glib-2.0 (version $\geq 2.0$) from \url{http://www.gtk.org/}. This is a standard GNU package
so it is easy to install it using the package management software of your Linux or Cygwin
distribution.
Compile cudd:
Compile CUDD:
\begin{enumerate}
\item download \texttt{cudd-2.4.2.tar.gz}
\item decompress it
\item decompress cudd-2.4.2.tar.gz
\item \texttt{cd cudd-2.4.2}
\item check makefile options
\item \texttt{make}
\item see the \texttt{README} file for instructions on compilation
\end{enumerate}
Install Yap together with \texttt{cplint}:
when compiling Yap following the instuction of the \texttt{INSTALL} file in the root of the Yap folder, use
when compiling Yap following the instruction of the \texttt{INSTALL} file in the root of the Yap folder, use
\begin{verbatim}
configure --enable-cplint=DIR
\end{verbatim}
Under Windows, you have to use Cygwin (glu does not compile under MinGW), so\\
where \verb|DIR| is the directory where CUDD is, i.e., the directory ending with \texttt{cudd-2.4.2}.
Under Windows, you have to use Cygwin (CUDD does not compile under MinGW), so\\
\begin{verbatim}
configure --enable-cplint=DIR --enable-cygwin
\end{verbatim}
where \texttt{DIR} is the path to the directory \texttt{cudd-2.4.2} (including \texttt{cudd-2.4.2}).
After having performed \texttt{make install} you can do \texttt{make installcheck}, which will execute a suite of tests of the various programs. If no error is reported, you have a working installation of \texttt{cplint}.
\section{Syntax}
LPAD and CP-logic programs consist of a set of annotated disjunctive clauses.
Disjunction in the head is represented with a semicolon and atoms in the head are separated from probabilities by a colon. For the rest, the usual syntax of Prolog is used.
For example, the CP-logic clause
$$h_1:p_1\vee \ldots \vee h_n:p_n\leftarrow b_1,\dots,b_m ,\neg c_1,\ldots,\neg c_l$$
@ -127,10 +99,68 @@ toss(coin).
\end{verbatim}
The first clause states that if we toss a coin that is not biased, it has equal probability of landing heads and tails. The second states that if the coin is biased, it has a slightly higher probability of landing heads. The third states that the coin is fair with probability 0.9 and biased with probability 0.1, and the last clause states that we toss a coin with certainty.
Moreover, the bodies of rules can contain the built-in predicates:
\begin{verbatim}
is/2, >/2, </2, >=/2 ,=</2,
=:=/2, =\=/2, true/0, false/0,
=/2, ==/2, \=/2 ,\==/2, length/2
\end{verbatim}
The bodies can also contain the following
library predicates:
\begin{verbatim}
member/2, max_list/2, min_list/2
nth0/3, nth/3
\end{verbatim}
plus the predicate
\begin{verbatim}
average/2
\end{verbatim}
that, given a list of numbers, computes its arithmetic mean.
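For instance, a hypothetical clause (with \texttt{heights/1} and \texttt{height/2} assumed to be defined elsewhere) could use \texttt{average/2} in its body as follows:
\begin{verbatim}
tall(X):0.9 :- heights(L), average(L,M), height(X,H), H > M.
\end{verbatim}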
The syntax of ICL programs is the one used by the \href{http://www.cs.ubc.ca/~poole/aibook/code/ailog/ailog2.html}{AILog 2} system.
\section{Inference}
\texttt{cplint} contains various modules for answering queries.
%It consists of three Prolog modules for answering queries using goal-oriented procedures plus
% three
%Prolog modules for answering queries using the definition of the semantics of LPADs and CP-logic.
These modules answer queries using goal-oriented procedures:
\begin{itemize}
\item \texttt{lpadsld.pl}: uses the top-down procedure described in
\cite{Rig-AIIA07-IC} and \cite{Rig-RCRA07-IC}. It is based on SLDNF resolution and is an adaptation of the interpreter for ProbLog \cite{DBLP:conf/ijcai/RaedtKT07}.
It was proved correct \cite{Rig-RCRA07-IC} with respect to the semantics of LPADs for range restricted acyclic programs \cite{DBLP:journals/ngc/AptB91} without function symbols.
It is also able to deal with extensions of LPADs and CP-logic: the clause bodies can contain \texttt{setof} and \texttt{bagof}, the probabilities in the head may depend on variables in the body and it is possible to specify a uniform distribution in the head with reference to a \texttt{setof} or \texttt{bagof} operator. These extended features have been introduced in order to represent CLP(BN) \cite{SanPagQaz03-UAI-IC} programs and PRM models \cite{Getoor+al:JMLR02}:
\texttt{setof} and \texttt{bagof} allow expressing the dependency of an attribute on an aggregate function of another attribute, as in CLP(BN) and PRM, while the possibility of specifying a uniform distribution allows the use of the reference uncertainty feature of PRM.
\item \texttt{picl.pl}: performs inference on ICL programs \cite{Rig09-LJIGPL-IJ}
\item \texttt{lpad.pl}: uses a top-down procedure based on SLG resolution \cite{DBLP:journals/jacm/ChenW96}. As a consequence, it works for any sound LPADs, i.e., any LPAD such that each of its instances has a two valued well founded model.
\item \texttt{cpl.pl}: uses a top-down procedure based on SLG resolution and moreover checks that the CP-logic program is valid, i.e., that it has at least an execution model.
\item Modules for approximate inference:
\begin{itemize}
\item \texttt{deepit.pl} performs iterative deepening \cite{BraRig10-ILP10-IC}
\item \texttt{deepdyn.pl} performs dynamic iterative deepening \cite{BraRig10-ILP10-IC}
\item \texttt{bestk.pl} performs k-Best \cite{BraRig10-ILP10-IC}
\item \texttt{bestfirst.pl} performs best first \cite{BraRig10-ILP10-IC}
\item \texttt{montecarlo.pl} performs Monte Carlo \cite{BraRig10-ILP10-IC}
\item \texttt{mcintyre.pl}: implements the algorithm MCINTYRE (Monte Carlo INference wiTh Yap REcord) \cite{Rig11-CILC11-NC}
\end{itemize}
\item \texttt{approx/exact.pl}: like \texttt{lpadsld.pl}, but uses SimplecuddLPADs, a modification of the \href{www.cs.kuleuven.be/~theo/tools/simplecudd.html}{Simplecudd} library, instead of the \texttt{cplint} library for building BDDs and computing the probability.
\end{itemize}
These modules answer queries using the definition of the semantics of LPADs and CP-logic:
\begin{itemize}
\item \texttt{semlpadsld.pl}: given an LPAD $P$, it generates all the instances of $P$. The probability of a query $Q$ is computed by identifying all the instances where $Q$ is derivable by SLDNF resolution.
\item \texttt{semlpad.pl}: given an LPAD $P$, it generates all the instances of $P$. The probability of a query $Q$ is computed by identifying all the instances where $Q$ is derivable by SLG resolution.
\item \texttt{semcpl.pl}: given an LPAD $P$, it builds an execution model of $P$, i.e., a probabilistic process that satisfies the principles of universal causation, sufficient causation, independent causation, no deus ex machina events and temporal precedence. It uses the definition of the semantics given in \cite{DBLP:journals/tplp/VennekensDB09}.
\end{itemize}
%For program with function symbols, the semantics of LPADs and CP-logic are not defined. However, the interpreter accepts programs with function symbols and, if it does not go into a loop, it returns an answer. What is the meaning of this answer is subject of current study.
\section{Commands}
All six modules accept the same commands for reading in files and answering queries.
\subsection{Commands}
%All six modules accept the same commands for reading in files and answering queries.
The LPAD or CP-logic program must be stored in a text file with extension \texttt{.cpl}. Suppose you have stored the example above in file \texttt{coin.cpl}.
In order to answer queries from this program, you have to run Yap,
load one of the modules (such as for example \texttt{lpad.pl}) by issuing the command
@ -168,7 +198,52 @@ after \texttt{p(file).} Moreover, you can build an execution process given a con
When using \texttt{cpl.pl} you can print a partial execution model including all the clauses involved in the query issued with \texttt{print.} \texttt{cpl.pl} can print the messages ``Unsound program'', ``It requires the choice of a head atom from a non ground head'' and ``Invalid program''.
For \texttt{approx/deepit.pl} and \texttt{approx/deepdyn.pl} the command
\begin{verbatim}
solve(GoalsList, ProbLow, ProbUp, ResTime, BddTime)
\end{verbatim}
takes as input a list of goals \texttt{GoalsList} and returns a lower bound on the probability \texttt{ProbLow}, an upper bound on the probability \texttt{ProbUp}, the CPU time spent on performing resolution \texttt{ResTime} and the CPU time spent on handling BDDs \texttt{BddTime}.
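For example, assuming the coin program above has been loaded and defines \texttt{heads/1}, a call could look like
\begin{verbatim}
solve([heads(coin)], ProbLow, ProbUp, ResTime, BddTime).
\end{verbatim}
and, when the algorithm stops, the difference between \texttt{ProbUp} and \texttt{ProbLow} is below the \verb|min_error| threshold described among the parameters below.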
For \texttt{approx/bestk.pl} the command
\begin{verbatim}
solve(GoalsList, ProbLow, ResTime, BddTime)
\end{verbatim}
takes as input a list of goals \texttt{GoalsList} and returns a lower bound on the probability \texttt{ProbLow}, the CPU time spent on performing resolution \texttt{ResTime} and the CPU time spent on handling BDDs \texttt{BddTime}.
For \texttt{approx/bestfirst.pl} the command
\begin{verbatim}
solve(GoalsList, ProbLow, ProbUp, Count, ResTime, BddTime)
\end{verbatim}
takes as input a list of goals \texttt{GoalsList} and returns a lower bound on the probability \texttt{ProbLow}, an upper bound on the probability \texttt{ProbUp}, the number of BDDs generated by the algorithm \texttt{Count}, the CPU time spent on performing resolution \texttt{ResTime} and the CPU time spent on handling BDDs \texttt{BddTime}.
For \texttt{approx/montecarlo.pl}
the command
\begin{verbatim}
solve(GoalsList, Samples, Time, Low, Prob, Up)
\end{verbatim}
takes as input a list of goals \texttt{GoalsList} and returns the number of samples taken \texttt{Samples},
the time required to solve the problem \texttt{Time}, the
lower end of the confidence interval \texttt{Low}, the estimated probability \texttt{Prob} and the upper end of the confidence interval \texttt{Up}.
For \texttt{mcintyre.pl}:
the command
\begin{verbatim}
solve(Goals, Samples, CPUTime, WallTime, Lower, Prob, Upper)
\end{verbatim}
takes as input a conjunction of goals \texttt{Goals} and returns the number of samples taken \texttt{Samples},
the CPU time required to solve the problem \texttt{CPUTime}, the wall time required to solve the problem \texttt{WallTime}, the
lower end of the confidence interval \texttt{Lower}, the estimated probability \texttt{Prob} and the upper end of the confidence interval \texttt{Upper}.
For \texttt{approx/exact.pl}
the command
\begin{verbatim}
solve(GoalsList, Prob, ResTime, BddTime)
\end{verbatim}
takes as input a list of goals \texttt{GoalsList} and returns the probability \texttt{Prob}, the CPU time spent on performing resolution \texttt{ResTime} and the CPU time spent on handling BDDs \texttt{BddTime}.
\subsubsection{Parameters}
The modules make use of a number of parameters in order to control their behavior. They can be set with the command
\begin{verbatim}
set(parameter,value).
@ -182,7 +257,7 @@ from the Yap prompt.
The available parameters are:
\begin{itemize}
\item
\verb|epsilon_parsing| (valid for all six modules): if (1 - the sum of the probabilities of all the head atoms) is smaller than
\verb|epsilon_parsing| (valid for all modules): if (1 - the sum of the probabilities of all the head atoms) is smaller than
\verb|epsilon_parsing|
then \texttt{cplint} adds the null events to the head. Default value 0.00001
\item \verb|save_dot| (valid for all goal-oriented modules): if \texttt{true} a graph representing the BDD is saved in the file \texttt{cpl.dot} in the current directory in dot format.
@ -196,15 +271,20 @@ Variables: [(2,[X=2,X1=1]),(2,[X=1,X1=0]),(1,[])]
In the example above variable \texttt{X0} corresponds to clause \texttt{2} with the substitutions \texttt{X=2,X1=1},
variable \texttt{X1} corresponds to clause \texttt{2} with the substitutions \texttt{X=1,X1=0} and
variable \texttt{X2} corresponds to clause \texttt{1} with the empty substitution.
You can view the graph with \texttt{graphviz} (\url{www.graphviz.org}) using the
You can view the graph with \href{www.graphviz.org}{\texttt{graphviz}} using the
command
\begin{verbatim}
dotty cpl.dot &
\end{verbatim}
\item \verb|ground_body| (valid for \texttt{lpadsld.pl} and all semantic modules): determines how non ground clauses are treated: if \texttt{true}, ground clauses are obtained from a non ground clause by replacing each variable with a constant, if \texttt{false}, ground clauses are obtained by replacing only variables in the head with a constant. In the case where the body contains variables not in the head, setting it to false means that the body represents an existential event.
\item \verb|ground_body|: (valid for \texttt{lpadsld.pl} and all semantic modules) determines how non ground clauses are treated: if \texttt{true}, ground clauses are obtained from a non ground clause by replacing each variable with a constant, if \texttt{false}, ground clauses are obtained by replacing only variables in the head with a constant. In the case where the body contains variables not in the head, setting it to false means that the body represents an existential event.
\item \verb|min_error|: (valid for \texttt{approx/deepit.pl}, \texttt{approx/deepdyn.pl}, \texttt{approx/bestk.pl}, \texttt{approx/bestfirst.pl}, \texttt{approx/montecarlo.pl} and \texttt{mcintyre.pl}) is the threshold under which the difference between upper and lower bounds on probability must fall for the algorithm to stop.
\item \verb|k|: maximum number of explanations for \texttt{approx/bestk.pl} and \texttt{approx/bestfirst.pl} and number of samples to take at each iteration for \texttt{approx/montecarlo.pl} and \texttt{mcintyre.pl}
\item \verb|prob_bound|: (valid for \texttt{approx/deepit.pl}, \texttt{approx/deepdyn.pl}, \texttt{approx/bestk.pl} and \texttt{approx/bestfirst.pl}) is the initial bound on the probability of explanations when iteratively building explanations
\item \verb|prob_step|: (valid for \texttt{approx/deepit.pl}, \texttt{approx/deepdyn.pl}, \texttt{approx/bestk.pl} and \texttt{approx/bestfirst.pl}) is the increment on the bound on the probability of explanations when iteratively building explanations
\item \verb|timeout|: (valid for \texttt{approx/deepit.pl}, \texttt{approx/deepdyn.pl}, \texttt{approx/bestk.pl}, \texttt{approx/bestfirst.pl} and \texttt{approx/exact.pl}) timeout for building BDDs
\end{itemize}
\section{Semantic Modules}
\subsection{Semantic Modules}
The three semantic modules need to produce a grounding of the program in order to compute the semantics.
They require an extra file with extension \texttt{.uni} (for universe) in the same directory where the \texttt{.cpl} file is.
@ -228,42 +308,8 @@ The file \texttt{.uni} can contain both universe and mode declaration, the ones
With \texttt{semcpl.pl} only mode declarations can be used.
\section{Extensions}
In this section we will present the extensions to the syntax of LPADs and CP-logic programs that \texttt{cplint} can handle.
The first is the use of some standard Prolog predicates.
The bodies can contain the built-in predicates:
\begin{verbatim}
is/2
>/2
</2
>=/2
=</2
=:=/2
=\=/2
true/0
false/0
=/2
==/2
\=/2
\==/2
length/2
\end{verbatim}
The bodies can also contain the following
library predicates:
\begin{verbatim}
member/2
max_list/2
min_list/2
nth0/3
nth/3
\end{verbatim}
plus the predicate
\begin{verbatim}
average/2
\end{verbatim}
that, given a list of numbers, computes its arithmetic mean.
\subsection{Extensions}
In this section we will present the extensions to the syntax of LPADs and CP-logic programs that \texttt{lpadsld} can handle.
When using \texttt{lpadsld.pl}, the bodies can contain the predicates \texttt{setof/3} and \texttt{bagof/3} with the same meaning as in Prolog. Existential quantifiers are allowed in both, so for example the query
\begin{verbatim}
@ -284,13 +330,9 @@ male(C):M/P ; female(C):F/P:-
person(f).
known_female(a).
known_female(b).
known_female(c).
known_male(d).
known_male(e).
\end{verbatim}
The disjunctive rule expresses the probability that a person of unknown sex is male or female, depending on the number of males and females that are known.
@ -364,11 +406,11 @@ This is an example where the probabilities in the head do not sum up to one so t
The first clause states that, if the topic of a paper \texttt{X} is theory and of paper \texttt{Y} is theory, there is a probability of 0.005 that there is a citation from \texttt{X} to \texttt{Y}. The other clauses consider the remaining cases for the topics.
\section{Additional Files}
\subsection{Files}
In the directory where Yap keeps the library files (usually \texttt{/usr/local/share/ Yap}) you can find the directory \texttt{cplint} that contains the files:
\begin{itemize}
\item \verb|testlpadsld_gbtrue.pl, testlpadsld_gbfalse.pl, testlpad.pl,|
\verb|testcpl.pl, testsemlpadsld.pl, testsemlpad.pl, testsemcpl.pl|: Prolog programs for testing the modules. They are executed when issuing the command \texttt{make installcheck} during the installation. To execute them afterwards, load the file and issue the command \texttt{t.}
\item \texttt{testlpadsld\_gbtrue.pl, testlpadsld\_gbfalse.pl, testlpad.pl,
testcpl.pl, testsemlpadsld.pl, testsemlpad.pl, testsemcpl.pl}: Prolog programs for testing the modules. They are executed when issuing the command \texttt{make installcheck} during the installation. To execute them afterwards, load the file and issue the command \texttt{t.}
\item Subdirectory \texttt{examples}:
\begin{itemize}
\item \texttt{alarm.cpl}: representation of the Bayesian network in Figure 2 of
@ -388,22 +430,149 @@ In the directory where Yap keeps the library files (usually \texttt{/usr/local/s
source distribution of Yap in the \texttt{CLPBN} directory.
\item \verb|school_simple.cpl|: simplified version of \texttt{school.cpl}.
\item \verb|student.cpl|: student example from Figure 1.3 of \cite{GetFri01-BC}.
\item \texttt{win.cpl, light.cpl, trigger.cpl, throws.cpl, hiv.cpl,}\\ \texttt{ invalid.cpl}: programs taken from \cite{CP-logic-unp}. \texttt{invalid.cpl} is an example of a program that is invalid but sound.
\item \texttt{win.cpl, light.cpl, trigger.cpl, throws.cpl, hiv.cpl,}\\ \texttt{ invalid.cpl}: programs taken from \cite{DBLP:journals/tplp/VennekensDB09}. \texttt{invalid.cpl} is an example of a program that is invalid but sound.
\end{itemize}
The files \texttt{*.uni} that are present for some of the examples are used by the semantic modules. Some of the example files contain, in an initial comment, some queries together with their results.
\item Subdirectory \texttt{doc}: contains this manual in LaTeX, HTML and PDF.
\end{itemize}
\section{Learning}
\texttt{cplint} contains the following learning algorithms:
\begin{itemize}
\item CEM (\texttt{cplint} EM): an implementation of EM for learning parameters that is based on \texttt{lpadsld.pl} \cite{RigDiM11-ML-IJ}
\item RIB (Relational Information Bottleneck): an algorithm for learning parameters based on the Information Bottleneck \cite{RigDiM11-ML-IJ}
\item EMBLEM (EM over Bdds for probabilistic Logic programs Efficient Mining): an implementation of EM for learning parameters that computes expectations directly on BDDs \cite{BelRig11-CILC11-NC,BelRig11-TR}
\item SLIPCASE (Structure LearnIng of ProbabilistiC logic progrAmS with Em over bdds): an algorithm for learning the structure of programs that is based on EMBLEM \cite{BelRig11-ILP11-IC}
\end{itemize}
\subsection{Input}
To execute the learning algorithms, prepare four files in the same folder:
\begin{itemize}
\item \texttt{<stem>.kb}: contains the example interpretations
\item \texttt{<stem>.bg}: contains the background knowledge, i.e., knowledge valid for all interpretations
\item \texttt{<stem>.l}: contains language bias information
\item \texttt{<stem>.cpl}: contains the LPAD for which you want to learn the parameters, or the initial LPAD for SLIPCASE
\end{itemize}
where \texttt{<stem>} is your dataset name. Examples of these files can be found in the dataset pages.
In \texttt{<stem>.kb} the example interpretations have to be given as a list of Prolog facts initiated by
\texttt{begin(model(<name>)).} and terminated by \texttt{end(model(<name>)).} as in
\begin{verbatim}
begin(model(b1)).
sameperson(1,2).
movie(f1,1).
movie(f1,2).
workedunder(1,w1).
workedunder(2,w1).
gender(1,female).
gender(2,female).
actor(1).
actor(2).
end(model(b1)).
\end{verbatim}
The interpretations may contain a fact of the form
\begin{verbatim}
prob(0.3).
\end{verbatim}
assigning a probability (0.3 in this case) to the interpretation. If this is omitted, the probability of each interpretation is considered equal to $1/n$ where $n$ is the total number of interpretations. \verb|prob/1| can be used to set a different multiplicity for the different interpretations.
In order for RIB to work, the input interpretations must share the Herbrand universe. If this is not the case, you have to translate the interpretations in this way; see, for example, the \texttt{sp1} files in RIB's folder, which are the results of the conversion of the first fold of the IMDB dataset.
\texttt{<stem>.bg} can contain Prolog clauses that can be used to derive additional conclusions from the atoms in
the interpretations.
\texttt{<stem>.l} contains the declarations of the input and output predicates, of the unseen predicates and the commands for setting the algorithms' parameters.
Output predicates are declared as
\begin{verbatim}
output(<predicate>/<arity>).
\end{verbatim}
and define the predicates whose atoms in the input interpretations are used as the goals for the prediction of which you want to optimize the parameters. Derivations for these goals are built by the system.
Input predicates are those for the predictions of which you do not want to optimize the parameters. You can declare closed world input predicates with
\begin{verbatim}
input_cw(<predicate>/<arity>).
\end{verbatim}
For these predicates, the only true atoms are those in the interpretations; the clauses in the input program are not used to derive atoms not present in the interpretations.
Open world input predicates are declared with
\begin{verbatim}
input(<predicate>/<arity>).
\end{verbatim}
In this case, if a subgoal for such a predicate is encountered when deriving the atoms for the output predicates,
both the facts in the interpretations and the clauses of the input program are used.
For RIB, if there are unseen predicates, i.e., predicates that are present in the input program but not in the interpretations, you have to declare them with
\begin{verbatim}
unseen(<predicate>/<arity>).
\end{verbatim}
For SLIPCASE, you have to specify the language bias by means of mode declarations in the style of
\href{http://www.doc.ic.ac.uk/~shm/progol.html}{Progol}.
\begin{verbatim}
modeh(<recall>,<predicate>(<arg1>,...)).
\end{verbatim}
specifies the atoms that can appear in the head of clauses, while
\begin{verbatim}
modeb(<recall>,<predicate>(<arg1>,...)).
\end{verbatim}
specifies the atoms that can appear in the body of clauses.
\texttt{<recall>} can be an integer or \texttt{*} (currently unused).
The arguments are of the form
\begin{verbatim}
+<type>
\end{verbatim}
for specifying an input variable of type \texttt{<type>}, or
\begin{verbatim}
-<type>
\end{verbatim}
for specifying an output variable of type \texttt{<type>}.
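As an illustrative sketch (the predicate and type names are made up), a language bias could be
\begin{verbatim}
modeh(*,advisedby(+person,+person)).
modeb(*,professor(+person)).
modeb(*,publication(-title,+person)).
\end{verbatim}
stating that \texttt{advisedby/2} atoms may appear in clause heads with two input variables of type \texttt{person}, while \texttt{professor/1} and \texttt{publication/2} atoms may appear in clause bodies.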
\subsection{Parameters}
In order to set the algorithms' parameters, you have to insert in \texttt{<stem>.l} commands of the form
\begin{verbatim}
:- set(<parameter>,<value>).
\end{verbatim}
The available parameters are:
\begin{itemize}
\item \verb|depth| (values: integer or \verb|inf|, default value: 3): depth of derivations if \verb|depth_bound| is set to \verb|true|
\item \verb|single_var| (values: \verb|{true,false}|, default value: \verb|false|, valid for CEM only): if set to \verb|true|, there is a random variable for each clause, instead of a separate random variable for each grounding of a clause
\item \verb|sample_size| (values: integer, default value: 1000): total number of examples in the case in which the models in the \verb|.kb| file contain a \verb|prob(P).| fact. In that case, one model corresponds to \verb|sample_size*P| examples; for instance, with the default value 1000 and \verb|prob(0.3).|, one model counts as 300 examples
\item \verb|epsilon_em| (values: real, default value: 0.1, valid for CEM only): if the difference in the log likelihood in two successive EM iterations is smaller
than \verb|epsilon_em|, then EM stops
\item \verb|epsilon_em_fraction| (values: real, default value: 0.01, valid for CEM only): if the difference in the log likelihood in two successive EM iterations is smaller
than \verb|epsilon_em_fraction|*(-current log likelihood), then EM stops
\item \verb|random_restarts_number| (values: integer, default value: 1, valid for CEM only): number of random restarts
\item \verb|setrand| (values: rand(integer,integer,integer)): seed for the random functions, see Yap manual for allowed values
\item \verb|minimal_step| (values: [0,1], default value: 0.005, valid for RIB only): minimal increment of $\gamma$
\item \verb|maximal_step| (values: [0,1], default value: 0.1, valid for RIB only): maximal increment of $\gamma$
\item \verb|logsize_fraction| (values: [0,1], default value 0.9, valid for RIB only): RIB stops when $\mathbf{I}(CH,T;Y)$ is above \verb|logsize_fraction| times its maximum value ($\log |CH,T|$, see \cite{DBLP:journals/jmlr/ElidanF05})
\item \verb|delta| (values: negative integer, default value -10, valid for RIB only): value assigned to $\log 0$
\item \verb|epsilon_fraction| (values: integer, default value 100, valid for RIB only): in the computation of the step, the value of $\epsilon$ of \cite{DBLP:journals/jmlr/ElidanF05} is obtained as $\log |CH,T|\times$\verb|epsilon_fraction|
\item \verb|max_rules| (values: integer, default value: 6000, valid for RIB only): maximum number of ground rules. Used to set the size of arrays for storing internal statistics. Can be increased as much as memory allows.
\end{itemize}
\subsection{Commands}
To execute CEM, load \texttt{em.pl} and call:
\begin{verbatim}
?- em(stem).
\end{verbatim}
To execute RIB, load \texttt{rib.pl} and call:
\begin{verbatim}
?- ib_par(stem).
\end{verbatim}
\section{License}
\label{license}
\texttt{cplint}, as Yap, follows the Artistic License 2.0 that you can find in the Yap CVS root directory. The copyright is by Fabrizio Riguzzi.
\vspace{3mm}
The program uses the library \href{http://vlsi.colorado.edu/~fabio/}{CUDD} for manipulating BDDs that is included in glu.
The modules in the approx subdirectory use SimplecuddLPADs, a modification of the \href{www.cs.kuleuven.be/~theo/tools/simplecudd.html}{Simplecudd} library whose copyright is by Katholieke Universiteit Leuven and that follows the Artistic License 2.0.
\vspace{3mm}
Some modules use the library \href{http://vlsi.colorado.edu/~fabio/}{CUDD} for manipulating BDDs that is included in glu.
For the use of CUDD, the following license must be accepted:
\vspace{3mm}

View File

@ -4,8 +4,11 @@
Goal oriented interpreter for LPADs based on SLDNF
Copyright (c) 2007, Fabrizio Riguzzi
*/
:-dynamic rule/4,def_rule/2,setting/2.
%:- set_prolog_flag(debug,on).
%:- set_prolog_flag(discontiguous_warnings,on).
%:- set_prolog_flag(single_var_warnings,on).
%:- source.
:-dynamic rule/5,rule_by_num/8,rule_uniform/8,def_rule/2,setting/2.
:-use_module(library(lists)).
:-use_module(library(ugraphs)).
@ -16,14 +19,17 @@
set(Parameter,Value) */
setting(epsilon_parsing,0.00001).
setting(save_dot,false).
setting(ground_body,false).
setting(ground_body,true).
/* available values: true, false
if true, both the head and the body of each clause will be grounded, otherwise
only the head is grounded. In the case in which the body contains variables
not appearing in the head, the body represents an existential event */
setting(min_error,0.01).
setting(initial_depth_bound,4).
setting(depth_bound,4).
setting(prob_threshold,0.00001).
setting(prob_bound,0.01).
/* end of list of parameters */
/* s(GoalsList,Prob) computes the probability of a list of goals
@ -101,39 +107,43 @@ solve(GoalsList,Prob,CPUTime1,CPUTime2,WallTime1,WallTime2):-
format(user_error,"~nMemory after inference~n",[]),
print_mem.*/
/* iterative deepening, depth bounded
for negative goals, if their derivation is cut, then they are
added to the head of the list of goals to be resolved at the next depth bound*/
si(GoalsList,ProbL,ProbU,CPUTime):-
statistics(cputime,[_,_]),
setting(depth_bound,D),
setting(initial_depth_bound,D),
solve_i([(GoalsList,[])],[],D,ProbL,ProbU),
statistics(cputime,[_,CT]),
CPUTime is CT/1000.
/* solve_i(L0,Succ,D,ProbL0,ProbU0): L0 is a list of pairs (G,Der) where
G is a list of goals to be resolved and Der is an explanation, D is the
current depth, ProbL0 is the lower bound of the probability and ProbU0 is the upper
bound
*/
solve_i(L0,Succ,D,ProbL0,ProbU0):-
(findall((G1,Deriv),(member((G0,C0),L0),solvei(G0,D,C0,Deriv,G1)),L)->
findall((G1,Deriv),(member((G0,C0),L0),solvei(G0,D,C0,Deriv,G1)),L),
% print_mem,
separate_ulbi(L,[],LL0,[],LU,[],Incomplete),
append(Succ,LL0,LL),
compute_prob_deriv(LL,ProbL),
append(Succ,LU,LU1),
compute_prob_deriv(LU1,ProbU),
Err is ProbU-ProbL,
setting(min_error,ME),
(Err<ME->
ProbU0=ProbU,
ProbL0=ProbL
;
setting(depth_bound,DB),
D1 is D+DB,
solve_i(Incomplete,LL,D1,ProbL0,ProbU0)
)
separate_ulbi(L,[],LL0,[],LU,[],Incomplete),
append(Succ,LL0,LL),
compute_prob_deriv(LL,ProbL),
append(Succ,LU,LU1),
compute_prob_deriv(LU1,ProbU),
Err is ProbU-ProbL,
setting(min_error,ME),
(Err<ME->
ProbU0=ProbU,
ProbL0=ProbL
;
% print_mem,
ProbL0=0.0,
ProbU0=0.0
setting(depth_bound,DB),
D1 is D+DB,
solve_i(Incomplete,LL,D1,ProbL0,ProbU0)
).
/* iterative deepening, problog style: each time
the derivation is restarted from the original goal */
sir(GoalsList,ProbL,ProbU,CPUTime):-
statistics(cputime,[_,_]),
setting(depth_bound,D),
@ -142,7 +152,11 @@ sir(GoalsList,ProbL,ProbU,CPUTime):-
CPUTime is CT/1000.
/* solveir(GoalsList,D,ProbL0,ProbU0): GoalsList is the list
of goals to be derived, D is the depth bound, ProbL0 and ProbU0 are the lower
and upper bounds. If for a certain depth bound the error is not smaller
than the threshold, the depth bound is increased and the derivation is
restarted from the beginning */
solveir(GoalsList,D,ProbL0,ProbU0):-
(setof(Deriv,find_derivr(GoalsList,D,Deriv),LDup)->
rem_dup_lists(LDup,[],L),
@ -166,7 +180,8 @@ solveir(GoalsList,D,ProbL0,ProbU0):-
ProbU0=0.0
).
/* approximate algorithm cilog2 style: the explanations with a probability below the
threshold are cut */
sic(GoalsList,ProbL,ProbU,CPUTime):-
statistics(cputime,[_,_]),
setting(depth_bound,D),
@ -174,8 +189,6 @@ sic(GoalsList,ProbL,ProbU,CPUTime):-
statistics(cputime,[_,CT]),
CPUTime is CT/1000.
solveic(GoalsList,D,ProbL0,ProbU0):-
(setof((Deriv,P,Pruned),solvec(GoalsList,D,[],Deriv,1.0,P,Pruned),L)->
% print_mem,
@ -194,16 +207,24 @@ solveic(GoalsList,D,ProbL0,ProbU0):-
).
compute_prob_deriv(LL,ProbL):-
build_formula(LL,FormulaL,[],VarL,0,ConjL),
length(LL,NDL),
length(VarL,NVL),
build_formula(LL,FormulaL,[],VarL,0,_ConjL),
%length(LL,NDL),
%length(VarL,NVL),
%format(user_error,"Disjunctions :~d~nConjunctions: ~d~nVariables ~d~n",[NDL,ConjL,NVL]),
var2numbers(VarL,0,NewVarL),
(setting(save_dot,true)->
% format("Variables: ~p~n",[VarL]),
compute_prob(NewVarL,FormulaL,ProbL,1)
(FormulaL=[]->
ProbL=0.0
;
compute_prob(NewVarL,FormulaL,ProbL,0)
(FormulaL=[[]|_]->
ProbL=1.0
;
(setting(save_dot,true)->
% format("Variables: ~p~n",[VarL]),
compute_prob(NewVarL,FormulaL,ProbL,1)
;
compute_prob(NewVarL,FormulaL,ProbL,0)
)
)
).
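% Note on the base cases above: an empty formula list means that no
% explanation was found, so the probability is 0.0; a formula whose first
% disjunct is the empty conjunction is trivially true, so the probability
% is 1.0. In both cases no BDD needs to be built.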
print_mem:-
@ -313,7 +334,7 @@ solve_condi(LGoals,LEvidence,SuccGE,SuccE,D,ProbL0,ProbU0):-
ProbU0=undefined
).
/* iterative deepening, problog style */
scir(Goals,Evidence,ProbL,ProbU,CPUTime):-
statistics(cputime,[_,_]),
setting(depth_bound,D),
@ -363,6 +384,7 @@ solve_condir(Goals,Evidence,D,ProbL0,ProbU0):-
ProbU0=undefined
).
/* approximate algorithm cilog2 style */
scic(Goals,Evidence,ProbL,ProbU,CPUTime):-
statistics(cputime,[_,_]),
setting(depth_bound,D),
@ -468,11 +490,19 @@ solve_cond_goals(Goals,LE,0,Time1,0):-
Time1 is T1/1000.
call_compute_prob(NewVarGE,FormulaGE,ProbGE):-
(setting(save_dot,true)->
format("Variables: ~p~n",[NewVarGE]),
compute_prob(NewVarGE,FormulaGE,ProbGE,1)
(FormulaGE=[]->
ProbGE=0.0
;
compute_prob(NewVarGE,FormulaGE,ProbGE,0)
(FormulaGE=[[]|_]->
ProbGE=1.0
;
(setting(save_dot,true)->
format("Variables: ~p~n",[NewVarGE]),
compute_prob(NewVarGE,FormulaGE,ProbGE,1)
;
compute_prob(NewVarGE,FormulaGE,ProbGE,0)
)
)
).
find_deriv_GE(LD,GoalsList,Deriv):-
@ -572,7 +602,7 @@ solvei(G,0,C,C,G):-!.
solvei([\+ H |T],DB,CIn,COut,G):-!,
list2and(HL,H),
(findall((GH,D),solvei(HL,DB,CIn,D,GH),L)->
separate_ulbi(L,[],LB,[],UB,[],I),
separate_ulbi(L,[],LB,[],_UB,[],I),
(I\=[]->
C1=CIn,
G=[\+ H|G1]
@ -680,7 +710,6 @@ solvec([H|T],DB,CIn,COut,P0,P1,Pruned):-
solve_presc(R,S,N,B,T,DB1,CIn,COut,P,P0,P1,Pruned).
solve_pres(R,S,N,B,T,CIn,COut):-
member_eq((N,R,S),CIn),!,
append(B,T,NG),
@ -782,16 +811,16 @@ returns the index R of a disjunctive rule resolving with G together with
the index N of the resolving head, the substitution S and the Body of the
rule */
find_rule(H,(R,S,N),Body,C):-
rule(R,S,_,Head,Body),
rule(H,_P,N,R,S,_,Head,Body),
member_head(H,Head,0,N),
not_already_present_with_a_different_head(N,R,S,C).
find_rule(H,(R,S,Number),Body,C):-
rule(R,S,_,uniform(H:1/_Num,_P,Number),Body),
rule_uniform(H,R,S,_,1/_Num,_L,Number,Body),
not_already_present_with_a_different_head(Number,R,S,C).
find_rulec(H,(R,S,N),Body,C,P):-
rule(R,S,_,Head,Body),
rule(H,_P,N,R,S,_,Head,Body),
member_headc(H,Head,0,N,P),
not_already_present_with_a_different_head(N,R,S,C).
@ -864,7 +893,7 @@ choose_clausesc(CIn,[D|T],COut,P0,P1):-
new_head(N,R,S,N1),
\+ already_present(N1,R,S,CIn),
impose_dif_cons(R,S,CIn),
rule(R,S,_Numbers,Head,_Body),
rule_by_num(R,S,_Numbers,Head,_Body),
nth0(N1, Head, (_H:P), _Rest),
P2 is P0*P,
choose_clausesc([(N1,R,S)|CIn],T,COut,P2,P1).
@ -931,7 +960,7 @@ choose_a_headc(N,R,S,[(NH,R,SH)|T],[(NH,R,S),(NH,R,SH)|T],P0,P1):-
\+ \+ S=SH, S\==SH,
dif(N,NH),
dif(S,SH),
rule(R,S,_Numbers,Head,_Body),
rule_by_num(R,S,_Numbers,Head,_Body),
nth0(NH, Head, (_H:P), _Rest),
P1 is P0*P.
@ -959,13 +988,13 @@ choose_a_head(N,R,S,[H|T],[H|T1]):-
/* select a head different from N for rule R with
substitution S, return it in N1 */
new_head(N,R,S,N1):-
rule(R,S,Numbers,Head,_Body),
rule_by_num(R,S,Numbers,Head,_Body),
Head\=uniform(_,_,_),!,
nth0(N, Numbers, _Elem, Rest),
member(N1,Rest).
new_head(N,R,S,N1):-
rule(R,S,Numbers,uniform(_A:1/Tot,_L,_Number),_Body),
rule_by_num(R,S,Numbers,uniform(_A:1/Tot,_L,_Number),_Body),
listN(0,Tot,Numbers),
nth0(N, Numbers, _Elem, Rest),
member(N1,Rest).
@ -1005,11 +1034,7 @@ member_subset(E,[_H|T]):-
member_subset(E,T).
separate_ulbi([],L,L,U,U,I,I):-!.
/*
separate_ulb([H|T],L0,L1,U0,[H|U1]):-
member(pruned,H),!,
separate_ulb(T,L0,L1,U0,U1).
*/
separate_ulbi([([],H)|T],L0,[H|L1],U0,[H|U1],I0,I1):-
!,
separate_ulbi(T,L0,L1,U0,U1,I0,I1).
@ -1020,11 +1045,7 @@ separate_ulbi([(G,H)|T],L0,L1,U0,[H1|U1],I0,[(G,H)|I1]):-
separate_ulb([],L,L,U,U):-!.
/*
separate_ulb([H|T],L0,L1,U0,[H|U1]):-
member(pruned,H),!,
separate_ulb(T,L0,L1,U0,U1).
*/
separate_ulb([H|T],L0,[H|L1],U0,[H|U1]):-
ground(H),!,
separate_ulb(T,L0,L1,U0,U1).
@ -1036,21 +1057,20 @@ separate_ulb([H|T],L0,L1,U0,[H1|U1]):-
separate_ulbc([],L,L,P,P):-!.
separate_ulbc([(H,P,true)|T],L0,L1,P0,P1):-!,
separate_ulbc([(_H,P,true)|T],L0,L1,P0,P1):-!,
P2 is P0+P,
separate_ulbc(T,L0,L1,P2,P1).
separate_ulbc([(H,_P,false)|T],L0,[H|L1],P0,P1):-
separate_ulbc(T,L0,L1,P0,P1).
get_ground([],[]):-!.
get_ground([H|T],[H|T1]):-
ground(H),!,
get_ground(T,T1).
get_ground([H|T],T1):-
get_ground([_H|T],T1):-
get_ground(T,T1).
@ -1116,7 +1136,7 @@ var2numbers([(R,S)|T],N,[[N,ValNumber,Probs]|TNV]):-
var2numbers(T,N1,TNV).
find_probs(R,S,Probs):-
rule(R,S,_N,Head,_Body),
rule_by_num(R,S,_N,Head,_Body),
get_probs(Head,Probs).
get_probs(uniform(_A:1/Num,_P,_Number),ListP):-
@ -1150,11 +1170,13 @@ parse(File):-
open(FilePl,read,S),
read_clauses(S,C),
close(S),
retractall(rule(_,_,_,_,_)),
retractall(def_rule(_,_)),
retractall(rule_by_num(_,_,_,_,_)),
retractall(rule(_,_,_,_,_,_,_,_)),
retractall(def_rule(_,_)),
retractall(rule_uniform(_,_,_,_,_,_,_,_)),
process_clauses(C,1).
process_clauses([(end_of_file,[])],_N).
process_clauses([(end_of_file,[])],_N):-!.
process_clauses([((H:-B),V)|T],N):-
H=uniform(A,P,L),!,
@ -1163,7 +1185,8 @@ process_clauses([((H:-B),V)|T],N):-
remove_vars([P],V1,V2),
append(BL,[length(L,Tot),nth0(Number,L,P)],BL1),
append(V2,['Tot'=Tot],V3),
assertz(rule(N,V3,_NH,uniform(A:1/Tot,L,Number),BL1)),
assertz(rule_by_num(N,V3,_NH,uniform(A:1/Tot,L,Number),BL1)),
assertz(rule_uniform(A,N,V3,_NH,1/Tot,L,Number,BL1)),
N1 is N+1,
process_clauses(T,N1).
@ -1175,7 +1198,8 @@ process_clauses([((H:-B),V)|T],N):-
process_body(BL,V,V1),
length(HL,LH),
listN(0,LH,NH),
assertz(rule(N,V1,NH,HL,BL)),
assert_rules(HL,0,HL,BL,NH,N,V1),
assertz(rule_by_num(N,V1,NH,HL,BL)),
N1 is N+1,
process_clauses(T,N1).
@ -1187,7 +1211,8 @@ process_clauses([((H:-B),V)|T],N):-
process_body(BL,V,V1),
length(HL,LH),
listN(0,LH,NH),
assertz(rule(N,V1,NH,HL,BL)),
assert_rules(HL,0,HL,BL,NH,N,V1),
assertz(rule_by_num(N,V1,NH,HL,BL)),
N1 is N+1,
process_clauses(T,N1).
@ -1202,7 +1227,8 @@ process_clauses([(H,V)|T],N):-
process_head(HL1,HL),
length(HL,LH),
listN(0,LH,NH),
assertz(rule(N,V,NH,HL,[])),
assert_rules(HL,0,HL,[],NH,N,V),
assertz(rule_by_num(N,V,NH,HL,[])),
N1 is N+1,
process_clauses(T,N1).
@ -1212,7 +1238,8 @@ process_clauses([(H,V)|T],N):-
process_head(HL1,HL),
length(HL,LH),
listN(0,LH,NH),
assertz(rule(N,V,NH,HL,[])),
assert_rules(HL,0,HL,[],NH,N,V),
assertz(rule_by_num(N,V,NH,HL,[])),
N1 is N+1,
process_clauses(T,N1).
@ -1220,6 +1247,16 @@ process_clauses([(H,_V)|T],N):-
assert(def_rule(H,[])),
process_clauses(T,N).
assert_rules([],_Pos,_HL,_BL,_Nh,_N,_V1):-!.
assert_rules(['':_P],_Pos,_HL,_BL,_Nh,_N,_V1):-!.
assert_rules([H:P|T],Pos,HL,BL,NH,N,V1):-
assertz(rule(H,P,Pos,N,V1,NH,HL,BL)),
Pos1 is Pos+1,
assert_rules(T,Pos1,HL,BL,NH,N,V1).
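% assert_rules/7 stores one rule/8 fact per head atom (skipping the null
% atom ''), so that find_rule/4 can exploit first-argument indexing on the
% head; a disjunctive rule with two heads thus yields two rule/8 facts
% sharing the same rule number N.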
/* if the annotations in the head are not ground, the null atom is not added
and the formulas, if any, are not evaluated */

View File

@ -120,7 +120,7 @@ test((s([on(4,1)],P),close_to(P,0.0658436213991769)),threesideddice,_).
test((sc([on(2,1)],[on(0,1)],P),close_to(P,0.222222222222222)),threesideddice,_).
test((sc([on(2,1)],[on(1,1)],P),close_to(P,0.333333333333333)),threesideddice,_).
test((sc([on(4,1)],[on(1,1)],P),close_to(P, 0.148148148148148)),threesideddice,_).
/*test((sc([on(4,1)],[on(1,1)],P),close_to(P, 0.148148148148148)),threesideddice,_). */
test((sc([on(5,1)],[on(2,1)],P),close_to(P, 0.148148148148148)),threesideddice,_).