Merge branch 'master' of github.com:tacgomes/yap6.3
commit 6060899773
@ -135,7 +135,7 @@ install: $(CLBN_TOP) $(CLBN_PROGRAMS) $(CLPBN_LEARNING_PROGRAMS) $(CLPBN_SCHOOL_
|
||||
docs: $(MANUAL)
|
||||
$(PDFLATEX) $(PFL_MANUAL)
|
||||
$(PDFLATEX) $(PFL_MANUAL)
|
||||
rm pfl.aux pfl.bbl pfl.blg pfl.log pfl.out
|
||||
rm -f pfl.aux pfl.bbl pfl.blg pfl.log pfl.out
|
||||
|
||||
|
||||
install_docs: docs
|
||||
|
@ -1,5 +1,4 @@
|
||||
|
||||
|
||||
function prepare_new_run
|
||||
{
|
||||
YAP=~/bin/$SHORTNAME-$SOLVER
|
||||
@ -17,32 +16,33 @@ function prepare_new_run
|
||||
|
||||
function run_solver
|
||||
{
|
||||
constraint=$1
|
||||
echo $LOG_FILE
|
||||
CONSTRAINT=$1
|
||||
solver_flag=true
|
||||
if [ -n "$2" ]; then
|
||||
if [ $SOLVER = hve ]; then
|
||||
solver_flag=clpbn_horus:set_horus_flag\(elim_heuristic,$2\)
|
||||
SOLVER_FLAG=set_horus_flag\(hve_elim_heuristic,$2\)
|
||||
elif [ $SOLVER = bp ]; then
|
||||
solver_flag=clpbn_horus:set_horus_flag\(schedule,$2\)
|
||||
SOLVER_FLAG=set_horus_flag\(bp_msg_schedule,$2\)
|
||||
elif [ $SOLVER = cbp ]; then
|
||||
solver_flag=clpbn_horus:set_horus_flag\(schedule,$2\)
|
||||
SOLVER_FLAG=set_horus_flag\(bp_msg_schedule,$2\)
|
||||
elif [ $SOLVER = lbp ]; then
|
||||
solver_flag=clpbn_horus:set_horus_flag\(schedule,$2\)
|
||||
SOLVER_FLAG=set_horus_flag\(bp_msg_schedule,$2\)
|
||||
else
|
||||
echo "unknown flag $2"
|
||||
fi
|
||||
fi
|
||||
/usr/bin/time -o $LOG_FILE -a -f "%U\t%S\t%e\t%M" \
|
||||
$YAP << EOF >> $LOG_FILE &>> ignore.$LOG_FILE
|
||||
$YAP << EOF >> ignore.$LOG_FILE 2>> ignore.$LOG_FILE
|
||||
nogc.
|
||||
[$NETWORK].
|
||||
[$constraint].
|
||||
clpbn_horus:set_solver($SOLVER).
|
||||
clpbn_horus:set_horus_flag(use_logarithms, true).
|
||||
clpbn_horus:set_horus_flag(verbosity, 1).
|
||||
$solver_flag.
|
||||
[$CONSTRAINT].
|
||||
set_solver($SOLVER).
|
||||
set_horus_flag(verbosity, 1).
|
||||
set_horus_flag(use_logarithms, true).
|
||||
$SOLVER_FLAG.
|
||||
$QUERY.
|
||||
open("$LOG_FILE", 'append', S), format(S, '$constraint ~15+ ', []), close(S).
|
||||
open("$LOG_FILE", 'append', S), format(S, "$CONSTRAINT ~15+ ", []), close(S).
|
||||
EOF
|
||||
}
|
||||
|
||||
@ -52,12 +52,16 @@ function clear_log_files
|
||||
{
|
||||
rm -f *~
|
||||
rm -f ../*~
|
||||
rm -f school/*.log school/*~
|
||||
rm -f ../school/*.log ../school/*~
|
||||
rm -f city/*.log city/*~
|
||||
rm -f ../city/*.log ../city/*~
|
||||
rm -f workshop_attrs/*.log workshop_attrs/*~
|
||||
rm -f ../workshop_attrs/*.log ../workshop_attrs/*~
|
||||
rm -f comp_workshops/*.log comp_workshops/*~
|
||||
rm -f ../comp_workshops/*.log ../comp_workshops/*~
|
||||
rm -f city/*.log city/*~
|
||||
rm -f ../city/*.log ../city/*~
|
||||
rm -f social_network2/*.log social_network2/*~
|
||||
rm -f ../social_network2/*.log ../social_network2/*~
|
||||
rm -f social_network2_evidence/*.log social_network2_evidence/*~
|
||||
rm -f ../social_network2_evidence/*.log ../social_network2_evidence/*~
|
||||
echo all done!
|
||||
}
|
||||
|
||||
|
@ -33,5 +33,5 @@ function run_all_graphs
|
||||
}
|
||||
|
||||
prepare_new_run
|
||||
run_all_graphs "bp(shedule=seq_fixed) " seq_fixed
|
||||
run_all_graphs "bp(bp_msg_schedule=seq_fixed) " seq_fixed
|
||||
|
||||
|
@ -32,5 +32,5 @@ function run_all_graphs
|
||||
}
|
||||
|
||||
prepare_new_run
|
||||
run_all_graphs "cbp(shedule=seq_fixed) " seq_fixed
|
||||
run_all_graphs "cbp(bp_msg_schedule=seq_fixed) " seq_fixed
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
NETWORK="'../../examples/city'"
|
||||
NETWORK="'../../examples/city.pfl'"
|
||||
SHORTNAME="city"
|
||||
QUERY="is_joe_guilty(X)"
|
||||
|
||||
|
@ -19,7 +19,7 @@ main :-
|
||||
generate_people(S, N, Counting) :-
|
||||
Counting > N, !.
|
||||
generate_people(S, N, Counting) :-
|
||||
format(S, 'people(p~w, nyc).~n', [Counting]),
|
||||
format(S, 'person(p~w, nyc).~n', [Counting]),
|
||||
Counting1 is Counting + 1,
|
||||
generate_people(S, N, Counting1).
|
||||
|
||||
|
@ -33,5 +33,5 @@ function run_all_graphs
|
||||
}
|
||||
|
||||
prepare_new_run
|
||||
run_all_graphs "hve(elim_heuristic=min_neighbors) " min_neighbors
|
||||
run_all_graphs "hve(hve_elim_heuristic=min_neighbors) " min_neighbors
|
||||
|
||||
|
@ -32,5 +32,5 @@ function run_all_graphs
|
||||
}
|
||||
|
||||
prepare_new_run
|
||||
run_all_graphs "lbp(shedule=seq_fixed) " seq_fixed
|
||||
run_all_graphs "lbp(bp_msg_schedule=seq_fixed) " seq_fixed
|
||||
|
||||
|
@ -27,5 +27,5 @@ function run_all_graphs
|
||||
}
|
||||
|
||||
prepare_new_run
|
||||
run_all_graphs "bp(shedule=seq_fixed) " seq_fixed
|
||||
run_all_graphs "bp(bp_msg_schedule=seq_fixed) " seq_fixed
|
||||
|
||||
|
@ -26,5 +26,5 @@ function run_all_graphs
|
||||
}
|
||||
|
||||
prepare_new_run
|
||||
run_all_graphs "cbp(shedule=seq_fixed) " seq_fixed
|
||||
run_all_graphs "cbp(bp_msg_schedule=seq_fixed) " seq_fixed
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
NETWORK="'../../examples/comp_workshops'"
|
||||
NETWORK="'../../examples/comp_workshops.pfl'"
|
||||
SHORTNAME="cw"
|
||||
QUERY="series(X)"
|
||||
|
||||
|
@ -29,7 +29,7 @@ gen(S, NP, NW, Count) :-
|
||||
gen_workshops(_, _, NW, Count) :-
|
||||
Count > NW, !.
|
||||
gen_workshops(S, P, NW, Count) :-
|
||||
format(S, 'c(p~w,w~w).~n', [P,Count]),
|
||||
format(S, 'reg(p~w,w~w).~n', [P,Count]),
|
||||
Count1 is Count + 1,
|
||||
gen_workshops(S, P, NW, Count1).
|
||||
|
||||
|
@ -26,5 +26,5 @@ function run_all_graphs
|
||||
}
|
||||
|
||||
prepare_new_run
|
||||
run_all_graphs "hve(elim_heuristic=min_neighbors) " min_neighbors
|
||||
run_all_graphs "hve(hve_elim_heuristic=min_neighbors) " min_neighbors
|
||||
|
||||
|
@ -26,5 +26,5 @@ function run_all_graphs
|
||||
}
|
||||
|
||||
prepare_new_run
|
||||
run_all_graphs "lbp(shedule=seq_fixed) " seq_fixed
|
||||
run_all_graphs "lbp(bp_msg_schedule=seq_fixed) " seq_fixed
|
||||
|
||||
|
@ -24,7 +24,7 @@ source lbp_tests.sh
|
||||
source cbp_tests.sh
|
||||
cd ..
|
||||
|
||||
cd smokers
|
||||
cd social_network2
|
||||
source hve_tests.sh
|
||||
source bp_tests.sh
|
||||
source lve_tests.sh
|
||||
|
@ -1,95 +1,64 @@
|
||||
#!/bin/bash
|
||||
|
||||
#cp ~/bin/yap ~/bin/school_all
|
||||
#YAP=~/bin/school_all
|
||||
YAP=~/bin/yap
|
||||
source ../benchs.sh
|
||||
|
||||
#OUT_FILE_NAME=results`date "+ %H:%M:%S %d-%m-%Y"`.log
|
||||
OUT_FILE_NAME=results.log
|
||||
rm -f $OUT_FILE_NAME
|
||||
rm -f ignore.$OUT_FILE_NAME
|
||||
SHORTNAME="school"
|
||||
SOLVER="school"
|
||||
|
||||
# yap -g "['../../../../examples/School/sch32'], [missing5], use_module(library(clpbn/learning/em)), graph(L), clpbn:set_clpbn_flag(em_solver,bp), clpbn_horus:set_horus_flag(inf_alg, bp), statistics(runtime, _), em(L,0.01,10,_,Lik), statistics(runtime, [T,_])."
|
||||
|
||||
function run_solver
|
||||
function learn_params
|
||||
{
|
||||
if [ $2 = bp ]
|
||||
then
|
||||
if [ $4 = ve ]
|
||||
then
|
||||
extra_flag1=clpbn_horus:set_horus_flag\(inf_alg,$4\)
|
||||
extra_flag2=clpbn_horus:set_horus_flag\(elim_heuristic,$5\)
|
||||
else
|
||||
extra_flag1=clpbn_horus:set_horus_flag\(inf_alg,$4\)
|
||||
extra_flag2=clpbn_horus:set_horus_flag\(schedule,$5\)
|
||||
fi
|
||||
else
|
||||
extra_flag1=true
|
||||
extra_flag2=true
|
||||
fi
|
||||
/usr/bin/time -o "$OUT_FILE_NAME" -a -f "real:%E\tuser:%U\tsys:%S" $YAP << EOF &>> "ignore.$OUT_FILE_NAME"
|
||||
:- [pos:train].
|
||||
:- ['../../../../examples/School/sch32'].
|
||||
:- use_module(library(clpbn/learning/em)).
|
||||
:- use_module(library(clpbn/bp)).
|
||||
[$1].
|
||||
NETWORK="'./../../examples/School/school_32'"
|
||||
CONSTRAINT=$2
|
||||
SOLVER=$1
|
||||
echo $NETWORK
|
||||
/usr/bin/time -o $LOG_FILE -a -f "%U\t%S\t%e\t%M" \
|
||||
$YAP << EOF >> ignore.$LOG_FILE 2>> ignore.$LOG_FILE
|
||||
use_module(library(pfl)).
|
||||
use_module(library(clpbn/learning/em)).
|
||||
[$NETWORK].
|
||||
[$CONSTRAINT].
|
||||
set_em_solver($SOLVER).
|
||||
graph(L),
|
||||
clpbn:set_clpbn_flag(em_solver,$2),
|
||||
$extra_flag1, $extra_flag2,
|
||||
em(L,0.01,10,_,Lik),
|
||||
open("$OUT_FILE_NAME", 'append',S),
|
||||
format(S, '$3: ~11+ Lik = ~3f, ',[Lik]),
|
||||
close(S).
|
||||
% em(L, 0.01, 10, _, Lik),
|
||||
open("$LOG_FILE", 'append', S),
|
||||
format(S, "$CONSTRAINT: ~15+ Lik = ~3f\t", [Lik]),
|
||||
close(S).
|
||||
EOF
|
||||
}
|
||||
|
||||
|
||||
function run_all_graphs
|
||||
{
|
||||
echo "************************************************************************" >> "$OUT_FILE_NAME"
|
||||
echo "results for solver $2" >> "$OUT_FILE_NAME"
|
||||
echo "************************************************************************" >> "$OUT_FILE_NAME"
|
||||
run_solver missing5 $1 missing5 $3 $4 $5
|
||||
run_solver missing10 $1 missing10 $3 $4 $5
|
||||
#run_solver missing20 $1 missing20 $3 $4 $5
|
||||
#run_solver missing30 $1 missing30 $3 $4 $5
|
||||
#run_solver missing40 $1 missing40 $3 $4 $5
|
||||
#run_solver missing50 $1 missing50 $3 $4 $5
|
||||
}
|
||||
prepare_new_run
|
||||
|
||||
|
||||
#run_all_graphs bp "hve(min_neighbors) " ve min_neighbors
|
||||
#run_all_graphs bp "bp(seq_fixed) " bp seq_fixed
|
||||
#run_all_graphs bp "cbp(seq_fixed) " cbp seq_fixed
|
||||
exit
|
||||
write_header hve
|
||||
learn_params hve missing5
|
||||
learn_params hve missing10
|
||||
learn_params hve missing20
|
||||
#learn_params hve missing30
|
||||
#learn_params hve missing40
|
||||
#learn_params hve missing50
|
||||
|
||||
write_header ve
|
||||
learn_params ve missing5
|
||||
learn_params ve missing10
|
||||
learn_params ve missing20
|
||||
#learn_params ve missing30
|
||||
#learn_params ve missing40
|
||||
#learn_params hve missing50
|
||||
|
||||
run_all_graphs bp "hve(min_neighbors) " ve min_neighbors
|
||||
run_all_graphs bp "hve(min_weight) " ve min_weight
|
||||
run_all_graphs bp "hve(min_fill) " ve min_fill
|
||||
run_all_graphs bp "hve(w_min_fill) " ve weighted_min_fill
|
||||
run_all_graphs bp "bp(seq_fixed) " bp seq_fixed
|
||||
run_all_graphs bp "bp(max_residual) " bp max_residual
|
||||
run_all_graphs bp "cbp(seq_fixed) " cbp seq_fixed
|
||||
run_all_graphs bp "cbp(max_residual) " cbp max_residual
|
||||
run_all_graphs gibbs "gibbs "
|
||||
echo "************************************************************************" >> "$OUT_FILE_NAME"
|
||||
echo "results for solver ve" >> "$OUT_FILE_NAME"
|
||||
echo "************************************************************************" >> "$OUT_FILE_NAME"
|
||||
run_solver missing5 ve missing5 $3 $4 $5
|
||||
run_solver missing10 ve missing10 $3 $4 $5
|
||||
run_solver missing20 ve missing20 $3 $4 $5
|
||||
run_solver missing30 ve missing30 $3 $4 $5
|
||||
run_solver missing40 ve missing40 $3 $4 $5
|
||||
#run_solver missing50 ve missing50 $3 $4 $5 #+24h!
|
||||
echo "************************************************************************" >> "$OUT_FILE_NAME"
|
||||
echo "results for solver jt" >> "$OUT_FILE_NAME"
|
||||
echo "************************************************************************" >> "$OUT_FILE_NAME"
|
||||
run_solver missing5 jt missing5 $3 $4 $5
|
||||
run_solver missing10 jt missing10 $3 $4 $5
|
||||
run_solver missing20 jt missing20 $3 $4 $5
|
||||
#run_solver missing30 jt missing30 $3 $4 $5 #+24h!
|
||||
#run_solver missing40 jt missing40 $3 $4 $5 #+24h!
|
||||
#run_solver missing50 jt missing50 $3 $4 $5 #+24h!
|
||||
exit
|
||||
write_header bp
|
||||
learn_params bp missing5
|
||||
learn_params bp missing10
|
||||
learn_params bp missing20
|
||||
#learn_params bp missing30
|
||||
#learn_params bp missing40
|
||||
#learn_params bp missing50
|
||||
|
||||
write_header cbp
|
||||
learn_params cbp missing5
|
||||
learn_params cbp missing10
|
||||
learn_params cbp missing20
|
||||
#learn_params cbp missing30
|
||||
#learn_params cbp missing40
|
||||
#learn_params cbp missing50
|
||||
|
||||
|
@ -1,8 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
NETWORK="'../../examples/social_domain2'"
|
||||
SHORTNAME="sm"
|
||||
QUERY="query(X)"
|
||||
|
||||
POP=500
|
||||
|
@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
source sm.sh
|
||||
source sn2.sh
|
||||
source ../benchs.sh
|
||||
|
||||
SOLVER="bp"
|
||||
@ -26,5 +26,5 @@ function run_all_graphs
|
||||
}
|
||||
|
||||
prepare_new_run
|
||||
run_all_graphs "bp(shedule=seq_fixed) " seq_fixed
|
||||
run_all_graphs "bp(bp_msg_schedule=seq_fixed) " seq_fixed
|
||||
|
@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
source sm.sh
|
||||
source sn2.sh
|
||||
source ../benchs.sh
|
||||
|
||||
SOLVER="cbp"
|
||||
@ -26,5 +26,5 @@ function run_all_graphs
|
||||
}
|
||||
|
||||
prepare_new_run
|
||||
run_all_graphs "cbp(shedule=seq_fixed) " seq_fixed
|
||||
run_all_graphs "cbp(bp_msg_schedule=seq_fixed) " seq_fixed
|
||||
|
@ -17,7 +17,7 @@ main :-
|
||||
generate_people(S, N, Counting) :-
|
||||
Counting > N, !.
|
||||
generate_people(S, N, Counting) :-
|
||||
format(S, 'people(p~w).~n', [Counting]),
|
||||
format(S, 'person(p~w).~n', [Counting]),
|
||||
Counting1 is Counting + 1,
|
||||
generate_people(S, N, Counting1).
|
||||
|
@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
source sm.sh
|
||||
source sn2.sh
|
||||
source ../benchs.sh
|
||||
|
||||
SOLVER="hve"
|
||||
@ -26,8 +26,8 @@ function run_all_graphs
|
||||
}
|
||||
|
||||
prepare_new_run
|
||||
run_all_graphs "hve(elim_heuristic=min_neighbors) " min_neighbors
|
||||
#run_all_graphs "hve(elim_heuristic=min_weight) " min_weight
|
||||
#run_all_graphs "hve(elim_heuristic=min_fill) " min_fill
|
||||
#run_all_graphs "hve(elim_heuristic=weighted_min_fill) " weighted_min_fill
|
||||
run_all_graphs "hve(hve_elim_heuristic=min_neighbors) " min_neighbors
|
||||
#run_all_graphs "hve(hve_elim_heuristic=min_weight) " min_weight
|
||||
#run_all_graphs "hve(hve_elim_heuristic=min_fill) " min_fill
|
||||
#run_all_graphs "hve(hve_elim_heuristic=weighted_min_fill) " weighted_min_fill
|
||||
|
@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
source sm.sh
|
||||
source sn2.sh
|
||||
source ../benchs.sh
|
||||
|
||||
SOLVER="lbp"
|
||||
@ -26,5 +26,5 @@ function run_all_graphs
|
||||
}
|
||||
|
||||
prepare_new_run
|
||||
run_all_graphs "lbp(shedule=seq_fixed) " seq_fixed
|
||||
run_all_graphs "lbp(bp_msg_schedule=seq_fixed) " seq_fixed
|
||||
|
@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
source sm.sh
|
||||
source sn2.sh
|
||||
source ../benchs.sh
|
||||
|
||||
SOLVER="lve"
|
@ -1,7 +1,7 @@
|
||||
#!/bin/bash
|
||||
|
||||
NETWORK="'../../examples/social_domain2'"
|
||||
SHORTNAME="sm"
|
||||
NETWORK="'../../examples/social_network2.pfl'"
|
||||
SHORTNAME="sn2"
|
||||
#QUERY="smokes(p1,t), smokes(p2,t), friends(p1,p2,X)"
|
||||
QUERY="friends(p1,p2,X)"
|
||||
|
@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
source sm.sh
|
||||
source sn2ev.sh
|
||||
source ../benchs.sh
|
||||
|
||||
SOLVER="bp"
|
||||
@ -30,5 +30,5 @@ function run_all_graphs
|
||||
}
|
||||
|
||||
prepare_new_run
|
||||
run_all_graphs "bp(shedule=seq_fixed) " seq_fixed
|
||||
run_all_graphs "bp(bp_msg_schedule=seq_fixed) " seq_fixed
|
||||
|
@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
source sm.sh
|
||||
source sn2ev.sh
|
||||
source ../benchs.sh
|
||||
|
||||
SOLVER="cbp"
|
||||
@ -30,5 +30,5 @@ function run_all_graphs
|
||||
}
|
||||
|
||||
prepare_new_run
|
||||
run_all_graphs "cbp(shedule=seq_fixed) " seq_fixed
|
||||
run_all_graphs "cbp(bp_msg_schedule=seq_fixed) " seq_fixed
|
||||
|
@ -26,7 +26,7 @@ main :-
|
||||
generate_people(S, N, Counting) :-
|
||||
Counting > N, !.
|
||||
generate_people(S, N, Counting) :-
|
||||
format(S, 'people(p~w).~n', [Counting]),
|
||||
format(S, 'person(p~w).~n', [Counting]),
|
||||
Counting1 is Counting + 1,
|
||||
generate_people(S, N, Counting1).
|
||||
|
@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
source sm.sh
|
||||
source sn2ev.sh
|
||||
source ../benchs.sh
|
||||
|
||||
SOLVER="hve"
|
||||
@ -30,8 +30,8 @@ function run_all_graphs
|
||||
}
|
||||
|
||||
prepare_new_run
|
||||
run_all_graphs "hve(elim_heuristic=min_neighbors) " min_neighbors
|
||||
#run_all_graphs "hve(elim_heuristic=min_weight) " min_weight
|
||||
#run_all_graphs "hve(elim_heuristic=min_fill) " min_fill
|
||||
#run_all_graphs "hve(elim_heuristic=weighted_min_fill) " weighted_min_fill
|
||||
run_all_graphs "hve(hve_elim_heuristic=min_neighbors) " min_neighbors
|
||||
#run_all_graphs "hve(hve_elim_heuristic=min_weight) " min_weight
|
||||
#run_all_graphs "hve(hve_elim_heuristic=min_fill) " min_fill
|
||||
#run_all_graphs "hve(hve_elim_heuristic=weighted_min_fill) " weighted_min_fill
|
||||
|
@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
source sm.sh
|
||||
source sn2ev.sh
|
||||
source ../benchs.sh
|
||||
|
||||
SOLVER="lve"
|
8
packages/CLPBN/benchmarks/social_network2_evidence/sn2ev.sh
Executable file
@ -0,0 +1,8 @@
|
||||
#!/bin/bash
|
||||
|
||||
NETWORK="'../../examples/social_network2.pfl'"
|
||||
SHORTNAME="sn2ev"
|
||||
QUERY="query(X)"
|
||||
|
||||
POP=500
|
||||
|
@ -33,5 +33,5 @@ function run_all_graphs
|
||||
}
|
||||
|
||||
prepare_new_run
|
||||
run_all_graphs "bp(shedule=seq_fixed) " seq_fixed
|
||||
run_all_graphs "bp(bp_msg_schedule=seq_fixed) " seq_fixed
|
||||
|
||||
|
@ -32,5 +32,5 @@ function run_all_graphs
|
||||
}
|
||||
|
||||
prepare_new_run
|
||||
run_all_graphs "cbp(shedule=seq_fixed) " seq_fixed
|
||||
run_all_graphs "cbp(bp_msg_schedule=seq_fixed) " seq_fixed
|
||||
|
||||
|
@ -23,7 +23,7 @@ main :-
|
||||
generate_people(S, N, Counting) :-
|
||||
Counting > N, !.
|
||||
generate_people(S, N, Counting) :-
|
||||
format(S, 'people(p~w).~n', [Counting]),
|
||||
format(S, 'person(p~w).~n', [Counting]),
|
||||
Counting1 is Counting + 1,
|
||||
generate_people(S, N, Counting1).
|
||||
|
||||
@ -31,9 +31,9 @@ generate_people(S, N, Counting) :-
|
||||
generate_attrs(S, N, Counting) :-
|
||||
Counting > N, !.
|
||||
generate_attrs(S, N, Counting) :-
|
||||
%format(S, 'people(p~w).~n', [Counting]),
|
||||
%format(S, 'person(p~w).~n', [Counting]),
|
||||
format(S, 'markov attends(P)::[t,f], attr~w::[t,f]', [Counting]),
|
||||
format(S, '; [0.7, 0.3, 0.3, 0.3] ; [people(P)].~n',[]),
|
||||
format(S, '; [0.7, 0.3, 0.3, 0.3] ; [person(P)].~n',[]),
|
||||
Counting1 is Counting + 1,
|
||||
generate_attrs(S, N, Counting1).
|
||||
|
||||
|
@ -32,5 +32,5 @@ function run_all_graphs
|
||||
}
|
||||
|
||||
prepare_new_run
|
||||
run_all_graphs "hve(elim_heuristic=min_neighbors) " min_neighbors
|
||||
run_all_graphs "hve(hve_elim_heuristic=min_neighbors) " min_neighbors
|
||||
|
||||
|
@ -32,5 +32,5 @@ function run_all_graphs
|
||||
}
|
||||
|
||||
prepare_new_run
|
||||
run_all_graphs "lbp(shedule=seq_fixed) " seq_fixed
|
||||
run_all_graphs "lbp(bp_msg_schedule=seq_fixed) " seq_fixed
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
NETWORK="'../../examples/workshop_attrs'"
|
||||
NETWORK="'../../examples/workshop_attrs.pfl'"
|
||||
SHORTNAME="wa"
|
||||
QUERY="series(X)"
|
||||
|
||||
|
@ -59,7 +59,7 @@ The package also includes implementations for a set of well-known inference algo
|
||||
%------------------------------------------------------------------------------
|
||||
%------------------------------------------------------------------------------
|
||||
\section{Installation}
|
||||
PFL is included with the \href{http://www.dcc.fc.up.pt/~vsc/Yap/}{YAP} Prolog system. However, there isn't yet a stable release of YAP that includes PFL. So it is required to install a development version of YAP. To to this, you will need to have installed the Git version control system. The commands to do a default installation of YAP in the user's home in a Unix-based environment are shown next.
|
||||
PFL is included with the \href{http://www.dcc.fc.up.pt/~vsc/Yap/}{YAP} Prolog system. However, there isn't yet a stable release of YAP that includes PFL. So you will need to install a development version of YAP. To do so, you must have the \href{http://git-scm.com/}{Git} version control system installed. The commands to perform a default installation of YAP in your home directory in a Unix-based environment are shown next.
|
||||
|
||||
\begin{enumerate}
|
||||
\setlength\itemindent{-0.01cm}
|
||||
@ -70,7 +70,7 @@ PFL is included with the \href{http://www.dcc.fc.up.pt/~vsc/Yap/}{YAP} Prolog sy
|
||||
\item \texttt{\$ make depend \& make install}
|
||||
\end{enumerate}
|
||||
|
||||
In case you want to install YAP somewhere else or with different settings, please consult the YAP documentation. From now on, we will assume that the directory \texttt{\$HOME\pathsep bin} (where the binary can be found) is in your \texttt{\$PATH} environment variable.
|
||||
In case you want to install YAP somewhere else or with different settings, please consult the YAP documentation. From now on, we will assume that the directory \texttt{\$HOME\pathsep bin} (where the binary is) is in your \texttt{\$PATH} environment variable.
|
||||
|
||||
\label{examples-directory}
|
||||
Once in a while, we will refer to the PFL examples directory. In a default installation, this directory will be located at \texttt{\$HOME\pathsep share\pathsep doc\pathsep Yap\pathsep packages\pathsep examples\pathsep CLPBN}.
|
||||
@ -82,7 +82,7 @@ Once in a while, we will refer to the PFL examples directory. In a default insta
|
||||
%------------------------------------------------------------------------------
|
||||
%------------------------------------------------------------------------------
|
||||
\section{Language}
|
||||
A first-order probabilistic graphical model is described using parametric factors, or just parfactors. The PFL syntax for a parfactor is
|
||||
A first-order probabilistic graphical model is described using parametric factors, commonly known as parfactors. The PFL syntax for a parfactor is
|
||||
|
||||
$$Type~~F~~;~~Phi~~;~~C.$$
|
||||
|
||||
@ -90,11 +90,11 @@ $$Type~~F~~;~~Phi~~;~~C.$$
|
||||
\begin{itemize}
|
||||
\item $Type$ refers to the type of network over which the parfactor is defined. It can be \texttt{bayes} for directed networks, or \texttt{markov} for undirected ones.
|
||||
|
||||
\item $F$ is a comma-separated sequence of Prolog terms that will define sets of random variables under the constraint $C$. If $Type$ is \texttt{bayes}, the first term defines the node while the others defines its parents.
|
||||
\item $F$ is a comma-separated sequence of Prolog terms that will define sets of random variables under the constraint $C$. If $Type$ is \texttt{bayes}, the first term defines the node while the remaining terms define its parents.
|
||||
|
||||
\item $Phi$ is either a Prolog list of potential values or a Prolog goal that unifies with one. If $Type$ is \texttt{bayes}, this will correspond to the conditional probability table. Domain combinations are implicitly assumed in ascending order, with the first term being the 'most significant' (e.g. $\mathtt{x_0y_0}$, $\mathtt{x_0y_1}$, $\mathtt{x_0y_2}$, $\mathtt{x_1y_0}$, $\mathtt{x_1y_1}$, $\mathtt{x_1y_2}$).
|
||||
\item $Phi$ is either a Prolog list of potential values or a Prolog goal that unifies with one. Notice that if $Type$ is \texttt{bayes}, this will correspond to the conditional probability table. Domain combinations are implicitly assumed in ascending order, with the first term being the 'most significant' (e.g. $\mathtt{x_0y_0}$, $\mathtt{x_0y_1}$, $\mathtt{x_0y_2}$, $\mathtt{x_1y_0}$, $\mathtt{x_1y_1}$, $\mathtt{x_1y_2}$).
|
||||
|
||||
\item $C$ is a (possibly empty) list of Prolog goals that will instantiate the logical variables that appear in $F$, that is, the successful substitutions for the goals in $C$ will be the valid values for the logical variables. This allows the constraint to be any relation (set of tuples) over the logical variables.
|
||||
\item $C$ is a (possibly empty) list of Prolog goals that will instantiate the logical variables that appear in $F$, that is, the successful substitutions for the goals in $C$ will be the valid values for the logical variables. This allows the constraint to be defined as any relation (set of tuples) over the logical variables.
|
||||
\end{itemize}
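As a purely illustrative sketch (the names \texttt{flu/1}, \texttt{flu\_table} and \texttt{person/1} below are made up for this example and are not part of any model shipped with PFL), a directed parfactor following this syntax could look like:

\begin{pflcode}
bayes flu(P) ; flu_table ; [person(P)].
\end{pflcode}

Here \texttt{flu(P)} plays the role of $F$, \texttt{flu\_table} of $Phi$, and \texttt{[person(P)]} of $C$.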
|
||||
|
||||
|
||||
@ -189,14 +189,16 @@ wet_grass_table(
|
||||
0.01, 0.1, 0.1, 1.0 ]).
|
||||
\end{pflcode}
|
||||
|
||||
Note that this network is fully grounded, as the constraints are all empty. Next we present the PFL representation for a well-known markov logic network - the social network model. The weighted formulas of this model are shown below.
|
||||
We started by loading the PFL library, then we defined one factor for each node, and finally we specified the probabilities for each conditional probability table.
|
||||
|
||||
Notice that this network is fully grounded, as all constraints are empty. Next we present the PFL representation for a well-known Markov logic network: the social network model. For convenience, the two main weighted formulas of this model are shown below.
|
||||
|
||||
\begin{pflcode}
|
||||
1.5 : Smokes(x) => Cancer(x)
|
||||
1.1 : Smokes(x) ^ Friends(x,y) => Smokes(y)
|
||||
\end{pflcode}
|
||||
|
||||
We can represent this model using PFL with the following code.
|
||||
Next, we show the PFL representation for this model.
|
||||
|
||||
\begin{pflcode}
|
||||
:- use_module(library(pfl)).
|
||||
@ -216,9 +218,9 @@ markov friends(X,Y), smokes(X), smokes(Y) ;
|
||||
%markov cancer(X) ; [1.0, 9.974]; [person(X)].
|
||||
%markov friends(X,Y) ; [1.0, 99.484] ; [person(X), person(Y)].
|
||||
|
||||
Notice that we defined the world to be consisted of two persons, \texttt{anne} and \texttt{bob}. We can easily add as many persons as we want by inserting in the program a fact like \texttt{person @ 10.}~. This would create ten persons named \texttt{p1}, \texttt{p2}, \dots, \texttt{p10}.
|
||||
Notice that we have defined the world to consist of only two persons, \texttt{anna} and \texttt{bob}. We can easily add as many persons as we want by inserting in the program a fact like \texttt{person @ 10.}~. This would automatically create ten persons named \texttt{p1}, \texttt{p2}, \dots, \texttt{p10}.
|
||||
|
||||
Unlike other fist-order probabilistic languages, in PFL the logical variables that appear in the terms are not directly typed, and they will be only constrained by the goals that appear in the constraint of the parfactor. This allows the logical variables to be constrained by any relation (set of tuples), and not by pairwise (in)equalities. For instance, the next example defines a ground network with three factors, each over the random variables \texttt{p(a,b)}, \texttt{p(b,d)} and \texttt{p(d,e)}.
|
||||
Unlike other first-order probabilistic languages, in PFL the logical variables that appear in the terms are not directly typed, and they will only be constrained by the goals that appear in the constraint of the parfactor. This allows the logical variables to be constrained to any relation (set of tuples), and not only by pairwise (in)equalities. For instance, the next example defines a network with three ground factors, each defined respectively over the random variables \texttt{p(a,b)}, \texttt{p(b,d)} and \texttt{p(d,e)}.
|
||||
|
||||
\begin{pflcode}
|
||||
constraint(a,b).
|
||||
@ -228,9 +230,9 @@ constraint(d,e).
|
||||
markov p(A,B); some_table; [constraint(A,B)].
|
||||
\end{pflcode}
|
||||
|
||||
We can easily add static evidence to PFL programs by inserting a fact with the same functor and arguments as the random variable, plus one extra argument with the observed state or value. For instance, suppose that we now that \texttt{anna} and \texttt{bob} are friends. We can add this knowledge to the program with the following fact: \texttt{friends(anna,bob,t).}~.
|
||||
We can easily add static evidence to PFL programs by inserting a fact with the same functor and arguments as the random variable, plus one extra argument with the observed state or value. For instance, suppose that we know that \texttt{anna} and \texttt{bob} are friends. We can add this knowledge to the program with the following fact: \texttt{friends(anna,bob,t).}~.
|
||||
|
||||
One last note for the domain of the random variables. By default all terms will generate boolean (\texttt{t}/\texttt{f}) random variables. It is possible to chose a different domain by appending a list of the possible values or states to the term. Next we present a self-explanatory example of how this can be done.
|
||||
One last note on the domain of the random variables. By default, all terms instantiate boolean (\texttt{t}/\texttt{f}) random variables. It is possible to choose a different domain for a term by appending a list of its possible values or states. Next we present a self-explanatory example of how this can be done.
|
||||
|
||||
\begin{pflcode}
|
||||
bayes professor_ability::[high, medium, low] ; [0.5, 0.4, 0.1].
|
||||
@ -245,19 +247,19 @@ More probabilistic models defined using PFL can be found in the examples directo
|
||||
%------------------------------------------------------------------------------
|
||||
%------------------------------------------------------------------------------
|
||||
\section{Querying}
|
||||
In this section we demonstrate how to use PFL to solve probabilistic queries. We will use the sprinkler network as an example.
|
||||
In this section we demonstrate how to use PFL to solve probabilistic queries. We will use the sprinkler network as an example.
|
||||
|
||||
Assuming that the current directory is the one where the examples are located, first we load the model as follows.
|
||||
Assuming that the current directory is the one where the examples are located, first we load the model with the following command.
|
||||
|
||||
\texttt{\$ yap -l sprinkler.pfl}
|
||||
|
||||
Let's suppose that we want to estimate the marginal probability for the $WetGrass$ random variable. We can do it calling the following goal:
|
||||
Let's suppose that we want to estimate the marginal probability for the $WetGrass$ random variable. To do so, we call the following goal.
|
||||
|
||||
\texttt{?- wet\_grass(X).}
|
||||
|
||||
The output of the goal will show the marginal probability for each $WetGrass$ possible state or value, that is, \texttt{t} and \texttt{f}. Notice that in PFL a random variable is identified by a term with the same functor and arguments plus one extra argument.
|
||||
The output of this goal will show the marginal probability for each $WetGrass$ possible state or value, that is, \texttt{t} and \texttt{f}. Notice that in PFL a random variable is identified by a term with the same functor and arguments plus one extra argument.
|
||||
|
||||
Now let's suppose that we want to estimate the probability for the same random variable, but this time we have evidence that it had rained the day before. We can estimate this probability without resorting to static evidence with:
|
||||
Now let's suppose that we want to estimate the probability for the same random variable, but this time we have evidence that it had rained the day before. We can estimate this probability without resorting to static evidence with:
|
||||
|
||||
\texttt{?- wet\_grass(X), rain(t).}
|
||||
|
||||
@ -267,18 +269,6 @@ PFL also supports calculating joint probability distributions. For instance, we
|
||||
|
||||
|
||||
|
||||
%------------------------------------------------------------------------------
|
||||
%------------------------------------------------------------------------------
|
||||
%------------------------------------------------------------------------------
|
||||
%------------------------------------------------------------------------------
|
||||
\section{Parameter Learning}
|
||||
PFL is capable to learn the parameters for bayesian networks, through an implementation of the expectation-maximization algorithm.
|
||||
|
||||
Inside the \texttt{learning} directory from the examples directory, one can find some examples of how learning works in PFL.
|
||||
|
||||
We can define the solver that will be used for the inference part during parameter learning with the \texttt{set\_em\_solver/1} predicate (defaults to \texttt{hve}). At the moment, only the following solvers support parameter learning: \texttt{ve}, \texttt{hve}, \texttt{bdd}, \texttt{bp} and \texttt{cbp}.
|
||||
|
||||
|
||||
%------------------------------------------------------------------------------
|
||||
%------------------------------------------------------------------------------
|
||||
%------------------------------------------------------------------------------
|
||||
@ -302,36 +292,36 @@ For instance, if we want to use belief propagation to solve some probabilistic q
|
||||
|
||||
\texttt{?- set\_solver(bp).}
|
||||
|
||||
It is possible to tweak some parameters of PFL through \texttt{set\_horus\_flag/2} predicate. The first argument is a key that identifies the parameter that we desire to tweak, while the second is some possible value for this key.
|
||||
It is possible to tweak some parameters of PFL through the \texttt{set\_horus\_flag/2} predicate. The first argument is a key that identifies the parameter that we want to tweak. The second argument is some possible value for this key.
|
||||
|
||||
The \texttt{verbosity} key controls the level of debugging information that will be printed. Its possible values are positive integers. The higher the number, the more information that will be shown. For example, to view some basic debugging information we call:
|
||||
The \texttt{verbosity} key controls the level of debugging information that will be printed. Its possible values are positive integers. The higher the number, the more information will be shown. For instance, we can view some basic debugging information by calling the following goal.
|
||||
|
||||
\texttt{?- set\_horus\_flag(verbosity, 1).}
|
||||
|
||||
This key defaults to 0 (no debugging information) and only \texttt{hve}, \texttt{bp}, \texttt{cbp}, \texttt{lve}, \texttt{lkc} and \texttt{lbp} solvers have support for this key.
|
||||
This key defaults to 0 (no debugging) and only the \texttt{hve}, \texttt{bp}, \texttt{cbp}, \texttt{lve}, \texttt{lkc} and \texttt{lbp} solvers support it.
|
||||
|
||||
The \texttt{use\_logarithms} key controls whether the calculations performed during inference should be done in a logarithm domain or not. Its values can be \texttt{true} or \texttt{false}. By default is \texttt{true} and only affects \texttt{hve}, \texttt{bp}, \texttt{cbp}, \texttt{lve}, \texttt{lkc} and \texttt{lbp} solvers. The remaining solvers always do their calculations in a logarithm domain.
|
||||
The \texttt{use\_logarithms} key controls whether the calculations performed during inference should be done in a logarithm domain or not. Its values can be \texttt{true} (default) or \texttt{false}. This key only affects \texttt{hve}, \texttt{bp}, \texttt{cbp}, \texttt{lve}, \texttt{lkc} and \texttt{lbp} solvers. The remaining solvers always perform their calculations in a logarithm domain.
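For instance (this goal is purely illustrative), computations in the normal domain could be requested with:

\texttt{?- set\_horus\_flag(use\_logarithms, false).}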
|
||||
|
||||
There are keys specific only to some algorithms. The key \texttt{elim\_heuristic} key allows to chose which elimination heuristic will be used by the \texttt{hve} solver (but not \texttt{ve}). The following are supported:
|
||||
There are keys specific to some algorithms. The \texttt{hve\_elim\_heuristic} key allows one to choose which elimination heuristic will be used by the \texttt{hve} solver (but not by \texttt{ve}). The following are supported:
|
||||
\begin{itemize}
|
||||
\item \texttt{sequential}
|
||||
\item \texttt{min\_neighbors}
|
||||
\item \texttt{min\_weight}
|
||||
\item \texttt{min\_fill}
|
||||
\item \texttt{weighted\_min\_fill}
|
||||
\item \texttt{weighted\_min\_fill} (default)
|
||||
\end{itemize}
|
||||
|
||||
It defaults to \texttt{weighted\_min\_fill}. An explanation of each of these heuristics can be found in Daphne Koller's book \textit{Probabilistic Graphical Models}.
|
||||
An explanation for each of these heuristics can be found in Daphne Koller's book \textit{Probabilistic Graphical Models}.
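For instance (illustrative goal), the \texttt{hve} solver could be switched to the \texttt{min\_fill} heuristic with:

\texttt{?- set\_horus\_flag(hve\_elim\_heuristic, min\_fill).}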
|
||||
|
||||
The \texttt{bp\_msg\_schedule}, \texttt{bp\_accuracy} and \texttt{bp\_max\_iter} keys are specific for message passing based algorithms, namely \texttt{bp}, \texttt{cbp} and \texttt{lbp}.
|
||||
|
||||
The \texttt{bp\_max\_iter} key establishes a maximum number of iterations. One iteration consists in sending all possible messages. It defaults to 1000.
|
||||
The \texttt{bp\_max\_iter} key establishes a maximum number of iterations. One iteration consists in sending all possible messages. It defaults to \texttt{1000}.
|
||||
|
||||
The \texttt{bp\_accuracy} key indicates when the message passing should cease. Be the residual of one message the difference (according some metric) between the one sent in the current iteration and the one sent in the previous. If the highest residual is lesser than the given value, the message passing is stopped and the probabilities are calculated using the last messages that were sent. This key defaults to 0.0001.
|
||||
The \texttt{bp\_accuracy} key allows one to control when the message passing should cease. Let the residual of a message be the difference (according to some metric) between the message sent in the current iteration and the one sent in the previous iteration. If the highest residual is less than the given value, the message passing is stopped and the probabilities are calculated using the last messages that were sent. This key defaults to \texttt{0.0001}.
|
||||
|
||||
The key \texttt{bp\_msg\_schedule} controls the message sending order. Its possible values are:
|
||||
The \texttt{bp\_msg\_schedule} key allows one to control the message sending order. Its possible values are:
|
||||
\begin{itemize}
|
||||
\item \texttt{seq\_fixed}, at each iteration, all messages are sent with the same order.
|
||||
\item \texttt{seq\_fixed} (default), at each iteration, all messages are sent with the same order.
|
||||
|
||||
\item \texttt{seq\_random}, at each iteration, all messages are sent with a random order.
|
||||
|
||||
@ -339,9 +329,22 @@ The key \texttt{bp\_msg\_schedule} controls the message sending order. Its possi
|
||||
|
||||
\item \texttt{max\_residual}, the next message to be sent is the one with maximum residual (as explained in the paper \textit{Residual Belief Propagation: Informed Scheduling for Asynchronous Message Passing}).
|
||||
\end{itemize}
|
||||
It defaults to \texttt{seq\_fixed}.
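As an illustration only (the particular values below are arbitrary choices, not recommendations), these keys could be set with goals such as:

\texttt{?- set\_horus\_flag(bp\_msg\_schedule, max\_residual).}

\texttt{?- set\_horus\_flag(bp\_accuracy, 0.00001).}

\texttt{?- set\_horus\_flag(bp\_max\_iter, 500).}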
|
||||
|
||||
The \texttt{export\_libdai} and \texttt{export\_uai} keys can be used to export the current model respectively to \href{http://cs.ru.nl/~jorism/libDAI/doc/fileformats.html}{libDAI}, and \href{http://graphmod.ics.uci.edu/uai08/FileFormat}{UAI08} formats. With the \texttt{export\_graphviz} key it is possible to save the factor graph into a format that can be read by \href{http://www.graphviz.org/}{Graphviz}. The \texttt{print\_fg} key allows to print all factors before perform inference. All these four keys accept \texttt{true} and \texttt{false} as their values and only produce effect in \texttt{hve}, \texttt{bp}, and \texttt{cbp} solvers.
|
||||
The \texttt{export\_libdai} and \texttt{export\_uai} keys can be used to export the current model to the \href{http://cs.ru.nl/~jorism/libDAI/doc/fileformats.html}{libDAI} and \href{http://graphmod.ics.uci.edu/uai08/FileFormat}{UAI08} formats, respectively. With the \texttt{export\_graphviz} key it is possible to export the factor graph structure into a format that can be parsed by \href{http://www.graphviz.org/}{Graphviz}. The \texttt{print\_fg} key allows printing a textual representation of the factor graph. All these four keys accept \texttt{true} and \texttt{false} as their values and only have effect with the \texttt{hve}, \texttt{bp}, and \texttt{cbp} solvers.
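For example (illustrative), the factor graph could be exported to a Graphviz-readable file with:

\texttt{?- set\_horus\_flag(export\_graphviz, true).}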
|
||||
|
||||
|
||||
|
||||
%------------------------------------------------------------------------------
|
||||
%------------------------------------------------------------------------------
|
||||
%------------------------------------------------------------------------------
|
||||
%------------------------------------------------------------------------------
|
||||
\section{Parameter Learning}
|
||||
PFL is capable of learning the parameters of Bayesian networks through an implementation of the expectation-maximization algorithm.
|
||||
|
||||
Inside the \texttt{learning} directory of the examples directory, one can find some examples of how learning works in PFL.
|
||||
|
||||
It is possible to choose the solver that will be used for the inference part during parameter learning with the \texttt{set\_em\_solver/1} predicate (defaults to \texttt{hve}). At the moment, only the following solvers support parameter learning: \texttt{ve}, \texttt{hve}, \texttt{bdd}, \texttt{bp} and \texttt{cbp}.
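For example (illustrative goal), choosing belief propagation as the inference engine used during learning amounts to calling:

\texttt{?- set\_em\_solver(bp).}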
|
||||
|
||||
|
||||
|
||||
|
||||
@ -350,7 +353,7 @@ The \texttt{export\_libdai} and \texttt{export\_uai} keys can be used to export
|
||||
%------------------------------------------------------------------------------
|
||||
%------------------------------------------------------------------------------
|
||||
\section{Horus Command Line}
|
||||
This package also includes an external interface to YAP for perform inference over probabilistic graphical models described in formats other than PFL. Currently two are support, the \href{http://cs.ru.nl/~jorism/libDAI/doc/fileformats.html}{libDAI file format}, and the \href{http://graphmod.ics.uci.edu/uai08/FileFormat}{UAI08 file format}.
|
||||
This package also includes an external command for performing inference over probabilistic graphical models described in formats other than PFL. Currently two formats are supported: the \href{http://cs.ru.nl/~jorism/libDAI/doc/fileformats.html}{libDAI file format} and the \href{http://graphmod.ics.uci.edu/uai08/FileFormat}{UAI08 file format}.
|
||||
|
||||
This utility is called \texttt{hcli} and its usage is as follows.
|
||||
|
||||
@ -367,7 +370,7 @@ With the above command, the program will load the model and print the marginal p
|
||||
|
||||
\texttt{\$ ./hcli burglary-alarm.uai 0}
|
||||
|
||||
If we give more than one variable identifier as argument, the program will output the joint probability for all variables given.
|
||||
If more than one variable identifier is given as an argument, the program will output the joint probability for all the given variables.
|
||||
|
||||
Evidence can be given as a pair containing a variable identifier and its observed state (index), separated by a '='. For instance, we can introduce knowledge that some variable with identifier $0$ has evidence on its second state as follows.
|
||||
|
||||
@ -379,7 +382,7 @@ By default, all probability tasks are resolved using the \texttt{hve} solver. It
|
||||
|
||||
Notice that only the \texttt{hve}, \texttt{bp} and \texttt{cbp} solvers can be used with \texttt{hcli}.
|
||||
|
||||
The options that are available with the \texttt{set\_horus\_flag/2} predicate can be used in \texttt{hcli} too. The syntax to use are pairs \texttt{<Key>=<Value>} before the model's file name.
|
||||
The options that are available with the \texttt{set\_horus\_flag/2} predicate can be used in \texttt{hcli} too. The syntax is a pair \texttt{<Key>=<Value>} before the model's file name.
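For instance (illustrative invocation, reusing the \texttt{burglary-alarm.uai} model mentioned above), basic debugging output could be requested with:

\texttt{\$ ./hcli verbosity=1 burglary-alarm.uai}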
|
||||
|
||||
|
||||
|
||||
@ -390,6 +393,6 @@ The options that are available with the \texttt{set\_horus\_flag/2} predicate ca
|
||||
\section{Further Information}
|
||||
Please check the paper \textit{Evaluating Inference Algorithms for the Prolog Factor Language} for further information.
|
||||
|
||||
Any question don't hesitate to contact us!
|
||||
Any questions? Don't hesitate to contact us!
|
||||
|
||||
\end{document}
|
||||
|