From 32266a8b9801a4483641deef63699b07e5701db6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?V=C3=ADtor=20Santos=20Costa?=
Date: Wed, 10 Oct 2018 09:39:04 +0100
Subject: [PATCH] problog: fix LBFGS-based parameter learning, start-up
 verbosity, and local flag documentation

---
 C/yap-args.c                          |  3 +
 H/YapLFlagInfo.h                      | 26 ++++++--
 packages/ProbLog/problog.yap          |  2 +-
 packages/ProbLog/problog_lbfgs.yap    | 93 ++++++++++++++++-----------
 packages/ProbLog/problog_learning.yap |  2 +-
 pl/qly.yap                            |  6 +-
 6 files changed, 83 insertions(+), 49 deletions(-)

diff --git a/C/yap-args.c b/C/yap-args.c
index 9c621e45c..78c30e4ba 100755
--- a/C/yap-args.c
+++ b/C/yap-args.c
@@ -1056,6 +1056,9 @@ X_API void YAP_Init(YAP_init_args *yap_init) {
   init_globals(yap_init);
 
   start_modules();
+  if (yap_init->QuietMode) {
+    setVerbosity(TermSilent);
+  }
   if (yap_init->install && Yap_OUTPUT_STARTUP) {
     setAtomicGlobalPrologFlag(RESOURCE_DATABASE_FLAG,
                               MkAtomTerm(Yap_LookupAtom(Yap_INPUT_STARTUP)));
diff --git a/H/YapLFlagInfo.h b/H/YapLFlagInfo.h
index 22cce7aa7..376a4989e 100644
--- a/H/YapLFlagInfo.h
+++ b/H/YapLFlagInfo.h
@@ -30,10 +30,11 @@ START_LOCAL_FLAGS
 
 /** + `autoload`: set the system to look for undefined procedures */
 YAP_FLAG(AUTOLOAD_FLAG, "autoload", true, booleanFlag, "false", NULL),
-  /** + `read-only flag, that tells if Prolog is in an inner top-level */
+
+/** + `break_level`: read-only flag that tells whether Prolog is in an inner top-level */
 YAP_FLAG(BREAK_LEVEL_FLAG, "break_level", true, nat, "0", NULL),
-YAP_FLAG(CALL_COUNTING_FLAG, "call_counting", true, booleanFlag, "true",
-         NULL), /** + `call_counting`
+
+  /** + `call_counting`
 
    Predicates compiled with this flag set maintain a counter
    on the numbers of proceduree calls and of retries. These counters
@@ -51,18 +52,29 @@ YAP_FLAG(AUTOLOAD_FLAG, "autoload", true, booleanFlag, "false", NULL),
    If `on` `fileerrors` is `on`, if `off` (default) `fileerrors` is
    disabled.
 */
+  YAP_FLAG(CALL_COUNTING_FLAG, "call_counting", true, booleanFlag, "true",
+          NULL),
+
+/** + `encoding`: support for coding systems; YAP relies on UTF-8 internally.
+ */
 YAP_FLAG(ENCODING_FLAG, "encoding", true, isatom, "utf-8", getenc),
-YAP_FLAG(FILEERRORS_FLAG, "fileerrors", true, booleanFlag, "true",
+
+/** + `fileerrors`: what to do if opening a file fails.
+
+ */
+  YAP_FLAG(FILEERRORS_FLAG, "fileerrors", true, booleanFlag, "true",
           NULL), /** + `fileerrors`
 
  If `on` `fileerrors` is `on`, if `off` (default) `fileerrors` is
  disabled.
 */
-  YAP_FLAG(LANGUAGE_MODE_FLAG, "language_mode", true, isatom, "yap",
-            NULL), /** + `language_mode`
+  /** + `language_mode`
 
-     wweter native mode or trying to emulate a different Prolog.
+   whether to run in native mode or to emulate a different
+   Prolog.
 */
+  YAP_FLAG(LANGUAGE_MODE_FLAG, "language_mode", true, isatom, "yap",
+          NULL),
 YAP_FLAG(STACK_DUMP_ON_ERROR_FLAG, "stack_dump_on_error", true, booleanFlag,
          "true", NULL), /** + `stack_dump_on_error `
diff --git a/packages/ProbLog/problog.yap b/packages/ProbLog/problog.yap
index 3dece1c0a..0175db465 100644
--- a/packages/ProbLog/problog.yap
+++ b/packages/ProbLog/problog.yap
@@ -2613,7 +2613,7 @@ compute_bounds(LP, UP, Status) :-
 % same as problog_threshold/5, but lower bound only (no stopped derivations stored)
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 
-problog_low(Goal/Cond, Threshold, _, _) :-
+problog_low(Goal/Cond, Threshold, Status, P) :-
     !,
     problog_low((Cond,Goal), Threshold, P1, Status),
     problog_low( Cond, Threshold, P2, Status),
diff --git a/packages/ProbLog/problog_lbfgs.yap b/packages/ProbLog/problog_lbfgs.yap
index c3556c207..55822ca75 100644
--- a/packages/ProbLog/problog_lbfgs.yap
+++ b/packages/ProbLog/problog_lbfgs.yap
@@ -563,7 +563,7 @@ empty_bdd_directory.
 set_default_gradient_method :-
     problog_flag(continuous_facts, true),
     !,
-    problog_flag(init_method,OldMethod),
+    %	problog_flag(init_method,OldMethod),
     format_learning(2,'Theory uses continuous facts.~nWill use problog_exact/3 as initalization method.~2n',[]),
     set_problog_flag(init_method,(Query,Probability,BDDFile,ProbFile,problog_exact_save(Query,Probability,_Status,BDDFile,ProbFile))).
 set_default_gradient_method :-
@@ -592,7 +592,7 @@ bdd_input_file(Filename) :-
     problog_flag(output_directory,Dir),
     concat_path_with_filename(Dir,'input.txt',Filename).
 
-init_one_query(QueryID,Query,Type) :-
+init_one_query(QueryID,Query,_Type) :-
 %	format_learning(3,' ~q example ~q: ~q~n',[Type,QueryID,Query]),
 
     %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -784,7 +784,7 @@ mse_testset :-
         logger_set_variable(mse_min_testset,MinError),
         logger_set_variable(mse_max_testset,MaxError),
         logger_set_variable(llh_test_queries,LLH_Test_Queries),
-        format_learning(2,' (~8f)~n',[M]).
+        format_learning(2,' (~8f)~n',[MSE]).
 
 %========================================================================
@@ -819,8 +819,9 @@ save_old_probabilities :-
 
 % vsc: avoid silly search
 gradient_descent :-
-    current_iteration(Iteration),
-    create_training_predictions_file_name(Iteration,File_Name),
+    problog_flag(sigmoid_slope,Slope),
+%   current_iteration(Iteration),
+    %	create_training_predictions_file_name(Iteration,File_Name),
     Handle = user_error,
     format(Handle,"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%~n",[]),
     format(Handle,"% Iteration, train/test, QueryID, Query, GroundTruth, Prediction %~n",[]),
@@ -829,70 +830,89 @@ gradient_descent :-
     findall(FactID,tunable_fact(FactID,GroundTruth),L),
     length(L,N),
 %	leash(0),trace,
     lbfgs_initialize(N,X,0,Solver),
-    N1 is N-1,
-    forall(tunable_fact(FactID,GroundTruth), (X[FactID] <== 0.5)),
+    forall(tunable_fact(FactID,GroundTruth),
+           (XZ is 0.5, X[FactID] <== XZ,set_fact_probability(FactID,XZ))),
     problog_flag(sigmoid_slope,Slope),
+    %%% lbfgs_set_parameter(min_step, 2e-40, Solver),
     lbfgs_run(Solver,BestF),
     format('~2nOptimization done~nWe found a minimum ~4f.~n',[BestF]),
-    forall(tunable_fact(FactID,GroundTruth), set_tunable(FactID,GroundTruth,X)),
+    forall(tunable_fact(FactID,GroundTruth), set_tunable(FactID,X)),
     lbfgs_finalize(Solver).
 
-set_tunable(I, GroundTruth,P) :-
+set_tunable(I,P) :-
     Pr <== P[I],
-    get_fact(I,Source),
-    format('fact(~d, ~q, ~4f, ~4f).~n',[I,Source,GroundTruth,Pr]),
     set_fact_probability(I,Pr).
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 % start calculate gradient
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 user:evaluate(LLH_Training_Queries, X,Grad,N,_,_) :-
-    Handle = user_error,
+    %Handle = user_error,
+    GradCount <== array[N] of ints,
     problog_flag(sigmoid_slope,Slope),
     Probs = X,
     N1 is N-1,
+
     forall(between(0,N1,I),
            (Grad[I] <== 0.0) %, sigmoid(X[I],Slope,Probs[I]) )
-          ),
+    ),
     findall(LL,
-            compute_grad(N, X, Grad, Probs, Slope, Handle, LL),
+            compute_grad(Grad, GradCount, Probs, Slope, LL),
             LLs
            ),
-    sum_list(LLs,LLH_Training_Queries),
-    forall(tunable_fact(FactID,GroundTruth), (Z<==X[FactID],W<==Grad[FactID],writeln(FactID:(W->Z)))).
+    sum_list(LLs,LLH_Training_Queries).
+%wrap(X, Grad, GradCount).
 
-compute_grad(N, X, Grad, Probs, Slope, Handle, LL) :-
+
+compute_grad(Grad, GradCount, Probs, Slope, LL) :-
     user:example(QueryID,_Query,QueryProb,_),
     recorded(QueryID,BDD,_),
     BDD = bdd(_Dir, _GradTree, MapList),
-    bind_maplist(MapList, Slope, Probs),
-%writeln( qprobability(BDD,Slope,BDDProb) ),
+    MapList = [_|_],
+    bind_maplist(MapList, Slope, Probs),
+%writeln( MapList ),
     qprobability(BDD,Slope,BDDProb),
-%writeln( gradientpair(BDD,Slope,BDDProb, QueryProb, Grad) ),
-    gradientpair(BDD,Slope,BDDProb, QueryProb, Grad),
-    LL is (((BDDProb)-(QueryProb))**2).
+    LL is (((BDDProb)-(QueryProb))**2),
+%writeln( qprobability(BDD,Slope,BDDProb) ),
+    forall(
+           member(I-_, MapList),
+           gradientpair(I, BDD,Slope,BDDProb, QueryProb, Grad, GradCount)
+          ).
 
-gradientpair(BDD,Slope,BDDProb, QueryProb, Grad) :-
-    qgradient(BDD, Slope, FactID, GradValue),
+gradientpair(I, BDD,Slope,BDDProb, QueryProb, Grad, GradCount) :-
+    qgradient(I, BDD, Slope, FactID, GradValue),
     % writeln(FactID),
     G0 <== Grad[FactID],
 %writeln( GN is G0-GradValue*(QueryProb-BDDProb)),
-    GN is G0-GradValue*(QueryProb-BDDProb),
-    %writeln(FactID:(G0->GN)),
+    GN is G0-GradValue*2*(QueryProb-BDDProb),
+    %writeln(FactID:(G0->GN)),
+    GC <== GradCount[FactID],
+    GC1 is GC+1,
+    GradCount[FactID] <== GC1,
     Grad[FactID] <== GN.
-gradientpair(_BDD,_Slope,_BDDProb, _Grad).
 
-qprobability(bdd(Dir, Tree, MapList), Slope, Prob) :-
+qprobability(bdd(Dir, Tree, _MapList), Slope, Prob) :-
 /* query_probability(21,6.775948e-01). */
     run_sp(Tree, Slope, 1.0, Prob0),
     (Dir == 1 -> Prob0 = Prob ; Prob is 1.0-Prob0).
 
-qgradient(bdd(Dir, Tree, MapList), Slope, I, Grad) :-
-    member(I-_, MapList),
+qgradient(I, bdd(Dir, Tree, _MapList), Slope, I, Grad) :-
     run_grad(Tree, I, Slope, 0.0, Grad0),
     ( Dir = 1 -> Grad = Grad0 ; Grad is -Grad0).
 
+wrap( X, Grad, GradCount) :-
+    tunable_fact(FactID,GroundTruth),
+    Z<==X[FactID],
+    W<==Grad[FactID],
+    WC<==GradCount[FactID],
+    WC > 0,
+    format('ex(~d, ~q, ~4f, ~4f).~n',[FactID,GroundTruth,Z,W]),
+%   Grad[FactID] <== WN,
+    fail.
+wrap( _X, _Grad, _GradCount).
+
 
 % writeln(grad(QueryID:I:Grad)),
 % assert(query_gradient_intern(QueryID,I,p,Grad)),
 % fail.
@@ -931,17 +951,17 @@ run_grad([gnodep(P,G, EP, Id, PL, GL, PR, GR)|Tree], I, Slope, _, GF) :-
     P is EP*PL+ (1.0-EP)*PR,
     G0 is EP*GL + (1.0-EP)*GR,
     % don' t forget the -X
-    ( I == Id -> G is G0+(PL-PR)* EP*(1-EP)*Slope ; G = G0 ),
+    ( I == Id -> G is PL-PR ; G = G0 ),
     run_grad(Tree, I, Slope, G, GF).
 run_grad([gnoden(P,G, EP, Id, PL, GL, PR, GR)|Tree], I, Slope, _, GF) :-
     P is EP*PL + (1.0-EP)*(1.0 - PR),
     G0 is EP*GL - (1.0 - EP) * GR,
-    ( I == Id -> G is G0+(PL+PR-1)*EP*(1-EP)*Slope ; G = G0 ),
+    ( I == Id -> G is PL-(1.0-PR) ; G = G0 ),
     run_grad(Tree, I, Slope, G, GF).
-prob2log(X,Slope,FactID,V) :-
+prob2log(_X,Slope,FactID,V) :-
     get_fact_probability(FactID, V0),
     inv_sigmoid(V0, Slope, V).
 
@@ -949,17 +969,16 @@ log2prob(X,Slope,FactID,V) :-
     V0 <== X[FactID],
     sigmoid(V0, Slope, V).
 
-bind_maplist([], Slope, X).
+bind_maplist([], _Slope, _X).
 bind_maplist([Node-Pr|MapList], Slope, X) :-
-    V0 <== X[Node],
-sigmoid(V0, Slope, V),
+    Pr <== X[Node],
     bind_maplist(MapList, Slope, X).
 
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 % stop calculate gradient
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 user:progress(FX,X,_G,X_Norm,G_Norm,Step,_N,Iteration,Ls,0) :-
-    problog_flag(sigmoid_slope,Slope),
+    % problog_flag(sigmoid_slope,Slope),
     X0 <== X[0], X1 <== X[1],
     format('~d. Iteration : (x0,x1)=(~4f,~4f)  f(X)=~4f  |X|=~4f  |X\'|=~4f  Step=~4f  Ls=~4f~n',[Iteration,X0 ,X1,FX,X_Norm,G_Norm,Step,Ls]).
diff --git a/packages/ProbLog/problog_learning.yap b/packages/ProbLog/problog_learning.yap
index 1797a2a84..5d60bf244 100644
--- a/packages/ProbLog/problog_learning.yap
+++ b/packages/ProbLog/problog_learning.yap
@@ -1335,7 +1335,7 @@ lineSearch(Final_X,Final_Value) :-
 
     line_search_evaluate_point(InitLeft,Value_InitLeft),
 
-    Parameters=ls(A,B,InitLeft,InitRight,Value_A,Value_B,Value_InitLeft,Value_InitRight,1),
+    Parameters=ls(A,B,InitLeft,InitRight,Value_A,Value_B,Value_InitLeft,Value_InitRight,1),
 
     %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
     %%%% BEGIN BACK TRACKING
diff --git a/pl/qly.yap b/pl/qly.yap
index 209f002c3..1e5731909 100755
--- a/pl/qly.yap
+++ b/pl/qly.yap
@@ -357,11 +357,11 @@ available it tries reconsulting the source file.
 
 */
 qload_module(Mod) :-
-    ( current_prolog_flag(verbose_load, false)
+    ( current_prolog_flag(verbose_load, true)
     ->
-      Verbosity = silent
-    ; Verbosity = informational
+      Verbosity = informational
+    ; current_prolog_flag(verbose_load, Verbosity)
     ),
     StartMsg = loading_module,
     EndMsg = module_loaded,
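The H/YapLFlagInfo.h hunks move each YAP_FLAG declaration after its doc comment, so the documentation attaches to the right flag. From the Prolog top level the documented flags are reachable through current_prolog_flag/2 and set_prolog_flag/2; the comments below are assumptions based on the defaults declared in the table, not transcripts of an actual session:

    ?- current_prolog_flag(break_level, B).   % 0 unless inside an inner top-level
    ?- current_prolog_flag(encoding, E).      % utf-8 by default
    ?- set_prolog_flag(fileerrors, false).    % stop raising errors when open/3 fails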
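The rewritten gradient_descent drives the optimizer through lbfgs_initialize/4, lbfgs_run/2, and lbfgs_finalize/1, with user:evaluate/6 computing the objective and gradient and user:progress/10 reporting each iteration. Below is a minimal sketch of the same calling cycle on a toy convex function; the callback signatures follow the ones in the patch, but the toy objective, toy_descent/1, and the assumption that the binding loads as library(lbfgs) are illustrative only:

    :- use_module(library(lbfgs)).

    % Toy objective f(x0,x1) = (x0-2)^2 + (x1+1)^2, minimum at (2,-1).
    user:evaluate(FX, X, Grad, _N, _Step, _User) :-
            X0 <== X[0], X1 <== X[1],
            FX is (X0-2)**2 + (X1+1)**2,
            G0 is 2*(X0-2), G1 is 2*(X1+1),
            Grad[0] <== G0, Grad[1] <== G1.

    % Report progress; the final 0 tells the solver to keep iterating.
    user:progress(FX, X, _G, _XNorm, _GNorm, _Step, _N, Iter, _Ls, 0) :-
            X0 <== X[0], X1 <== X[1],
            format('~d: f(~4f,~4f)=~4f~n', [Iter, X0, X1, FX]).

    toy_descent(BestF) :-
            lbfgs_initialize(2, X, 0, Solver),
            X[0] <== 0.0, X[1] <== 0.0,
            lbfgs_run(Solver, BestF),
            lbfgs_finalize(Solver).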
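In compute_grad/5 the per-query loss is LL = (BDDProb-QueryProb)**2, whose derivative with respect to a fact probability is 2*(BDDProb-QueryProb) times the BDD gradient; the factor 2 introduced in gradientpair/7 (GN is G0-GradValue*2*(QueryProb-BDDProb)) makes the accumulated value the exact derivative of that squared error. The sigmoid/3 and inv_sigmoid/3 helpers used by log2prob/4 and prob2log/4 are not part of these hunks; a plausible definition consistent with the slope parameter (an assumption, not necessarily ProbLog's actual code) is:

    % Assumed slope-scaled logistic mapping between unconstrained
    % weights and probabilities, and its inverse.
    sigmoid(X, Slope, P) :-
            P is 1.0 / (1.0 + exp(-X * Slope)).

    inv_sigmoid(P, Slope, X) :-
            X is -log(1.0 / P - 1.0) / Slope.

Under this mapping dP/dX = Slope*P*(1-P), which is exactly the EP*(1-EP)*Slope factor the old run_grad clauses folded in; the rewritten clauses drop it, so run_grad now returns derivatives with respect to the node probabilities rather than the underlying weights.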
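With the qload_module/1 hunk, verbose_load doubles as the message verbosity: the literal value true maps to informational output, and any other value is passed through unchanged as the message severity. A hypothetical quiet load, where mymod is a placeholder module name:

    ?- set_prolog_flag(verbose_load, silent),
       qload_module(mymod).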