1 % (c) 2009-2024 Lehrstuhl fuer Softwaretechnik und Programmiersprachen,
2 % Heinrich Heine Universitaet Duesseldorf
3 % This software is licenced under EPL 1.0 (http://www.eclipse.org/org/documents/epl-v10.html)
4
5 %:- use_module('../extensions/profiler/profiler.pl').
6
7 :- module(test_runner, [
8 test_repl/0,
9 run_silently/0,
10 run_safely/0,
11 run_last_test/0, last/0,
12 run_tests_by_id/1, ids/1, run_id/1,
13 run_tests_all/0, run_tests_all_except/1,
14 run_tests_by_category/1, run_tests_by_category/2, category/1,
15 run_tests_by_first_category/1,
16 makefile_by_category/2, copy/1,
17 generate_makefile_all_tests/0,
18 generate_makefile_for_coverage/0,
19 full_flush/0,
20 jenkins_sanity_check/0,
21 sanity_check/1,
22 v/0, vv/0,
23 cache/0, % enable caching
24 all_categories/1,
25 all_ids/1,
26 ids_in_category/2,
27 set_prob_examples_location/1,
halt_tests/0 % call this to halt with the proper exit code; normally the test predicates call halt1 directly, so this is mainly useful in junit mode
29 ]).
30
31 :- if(predicate_property(expects_dialect(_), _)).
32 :- expects_dialect(sicstus4).
33 :- endif.
34
35 % Some errors, such as syntax errors, are only printed through the message mechanism and not thrown as exceptions.
36 % These errors also don't terminate SICStus or affect the exit code,
37 % which makes it easy for them to go unnoticed in CI runs.
38 % As a workaround, add a portray_message hook to detect error messages
39 % and halt the test runner at an appropriate time.
40
41 :- dynamic(prolog_error_occurred/0).
42
43 :- multifile(user:portray_message/2).
44 user:portray_message(error, _) :-
45 \+ prolog_error_occurred,
46 assertz(prolog_error_occurred),
47 fail. % to let SICStus portray the error
48
49 check_no_prolog_startup_error :-
50 prolog_error_occurred,
51 !,
52 format('*** Error(s) from Prolog while starting test runner! Aborting. ***~n', []),
53 halt(1).
54 check_no_prolog_startup_error.
55 :- initialization(check_no_prolog_startup_error).
56
57 :- use_module('../src/pathes', []). % set up library search paths
58
% uncomment to ensure that plspec is loaded first:
60 % :- use_module('../extensions/plspec/plspec/plspec_core').
61
62 :- use_module(probsrc(prob_cli)). % we call go_cli
63
64 %:- set_prolog_flag(compiling,debugcode).
65 %:- set_prolog_flag(source_info,on).
66 %:- set_prolog_flag(profiling,on).
67
68 /*
:- use_module('../extensions/profiler/profiler.pl'). % (adds the profiler itself)
:- use_module('../extensions/profiler/profiler_te.pl'). % (adds the term expander)
%:- use_module('../extensions/profiler/profiler_gui.pl'). % (adds the profiler GUI)
72 :- enable_profiling_all(b_expression_sharing).
73 :- enable_profiling(bsyntaxtree:transform_bexpr_with_acc/5).
74 :- enable_profiling(bsyntaxtree:transform_bexpr/3).
75 :- enable_profiling(b_ast_cleanup:recompute_used_ids_info/2).
76 :- enable_profiling(bsyntaxtree:sub_expression_contains_wd_condition/1).
77
78 for starting from Spider:
79 set_prob_examples_location('../../prob_examples'), run_silently, test_repl.
80 or use test_runner_cov.pl
81 */
82
83
84 :- use_module(library(system)).
85 :- use_module(library(lists)).
86 :- use_module(library(sets), [intersect/2]).
87 :- use_module(library(file_systems)).
88
89 :- use_module(probsrc(junit_tests),[set_junit_dir/1,create_and_print_junit_result/4]).
90 :- use_module(testcases).
91 :- use_module(probsrc(system_call)).
92 :- use_module(probsrc(tools),[safe_number_codes/2,get_options/5]).
93 :- use_module(probsrc(tools_commands),[edit_file/1]).
94 :- use_module(probsrc(tools_printing), [start_terminal_colour/2, reset_terminal_colour/1, format_with_colour/4, format_with_colour_nl/4]).
95 :- use_module(probsrc(error_manager),[get_total_number_of_errors/1, get_all_errors/1]).
96 :- use_module(probsrc(debug),[formatsilent/3, debug_mode/1]).
97 :- use_module(probsrc(performance_messages),[toggle_perfmessages/0]).
98
99 :- use_module(probsrc(module_information)).
100 :- module_info(group,testing).
101 :- module_info(description,'This module runs the tests stored in testcases.pl.').
102
103 :- set_prolog_flag(double_quotes, codes).
104
105
106 :- use_module(probsrc(tools), [ajoin_with_sep/3]).
107 generate_makefile(RegenerateGoal,Filename,TargetCommand) :-
108 tell(Filename),
109 all_first_categories(AllTestCategories),
110 ajoin_with_sep(AllTestCategories, ' \\\n\t', JoinedCategories),
111 write('# This Makefile is generated automatically. DO NOT EDIT.\n'),
112 write('# If you have added/removed a test category, regenerate using this command:\n'),
113 format('# ./prolog.sh --file tests/test_runner.pl --goal "~q, halt ; halt(1)."\n\n', [RegenerateGoal]),
114 format('categories = \\\n\t~w\n\n', [JoinedCategories]),
115 write('.PHONY: test_all_source\n'),
116 write('test_all_source: $(categories)\n\n'),
117 write('.PHONY: $(categories)\n'),
118 format('$(categories):\n\t~w\n', [TargetCommand]),
119 told.
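% For illustration, the generated Makefile looks roughly like this
% (cat1/cat2 and <cmd> are placeholders for the categories and TargetCommand):
%
%   # This Makefile is generated automatically. DO NOT EDIT.
%   # If you have added/removed a test category, regenerate using this command:
%   # ./prolog.sh --file tests/test_runner.pl --goal "<RegenerateGoal>, halt ; halt(1)."
%
%   categories = \
%           cat1 \
%           cat2
%
%   .PHONY: test_all_source
%   test_all_source: $(categories)
%
%   .PHONY: $(categories)
%   $(categories):
%           <cmd>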
120
121 %write_subtargets([G]) :- write(G).
122 %write_subtargets([G|Gs]) :-
123 % write(G), write(' '),
124 % write_subtargets(Gs).
125
126 generate_makefile_all_tests :-
% run each test only for its first category, to avoid running tests multiple times;
% this also avoids concurrency issues when a test is run multiple times in parallel with diff checking of output
129 generate_makefile(generate_makefile_all_tests, 'tests/Makefile_All_Tests',
130 % Makefile_All_Tests is (indirectly) included from the top-level Makefile,
131 % so the working directory is the repository root
132 % and the test_runner.pl path must include the tests/ directory prefix.
133 './prolog.sh --file tests/test_runner.pl --goal "run_silently, run_tests_by_first_category($@), halt_tests ; halt(1)."').
134
135 :- use_module(probsrc(tools), [host_platform/1]).
136 % TODO Do we want to install SWI-Prolog on all CI runners?
137 group_cannot_be_checked_on_ci_server(setlog).
138 group_cannot_be_checked_on_ci_server(smt_solver_integration) :-
139 host_platform(windows). % TODO Install Z3 on GitLab CI Windows runner
140
141 generate_makefile_for_coverage :-
142 generate_makefile(generate_makefile_for_coverage, 'Makefile_Coverage_Groups', './prolog.sh --file tests/prob_cov_runner.pl --goal "coverage_group_data($@), halt ; halt(1)."').
143
144 all_first_categories(X) :- % only categories that appear as first category
145 findall(Group1,
146 cli_testcase(_Id,[Group1|_],_Conf,_Args,_Desc),
147 ListOfGroups),
148 remove_dups(ListOfGroups,X).
149 all_categories(X) :-
150 findall(Groups,
151 cli_testcase(_Id,Groups,_Conf,_Args,_Desc),
152 List),
153 append(List,ListOfGroups),
154 remove_dups(ListOfGroups,X).
155
156 all_ids(X) :-
157 findall(Id,
158 cli_testcase(Id,_Groups,_Conf,_Args,_Desc),
159 List),
160 remove_dups(List,X).
161
162 ids_in_category(Category,IDs) :-
163 findall(Id,
164 (cli_testcase(Id,Groups,_Conf,_Args,_Desc),member(Category,Groups)),
165 List),
166 remove_dups(List,IDs).
167
168 prob_junit_dir(JUnitDir) :- environ('PROB_JUNIT_DIR', JUnitDir).
169
170 prob_junit_args(JUnitArgs) :-
171 prob_junit_dir(JUnitDir), !,
172 JUnitArgs = ['-junit', JUnitDir].
173 prob_junit_args([]).
174
175 halt1_allowed :- \+ repl_mode.
176
177 tests_multiply_timeout(Var) :-
178 environ('TESTS_MULTIPLY_TIMEOUT', Var2), !, tools:arg_is_number(Var2,Var).
179 tests_multiply_timeout(Var) :- multiply_timeout(Var).
180
181 :- dynamic multiply_timeout/1.
182
183 % call updir from within Spider, when you are in the src subdirectory
184 set_cur_dir :- (directory_exists('./src') -> true /* we seem to be in the right directory */
185 ; directory_exists('./cia') -> current_directory(_Old,'./..')
186 ; print('Please start tests from ProB directory or ProB/src directory'),nl,
187 print('Current directory: '),
188 current_directory(CD), print(CD), nl).
189
190 reset_test_runner(SessionInfo) :-
191 reset_nr_of_tests, set_cur_dir,
192 retractall(test_skipped(_)), retractall(test_failed(_)), retractall(last_test_failed(_)),
193 retractall(test_diff_failed(_,_,_)),
194 statistics(walltime,[WStart,_]),
195 bb_put(reset_test_runner_wtime,WStart),
196 bb_put(reset_test_runner_info,SessionInfo).
197
198
199 ids(Nr) :- show_command(Nr).
200 show_command(Nr) :- cli_testcase(Nr,_,_,L,Desc),
201 print('# '), print(Desc),nl, l_pr([probcli|L]).
202 l_pr([]) :- nl.
203 l_pr([H|T]) :- print(' '),print(H), l_pr(T).
204
205 :- volatile sanity_check_failed/0.
206 :- dynamic sanity_check_failed/0.
207 jenkins_sanity_check :-
208 sanity_check(true),
209 (sanity_check_failed -> halt1 ; halt(0)).
210 sanity_check(_Jenkins) :-
211 cli_testcase(Id,Categories,_,Args,_Comm1),
212 \+ member('-strict',Args),
213 format_warning('*** test does not use -strict option: ',[]), print_test(Id,Categories),nl,
214 assertz(sanity_check_failed), fail.
215 sanity_check(Jenkins) :-
216 Jenkins == false,
217 cli_testcase_diff_check_output(Id,_F1,_F2),
218 cli_testcase(Id,Categories,_I1,_A1,_Comm1),
219 (Categories = [_]
220 -> true
221 ; format_warning('*** multiple categories for test with output file; can generate race conditions when run in parallel: ',[]),
222 print_test(Id,Categories),
nl),
224 assertz(sanity_check_failed), fail.
225 sanity_check(_Jenkins) :-
226 cli_testcase_diff_check_output(Id,_F1,_F2),
227 \+ (cli_testcase(Id,_Categories,_I1,_A1,_Comm1)),
228 format_warning_nl('*** no cli_testcase, but diff output defined for id: ~w',[Id]),
229 assertz(sanity_check_failed), fail.
230 sanity_check(_Jenkins) :-
231 cli_testcase(Id,C1,I1,A1,Comm1),
232 cli_testcase(Id,C2,I2,A2,Comm2),
233 [C1,I1,A1,Comm1] \= [C2,I2,A2,Comm2],
234 format_warning_nl('*** multiple cli_testcases for id: ~w',[Id]),
235 assertz(sanity_check_failed), fail.
236 sanity_check(_Jenkins).
237
238
239 format_error_nl(F,Args) :- format_with_colour_nl(user_output,[red,bold],F,Args).
240 format_warning(F,Args) :- format_with_colour(user_output,[red],F,Args).
241 format_warning_nl(F,Args) :- format_with_colour_nl(user_output,[red],F,Args).
242 format_progress_nl(F,Args) :- format_with_colour_nl(user_output,[blue],F,Args).
243
244 print_test(Id,Comment) :- print(Id),print(' : '), print(Comment).
245
246 % run the latest test (with highest id)
247 last :- run_last_test.
248 run_last_test :- get_last_test_id(Id),
249 run_tests_by_id(Id,_,no_skipping).
250
251 get_last_test_id(Id) :- set_last_test_id, last_test_id(Id).
252
253 set_last_test_id :-
254 retractall(last_test_id(_)), assertz(last_test_id(-1)),
255 cli_testcase(Id,C1,I1,A1,Comm1),
256 (cli_testcase(Id,C2,I2,A2,Comm2),
257 (C1,I1,A1,Comm1) \= (C2,I2,A2,Comm2)
258 -> format_warning_nl('~n*** multiple entries for test id: ~w',[Id])
259 ; true),
260 update_last(Id),
261 fail.
262 set_last_test_id.
263
264 :- volatile last_test_id/1.
265 :- dynamic last_test_id/1.
266 update_last(Id) :- number(Id),!, % debug_print(9,Id), debug_print(9,' '),
267 retract(last_test_id(L)),
268 (Id>L -> Max = Id ; Max = L),
269 assertz(last_test_id(Max)).
270 update_last(Id) :- format_warning_nl('~n*** test id not a number: ~w',[Id]).
271
272 repeat_id(ID,Nr) :- repeat_id_aux(ID,0,Nr,success,10000000,0,0).
273 repeat_id_aux(ID,Nr,TotNr,Status1,Min,Max,Tot) :- Nr<TotNr, !, N1 is Nr+1,
274 statistics(walltime,[Start,_]),
275 run_tests_by_id(ID,Status,no_skipping),
276 (Status=success -> Status2=Status1, Col=blue ; Status2=Status, Col=red),
277 statistics(walltime,[Stop,_]),
278 WT is Stop - Start,
279 NewMin is min(Min,WT), NewMax is max(Max,WT), NewTot is Tot+WT,
280 Average is NewTot / N1,
format_with_colour_nl(user_output,[Col],'Test ~w repeated ~w/~w times~n Walltime Minimum = ~w ms, Maximum = ~w ms, Average = ~w ms~n Status=~w',[ID,N1,TotNr,NewMin,NewMax,Average,Status]),
282 repeat_id_aux(ID,N1,TotNr,Status2,NewMin,NewMax,NewTot).
283 repeat_id_aux(_,_,_TotNr,_Status,_Min,_Max,_).
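% For illustration (the id is a placeholder): repeat_id(42,5) runs test 42
% five times and reports minimum/maximum/average walltime after each run.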
284
285 % run a test with a specific id
286 run_id(X) :- run_tests_by_id(X,_,no_skipping).
287 run_tests_by_id(X) :- run_tests_by_id(X,_,allow_skipping).
288 run_tests_by_id(X,Status,AllowSkipping) :- reset_test_runner(id(X)),
289 phrase(tests_by_id(X), Testcases),
290 run_list(Testcases, AllowSkipping),
291 check_failed(Status).
292 tests_by_id([]) --> [].
293 tests_by_id(From-To) --> !,
294 ({From>To} -> []
295 ; tests_by_id(From), {F1 is From+1},
296 tests_by_id(F1-To)).
297 tests_by_id([Id | Ids]) --> !,
298 tests_by_id(Id),
299 tests_by_id(Ids).
300 tests_by_id(Id) -->
301 {cli_testcase(Id,TestCategories,TestInfos,Arguments,Comment)}, !,
302 [testcase(Id,TestCategories,TestInfos,Arguments,Comment)].
303 tests_by_id(Id) -->
304 {\+(cli_testcase(Id,_Category,_Infos,_Arguments,_Comment))}, !,
305 {format_warning_nl('*** No testcase with ID ~w found', [Id])}.
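% The ID spec accepted by run_tests_by_id/1 can be a single id, a From-To
% range, or a (possibly nested) list thereof; for illustration (ids are
% placeholders):
%   run_tests_by_id(42).
%   run_tests_by_id(10-20).
%   run_tests_by_id([1,3,10-20]).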
306
307 :- use_module(library(random),[random_select/3]).
308 run_random_tests(Nr) :-
309 findall(testcase(Id,TestCategories,TestInfos,Arguments,Comment),
310 cli_testcase(Id,TestCategories,TestInfos,Arguments,Comment),AllTests),
311 select_random_tests(Nr,AllTests,Tests),
312 reset_test_runner(random(Nr)),
313 run_list(Tests), check_failed(_).
314 select_random_tests(N,_,[]) :- N<1,!.
315 select_random_tests(_,[],[]) :- !.
316 select_random_tests(N,All,[X|Tests]) :-
317 random_select(X,All,Rest),
318 N1 is N-1,
319 select_random_tests(N1,Rest,Tests).
320
321
322 run_tests_all :- run_tests_all_except([]).
323
324 :- use_module(library(ordsets)).
325 run_tests_all_except(Categories) :- reset_test_runner(all_except(Categories)),
326 findall(Test, test_not_in_categories(Categories, Test), Tests),
327 run_list(Tests),
328 check_failed(_).
329
330 test_not_in_categories(Categories, Test) :-
331 sort(Categories, SC),
332 test_not_in_categories_aux(SC, Test).
333
334 :- use_module(library(ordsets), [ord_intersect/2]).
335 test_not_in_categories_aux(SC, testcase(Id,TestCategories,Infos,Arguments,Comment)) :-
336 cli_testcase(Id,TestCategories,Infos,Arguments,Comment),
337 sort(TestCategories,TC),
338 \+ ord_intersect(TC,SC).
339
340
341 :- dynamic silent_running/0, no_strict_running/0.
342 run_silently :- silent_running, !.
343 run_silently :- add_additional_arguments(['-silent']), assertz(silent_running).
344 run_no_strict :- (no_strict_running -> true ; assertz(no_strict_running)). % useful to see all errors of a test
345
346 % perform additional checks (e.g., used_ids info in AST) and throw errors when something is wrong
347 run_safely :- add_additional_preference('PROB_SAFE_MODE','TRUE').
348
349 % run all tests with a specific category
350 category(X) :- run_tests_by_category(X).
351 run_tests_by_category(X) :- run_tests_by_category(X,all).
352 run_tests_by_first_category(X) :- % used as entry in Jenkins when running all tests in parallel by category
353 run_tests_by_category(X,first).
354 run_tests_by_category(X,FirstOnly) :-
355 reset_test_runner(category(X,FirstOnly)),
356 (is_list(X) -> run_tests_by_category_aux(X,FirstOnly) ; run_tests_by_category_aux([X],FirstOnly)),
357 check_failed(_).
358 run_tests_by_category_aux(Categories,FirstOnly) :-
359 get_tests_for_categories(Categories,FirstOnly,List),
360 ( List=[] -> format_warning_nl('*** No testcase with categories ~w found', [Categories])
361 ; run_list(List)).
362
363 get_tests_for_categories(Categories,List) :-
364 get_tests_for_categories(Categories,all,List).
365 get_tests_for_categories(Categories,FirstOnly,List) :-
366 findall(testcase(Id, TestCategories, Infos, Arguments, Comment),
367 (cli_testcase(Id, TestCategories, Infos, Arguments, Comment),
368 (FirstOnly=first -> TestCategories=[C1|_], member(C1,Categories)
369 ; intersect(Categories, TestCategories)) ),
370 List).
371
372 run_tests_using_command(Command) :- reset_test_runner(cmd(Command)),
373 findall(testcase(Id, TestCategories, Infos, Arguments, Comment),
374 (cli_testcase(Id, TestCategories, Infos, Arguments, Comment),
375 member(Command, Arguments)),
376 List),
377 ( List=[] -> format_warning_nl('*** No testcase using command ~w found', [Command])
378 ; run_list(List), check_failed(_)).
379
380 run_tests_using_preference(Pref,Val) :- reset_test_runner(using_pref(Pref,Val)),
381 findall(testcase(Id, TestCategories, Infos, Arguments, Comment),
382 (cli_testcase(Id, TestCategories, Infos, Arguments, Comment),
383 append(_,['-p',CmdPref,Val|_],Arguments),
384 match_preference(Pref,CmdPref) ),
385 List),
386 ( List=[] -> format_warning_nl('*** No testcase using preference ~w with value ~w found', [Pref,Val])
387 ; run_list(List), check_failed(_)).
388
389 :- use_module(probsrc(preferences),[eclipse_preference/2]).
390 match_preference(A,A).
391 match_preference(A,B) :- eclipse_preference(A,B) ; eclipse_preference(B,A).
392
393 % Generate file list (for copying) of a category :
394 show_files(Cat) :-
395 cli_testcase(Id, TestCategories, _Infos, Arguments, _Comment),
396 member(Cat, TestCategories), % print(Arguments),nl,
397 ( file_in_arguments(File,Arguments)
398 ;
399 get_testcase_diff_check_output(Id,_GeneratedFile,File)),
400 format('~w ',[File]),
401 fail.
402 show_files(_) :- nl.
403
404 file_in_arguments(F,['-p',_,_|T]) :- !, file_in_arguments(F,T).
405 file_in_arguments(F,[C,_|T]) :- binary_command(C),!, file_in_arguments(F,T).
file_in_arguments(F,[H|T]) :- (is_file(H), H=F) ; file_in_arguments(F,T).
407 binary_command(eval). binary_command(evalt). binary_command(evalf).
408 binary_command('-mc').
409 is_file(F) :- atom(F), atom_codes(F,Codes), member(47,Codes),!.
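% For illustration (paths and values are placeholders): '-p' PREF VAL triples
% and binary-command arguments are skipped, and any atom containing a slash
% is treated as a file, so
%   file_in_arguments(F,['-p','CLPFD','TRUE','foo/bar.mch','-mc','1000'])
% binds F = 'foo/bar.mch'.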
410
411
412 % Generate a Makefile for all listed Categories
413 % example: makefile_by_category('AlstomMakefile',[animate,history,sptxt,rel_fnc])
414 % the Makefile can be run to perform the listed tests on a compiled version of probcli
415 makefile_by_category(File, Categories) :-
416 (is_list(Categories) -> Cats = Categories ; Cats = [Categories]),
417 my_open(File, Stream),
418 format(Stream, 'PROBCLI=probcli~n',[]),
419 format(Stream, 'test_all:', []),
420 makefile_write_categories(Stream, Cats),
421 makefile_by_category_aux(Stream, Cats),
422 my_close(Stream).
423 makefile_by_category_aux(_Stream, []).
424 makefile_by_category_aux(Stream, [Cat | Cats]) :-
425 makefile_by_category_single(Stream, Cat),
426 makefile_by_category_aux(Stream, Cats).
427 makefile_by_category_single(Stream, Cat) :-
428 findall(testcase(Id, TestCategories, Infos, Arguments, Comment),
429 (cli_testcase(Id, TestCategories, Infos, Arguments, Comment),
430 member(Cat, TestCategories)),
431 List),
432 (List=[] -> format_warning_nl('*** No testcase with category ~w found', [Cat])
433 ; format(Stream, '~n~w:~n', [Cat]), makefile_write_calls(Stream, List)).
434
435 my_open(user_output,S) :- !, S=user_output.
436 my_open(File,S) :- open(File,write,S).
437 my_close(user_output) :- !.
438 my_close(S) :- close(S).
439
440 makefile_write_categories(Stream, []) :-
441 format(Stream, '~n', []).
442 makefile_write_categories(Stream, [Cat | Cats]) :-
443 format(Stream, ' ~a', [Cat]),
444 makefile_write_categories(Stream, Cats).
445
446 makefile_write_calls(_Stream, []).
447 makefile_write_calls(Stream, [testcase(Id, _TestCategories, _Infos, Arguments, Comment) | Tests]) :-
448 (Comment='' -> true ; format(Stream,'\techo \"Test ~w : ~w\"~n',[Id,Comment])),
449 format(Stream, '\t$(PROBCLI)', []),
450 makefile_write_arguments(Stream, Arguments),
451 makefile_write_diff(Stream, Id),
452 makefile_write_calls(Stream, Tests).
453
454 makefile_write_arguments(Stream, []) :-
455 format(Stream, '~n', []).
456 makefile_write_arguments(Stream, [Arg | Args]) :-
457 format(Stream, ' ~w', [Arg]),
458 (quote_next_arg(Arg) -> makefile_write_arguments_quoted(Stream,Args)
459 ; makefile_write_arguments(Stream, Args)).
460
quote_next_arg('-goal'). % the next argument may contain spaces etc.; surround it in "" for the shell
462 quote_next_arg('--check_goal').
463 quote_next_arg('-check_goal').
464 quote_next_arg('-cbc_deadlock_pred').
465 quote_next_arg('-eval').
466 quote_next_arg('-evalt').
467 quote_next_arg('-evalf').
468 quote_next_arg('-cbc_sequence_with_target'). % actually quotes next two !
469 quote_next_arg('-cbc_sequence_with_target_all'). % ditto <- TO DO
470
471
472 makefile_write_arguments_quoted(Stream, []) :-
473 format(Stream, '~n', []).
474 makefile_write_arguments_quoted(Stream, [Arg | Args]) :-
475 format(Stream, ' \"~w\"', [Arg]),
476 makefile_write_arguments(Stream, Args).
477
478 makefile_write_diff(Stream, ID) :-
479 get_testcase_diff_check_output(ID, File1, File2),
480 format(Stream, '\tdiff -b ~w ~w~n', [File1, File2]),
481 fail.
482 makefile_write_diff(_Stream, _ID).
483
484 % -------------------------
485
486 copy(Cat) :- (Cat=[_|_] -> C=Cat ; C=[Cat]),
487 generate_copy_commands(C,'testarchive/').
488
489 :- use_module(probsrc(b_trace_checking),[get_default_trace_file/2]).
490 generate_copy_commands(Categories,Dest) :-
491 cli_testcase(ID, TestCategories, _Infos, Arguments, _Comment),
492 non_empty_inter(Categories,TestCategories), %print(ID),nl,
493 Arguments=[MainFile|_], generate_copy_command(MainFile,Dest), % print(MainFile),nl,
494 additional_testcase_file(ID,MainFile,Arguments,ExtraFile),
495 generate_copy_command(ExtraFile,Dest),
496 fail.
497 generate_copy_commands(_,_).
498
499 additional_testcase_file(ID,_,_,EFile) :- extra_testcase_file(ID,EFile).
500 additional_testcase_file(_ID,File,Arguments,TFile) :- member('-t',Arguments),
501 get_default_trace_file(File,TFile).
502 additional_testcase_file(ID,_,_,RefFile2) :- get_testcase_diff_check_output(ID,_File1,RefFile2).
503
504 non_empty_inter(A,B) :- member(X,A), member(X,B),!.
505
506 :- use_module(probsrc(tools_strings),[string_concatenate/3]).
507 :- use_module(probsrc(tools),[get_parent_directory/2]).
508 generate_copy_command(File,Dest) :-
509 safe_file_exists(File),
510 get_parent_directory(File,Dir),!,
511 string_concatenate(Dest,Dir,DestDir),
512 string_concatenate(Dest,File,DestFile),
513 format(user_output,'\tmkdir -p ~w~n',[DestDir]),
514 get_command_path(mkdir,MkCmdPath),
515 system_call(MkCmdPath, ['-p',DestDir],_Text1,_JExit1),
516 format(user_output,'\tcp ~w ~w~n',[File,DestFile]),
517 get_command_path(cp,CpCmdPath),
518 system_call(CpCmdPath, [File,DestFile],_Text2,_JExit2).
519 generate_copy_command(_,_).
520
521 % -------------------------
522
523 :- dynamic test_failed/1, last_test_failed/1, test_diff_failed/3, test_skipped/1.
524 halt_tests :-
525 prolog_error_occurred,
526 !,
527 halt1.
528 halt_tests :- test_failed(_),!,
529 halt1.
530 halt_tests :-
531 nl,print('TEST RUN SUCCESSFUL'),nl,
532 halt. % regular halt
533 halt1 :-
534 format_error_nl('TEST RUN FAILED',[]),
535 halt(1).
536
537 check_failed(failure) :- test_failed(X),!,print_failed_tests,
538 print('Use the following command to run individual tests: '),nl,
539 print(' ./prolog.sh --file tests/test_runner.pl --goal "run_id('),print(X),print(')."'),nl,
540 (halt1_allowed -> halt1 ; print('halt(1) :: dontstop mode'),nl).
541 check_failed(success) :- number_of_tests_run(Nr),
542 start_terminal_colour([green,bold],user_output),
543 findall(Y,test_skipped(Y),Skips), length(Skips,NrSkips),
544 bb_get(reset_test_runner_info,Info),
545 (Nr=1,NrSkips=0,Info\=category(_,_) -> format(user_output,'Test successful.~n',[])
546 ; NrSkips>0 -> format(user_output,'All ~w tests successful, ~w skipped (for ~w).~n',[Nr,NrSkips,Info])
547 ; format(user_output,'All ~w tests successful (for ~w).~n',[Nr,Info])),
548 reset_terminal_colour(user_output).
549 print_failed_tests :- number_of_tests_run(Nr),
550 findall(Y,test_failed(Y),Fails), length(Fails,NrFails),
551 findall(Y,test_skipped(Y),Skips), length(Skips,NrSkips),
552 start_terminal_colour([red,bold],user_error),
553 bb_get(reset_test_runner_info,Info),
554 format(user_error,'*** Tests run: ~w, skipped: ~w, failed: ~w (for ~w) ***~n*** Failed tests:~n',[Nr,NrSkips,NrFails,Info]),
555 test_failed(X), lookup_test_description(X,Desc),
556 format(user_error,'~w ~w~n',[X,Desc]),
557 fail.
558 print_failed_tests :- nl(user_error),
559 (user_interrupt_signal_received -> format(user_error,'Tests were interrupted by CTRL-C (user_interrupt)~n',[])
560 ; true),
561 reset_terminal_colour(user_error).
562
563 lookup_test_description(Id,Desc) :- cli_testcase(Id,_,_,_,Desc).
564
565 :- dynamic user_interrupt_signal_received/0.
566 run_list(List) :- run_list(List,allow_skipping).
567 run_list(List,AllowSkipping) :-
568 init_test_runner,
569 retractall(user_interrupt_signal_received),
570 length(List,Len),
571 run_single_testcase_list_aux(List,Len,AllowSkipping).
572 run_single_testcase_list_aux([],_,_) :- !.
573 run_single_testcase_list_aux(List,_Len,allow_skipping) :- user_interrupt_signal_received,!,
574 length(List,RestLen),
575 format('Skipping ~w remaining tests~n',[RestLen]),
576 maplist(assert_test_skipped,List).
577 run_single_testcase_list_aux([TC|Tail],Len,AllowSkipping) :-
578 print_progress_stats(Len),
579 run_single_testcase(TC,AllowSkipping),
580 run_single_testcase_list_aux(Tail,Len,AllowSkipping).
581 assert_test_skipped(Id) :- assertz(test_skipped(Id)).
582
583 print_progress_stats(All) :- number_of_tests_run(Nr), Nr>0,!,
584 findall(Y,test_failed(Y),Fails), length(Fails,NrFails),
585 (last_test_failed(Failed) -> ajoin(['(e.g. ', Failed, ')'],FailInfo) ; FailInfo = ''),
586 findall(Y,test_skipped(Y),Skips), length(Skips,NrSkips),
587 bb_get(reset_test_runner_wtime,WStart),
588 statistics(walltime,[WNow,_]), Delta is round((WNow - WStart) / 1000),
589 bb_get(reset_test_runner_info,Info),
590 format_progress_nl('Progress: ~w/~w tests run, ~w skipped, ~w failed ~w (running since ~w sec for ~w)',[Nr,All,NrSkips,NrFails,FailInfo,Delta,Info]).
591 print_progress_stats(_).
592
593 cache :- add_additional_arguments(['-cache','/Users/leuschel/svn_root/NewProB/examples/cache/']).
594 v :- add_additional_arguments(['-v']). % verbose
595 vv :- add_additional_arguments(['-vv']). % very_verbose
596
597 :- volatile additional_arguments/1.
598 :- dynamic additional_arguments/1.
599 % add additional cli arguments when running tests:
600 add_additional_arguments(List) :-
601 (retract(additional_arguments(Old)) -> true ; Old=[]),
602 append(Old,List,New),
603 assertz(additional_arguments(New)).
604
605 reset_additional_arguments :- retractall(additional_arguments(_)).
606
607 % auto complete a preference name and print error if no match found
608 get_preference_name(Prefix,Name) :- auto_complete_preference(Prefix,Name),!.
609 get_preference_name(Prefix,_) :-
610 format_warning_nl('No matching preference found for: ~w',[Prefix]),fail.
611
612 :- use_module(probsrc(preferences),[eclipse_preference/2]).
613 auto_complete_preference(Prefix,Name) :-
614 atom_codes(Prefix,PC),
615 eclipse_preference(EP,Name),
616 (atom_codes(EP,EPC) ; atom_codes(Name,EPC)),
617 append(PC,_,EPC).
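% For illustration (a sketch; the actual result depends on the preference
% definitions in module preferences): get_preference_name('CLP',P) binds P
% to the first preference whose eclipse or internal name starts with CLP.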
618
619 % add additional preference when running tests:
620 add_additional_preference(PREF,PREFVAL) :-
621 (retract(additional_arguments(Old0)) -> remove_matching_pref(Old0,PREF,Old) ; Old=[]),
622 New = ['-p',PREF,PREFVAL|Old],
623 format('New additional arguments: ~w~n',[New]),
624 assertz(additional_arguments(New)).
625
626 % remove all preferences conflicting with PREF
627 remove_matching_pref([],_PREF,[]).
628 remove_matching_pref(['-p',P,OLD|T],PREF,Res) :- !,
629 (P=PREF -> Res=T ; Res = ['-p',P,OLD|RT], remove_matching_pref(T,PREF,RT)).
630 remove_matching_pref([H|T],PREF,[H|RT]) :- remove_matching_pref(T,PREF,RT).
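% Worked example: remove_matching_pref(['-p','CLPFD','TRUE','-v'],'CLPFD',R)
% binds R = ['-v'], i.e. the old '-p CLPFD TRUE' setting is dropped.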
631
% remove all preferences conflicting with another list of prefs
633 remove_matching_prefs([],P,P).
634 remove_matching_prefs(['-cache',_File|T],InPrefs,Res) :- !,
635 remove_matching_prefs(T,InPrefs,Res).
636 remove_matching_prefs(['-p',PREF,_|T],InPrefs,Res) :-
637 remove_matching_pref(InPrefs,PREF,In2),
638 remove_matching_prefs(T,In2,Res).
639 remove_matching_prefs([_|T],InPrefs,Res) :-
640 remove_matching_prefs(T,InPrefs,Res).
641
642 :- use_module(probsrc(pathes_lib), [unavailable_extension/2]).
643 unavailable_extension_for_test(Id,TestCategories,Ext,Reason) :-
    test_requires_extension(Id,TestCategories,Ext),
645 unavailable_extension(Ext,Reason).
646
647 :- dynamic skip_all_tests/0.
648
649 skip_test(_, _, _, ReasonMsg, ReasonTerm) :-
650 skip_all_tests,
651 !,
652 ReasonMsg = 'skipping all tests',
653 ReasonTerm = skip_all_tests.
654 skip_test(_Id, _TestCategories, TestConfigurationInfos, ReasonMsg, ReasonTerm) :-
655 member(skip,TestConfigurationInfos),
656 !,
657 ReasonMsg = 'test marked as skipped',
658 ReasonTerm = skip.
659 skip_test(_Id, _TestCategories, TestConfigurationInfos, ReasonMsg, ReasonTerm) :-
660 member(conditional_skip(Callable), TestConfigurationInfos),
661 % Evaluate the condition in the context of the testcases module,
662 % where it was defined, so that imports are visible to the condition.
663 % We can't use a meta_predicate declaration here,
664 % because the goal is nested inside a term.
665 call(testcases:Callable),
666 !,
667 ReasonMsg = 'skip condition is true',
668 ReasonTerm = conditional_skip(Callable).
669 skip_test(_Id, TestCategories, _TestConfigurationInfos, ReasonMsg, ReasonTerm) :-
670 member(private, TestCategories),
671 \+ absolute_file_name(prob_examples(examples), _, [access(exist), file_type(directory), file_errors(fail)]),
672 !,
673 ReasonMsg = 'test requires non-public examples which are not available',
674 ReasonTerm = private.
675 skip_test(_Id, TestCategories, _TestConfigurationInfos, ReasonMsg, ReasonTerm) :-
676 member(private_source_not_available, TestCategories),
677 \+ directory_exists('../private_examples'),
678 !,
679 ReasonMsg = 'test requires private examples which are not available',
680 ReasonTerm = private_source_not_available.
681 skip_test(_Id, _TestCategories, TestConfigurationInfos, ReasonMsg, ReasonTerm) :-
682 current_prolog_flag(dialect, swi),
683 % TODO Handle swi_expected_failure differently.
684 % Ideally, tests that are expected to fail on SWI should not be skipped,
685 % but instead should be run normally with any failures ignored.
686 % This would allow detecting when tests unexpectedly succeed
% (i.e. they have been fixed but the expected-failure mark has not been removed yet).
688 member(swi_expected_failure, TestConfigurationInfos),
689 !,
690 ReasonMsg = 'test expected to fail on SWI-Prolog',
691 ReasonTerm = swi_expected_failure.
692 skip_test(_Id, TestCategories, _TestConfigurationInfos, ReasonMsg, ReasonTerm) :-
    group_cannot_be_checked_on_ci_server(Category),
694 member(Category, TestCategories),
695 !,
696 ReasonMsg = 'category cannot be checked on CI',
697 ReasonTerm = Category.
698 skip_test(Id, TestCategories, _TestConfigurationInfos, ReasonMsg, ReasonTerm) :-
699 unavailable_extension_for_test(Id,TestCategories,Ext,Reason),
700 !,
701 ajoin(['test requires an unavailable extension (',Reason,')'],ReasonMsg),
702 ReasonTerm = Ext.
703
704
705 % RUNNING SINGLE TESTCASE:
706 % ------------------------
707
708 run_single_testcase(testcase(Id,TestCategories,Infos,Arguments,Comment),AllowSkipping) :-
709 skip_test(Id,TestCategories,Infos,ReasonMsg,ReasonTerm),
710 (AllowSkipping = allow_skipping -> true
711 ; % Skipping disabled - print message, then continue to next clause to run the test anyway.
712 format_progress_nl('Running test that would normally be skipped - ~w: ~q', [ReasonMsg,ReasonTerm]),
713 fail),
714 !,
715 full_flush,
716 format_with_colour_nl(user_output,[blue,bold],'Skipping test ~w (~w) because ~w: ~q', [Id,Comment,ReasonMsg,ReasonTerm]),
717 print_junit_skip(Arguments),
718 assertz(test_skipped(Id)),
719 full_flush.
720 run_single_testcase(testcase(Id,_TestCategories,_Infos,Arguments,Comment),_) :-
721 full_flush,
722 format_progress_nl('Running test ~w ~n ~w',[Id,Comment]),
723 (debug_mode(off) -> true ; print('testcase: probcli '), print_args(Arguments),nl),
724 clear_diff_output(Id),
725 clear_logxml_output(Id),
726 prob_junit_args(JUnitArgs),
727 maplist(patch_prob_examples_loc,Arguments,Arguments0), % update path to prob_examples if necessary
728 append(Arguments0, JUnitArgs, Arguments1),
729 (additional_arguments(ExtraArgs)
-> remove_matching_prefs(ExtraArgs,Arguments1,RemArguments1), % remove conflicting arguments that are now overridden
731 append(ExtraArgs,RemArguments1,Arguments2)
732 ; Arguments1=Arguments2),
    (tests_multiply_timeout(Factor) -> modify_timeout(Factor,Arguments2,Arguments3) ; Arguments3 = Arguments2),
734 (no_strict_running, select('-strict',Arguments3,Arguments4) -> true ; Arguments4=Arguments3),
735 (silent_running -> true ; print('executing: probcli '), print_args(Arguments4),nl),
736 full_flush,
737 test_started(Id),
738 catch(prob_cli:run_probcli(Arguments4,[test_runner]), Exception, true), !,
739 test_finished(Id,Walltime),
740 (test_exception_is_success(Exception)
741 -> check_diff_output(Id),
742 check_logxml(Id)
743 ; format_warning_nl('Exception in test ~w: ~w',[Id,Exception]),
744 handle_test_exception(Exception),
745 print_test_failed(Id,Exception)
746 ),
747 (prolog_error_occurred ->
748 print_test_failed(Id,'Error(s) from Prolog during test! See console log for details. Aborting test run.'),
749 halt1
750 ; true),
751 !,
752 (test_failed(Id) % asserted by check_diff_output/check_logxml/print_test_failed
753 -> true % messages already printed above
754 ; format_with_colour_nl(user_output,[green],'Test ~w completed successfully (in ~w ms)~n',[Id,Walltime])
755 ),
756 safe_reset_cli(Id),
757 full_flush.
758
759 test_exception_is_success(Exception) :- var(Exception), !. % No exception was actually thrown
760 test_exception_is_success(Exception) :- Exception == halt(0). % can be thrown by typechecker_test
761
762 handle_test_exception(Exception) :-
763 ( Exception = error(_,_) -> Type = 'Error'
764 ; Exception = enumeration_warning(_,_,_,_,_) -> Type = 'Enumeration exception'
765 ; Exception = solver_and_provers_too_weak -> Type = 'Solver exception'
766 ; fail
767 ),
768 !,
769 (halt1_allowed
770 -> format_warning_nl('~w during test execution: ~w',[Type,Exception]),
771 halt1
772 ; true).
773 handle_test_exception(halt(1)) :- !.
774 handle_test_exception(parse_errors(_)) :- !.
775 handle_test_exception(user_interrupt_signal) :- !,
776 assertz(user_interrupt_signal_received),
777 format_warning_nl('CTRL-C received, aborting tests',[]).
778 handle_test_exception(Exception) :-
779 format_warning_nl('Exception not caught in test_runner: ~w',[Exception]),
780 halt1.
781
782 safe_reset_cli(Id) :-
783 catch(prob_cli:reset_cli, % when bundles/extensions are missing we can get exceptions here
784 Exception,
785 (halt1_allowed
786 -> format_warning_nl('Exception while resetting probcli: ~w',[Exception]),
787 halt1
788 ; print_test_failed(Id,Exception)
789 )).
790
791 :- dynamic test_took_aux/2.
792 :- dynamic last_testcase_run/1, number_of_tests_run/1.
793 number_of_tests_run(0).
794 reset_nr_of_tests :- retractall(number_of_tests_run(_)), assertz(number_of_tests_run(0)).
795
796 :- use_module(library(system),[now/1, datime/2]).
797 :- dynamic performance_session_running/1, performance_session_stats/4.
798
799 performance_session_start :-
800 now(When),
801 datime(When,datime(Year,Month,Day,Hour,Min,Sec)),
802 format('~nStarting Codespeed Performance Monitoring session ~w:~w:~w:~w:~w:~w~n',[Year,Month,Day,Hour,Min,Sec]),
803 retractall(performance_session_running(_)),
804 assertz(performance_session_running(When)).
805
806 :- use_module(probsrc(parsercall),[get_parser_version/1]).
807 :- use_module(probsrc(version), [version_str/1, revision/1, lastchangeddate/1, format_prob_version/1]).
808 performance_session_end(FilePrefix) :-
809 performance_session_running(When),
810 datime(When,datime(Year,Month,Day,Hour,Min,Sec)),
811 %tools:ajoin([FilePrefix,':',Year,Month,Day,Hour,Min,Sec],FileName),
812 format('~nFinishing Codespeed session ~w:~w:~w:~w:~w:~w~n -> File : ~w~n',[Year,Month,Day,Hour,Min,Sec,FilePrefix]),
813 open(FilePrefix,append,S),
814 format(S,'~n/* Codespeed session ~w:~w:~w:~w:~w:~w */~n',[Year,Month,Day,Hour,Min,Sec]),
815 version_str(Vers), portray_clause(S, session_prob_version(When,Vers)),
816 revision(Rev), portray_clause(S, session_prob_revision(When,Rev)),
817 lastchangeddate(DD), portray_clause(S, session_prob_lastchangeddate(When,DD)),
818 get_parser_version(PV), portray_clause(S, session_prob_parser_version(When,PV)),
819 current_prolog_flag(version, PrologVString),
820 portray_clause(S, session_prolog_version_string(When,PrologVString)),
821 current_prolog_flag(version_data, PrologVData),
822 portray_clause(S, session_prolog_version_data(When,PrologVData)),
823 current_prolog_flag(host_type, HostType),
824 portray_clause(S, session_prolog_host_type(When,HostType)),
825 write_perf_data(When,S).
826
827 write_perf_data(When,S) :- additional_arguments(New),
828 portray_clause(S, stored_additional_arguments(When,New)),
829 fail.
830 write_perf_data(When,S) :- performance_session_stats(When,Id,Time,WTime),
831 portray_clause(S, stored_performance_test_stats(When,Id,Time,WTime)),
832 fail.
833 write_perf_data(_When,S) :- nl(S), nl(S), close(S).
834
835
836
837 test_started(Id) :-
838 retractall(last_testcase_run(_)), assertz(last_testcase_run(Id)),
839 retractall(test_took_aux(_,_)),
840 statistics(runtime,[Start,_]),
841 statistics(walltime,[WStart,_]),
842 bb_put(test_started,Start),
843 bb_put(test_started_wtime,WStart),
844 bb_put(test_target_coverage,0). % a special value which can be increased in ProB's source code
845 % useful to measure the number of tests that cover a new feature
846
847 :- public inc_test_target_coverage/0.
848 % call this from prob_prolog if some code point you wish to test is covered
849 % using the stats repl command you can then see how many tests have covered this code point
850 inc_test_target_coverage :-
851 bb_get(test_target_coverage,X),
852 X1 is X+1,
853 bb_put(test_target_coverage,X1).
854
855 :- dynamic test_stats/6.
856 test_finished(Id,WTime) :-
857 statistics(runtime,[End,_]),
858 statistics(walltime,[WEnd,_]),
859 bb_get(test_started,Start),
860 bb_get(test_started_wtime,WStart),
861 bb_get(test_target_coverage,Covered),
862 Time is End - Start, WTime is WEnd- WStart,
863 retractall(test_took_aux(_,_)),
864 assertz(test_took_aux(Time,WTime)),
865 (retract(number_of_tests_run(Nr)) -> N1 is Nr+1 ; N1=1),
866 assertz(number_of_tests_run(N1)),
867 (retract(test_stats(Id,PrevTime,PrevWTime,_,_,_))
868 -> assertz(test_stats(Id,Time,WTime,PrevTime,PrevWTime,Covered))
869 ; assertz(test_stats(Id,Time,WTime,-1,-1,Covered))
870 ),
871 (performance_session_running(When)
872 -> assertz(performance_session_stats(When,Id,Time,WTime))
873 ; true).
874
875 print_delta_stats :- print('Comparing walltimes with previous test run: '),nl,
876 findall(delta(DeltaPerc,DeltaWTime,Id),test_delta_stat(Id,DeltaPerc,DeltaWTime),L),
877 (L=[] -> print('No previous run information available'),nl
; print(' ID | % (delta absolute) | walltime (runtime)'),nl,
879 sort(L,SL),
880 maplist(print_delta,SL)).
881 test_delta_stat(Id,DeltaPerc,DeltaWTime) :-
882 test_stats(Id,_RTime,WTime,_PrevRTime,PrevWTime,_),
883 PrevWTime>0,
884 DeltaWTime is WTime - PrevWTime,
885 DeltaPerc is (100*DeltaWTime) / PrevWTime.
886 print_delta(delta(DeltaPerc,DeltaWTime,Id)) :-
887 test_stats(Id,RTime,WTime,_PrevRTime,PrevWTime,_Cov),
888 format(' ~w | ~2f % (~w ms) | ~w ms (~w ms runtime) [~w walltime ms previously]~n',
889 [Id,DeltaPerc,DeltaWTime,WTime,RTime,PrevWTime]).
890
891 :- use_module(probsrc(tools),[print_memory_used_wo_gc/0]).
892 print_current_stats :- print_current_stats(user_output,' | ').
893 print_current_stats(Stream,Sep) :-
894 print_memory_used_wo_gc,nl,
895 bb_put(test_target_coverage_nr,0),
896 bb_put(test_target_coverage_count,0),
897 bb_put(test_counter_nr,0),
898 format(Stream,' ID~wOK~wWALLTIME (ms)~wRUNTIME (ms)~wCOV~wDESCRIPTION~n',[Sep,Sep,Sep,Sep,Sep]),
899 test_stats(Id,RTime,WTime,_PrevRTime,_PrevWTime,NrTargetCoverage),
900 cli_testcase(Id,_Cat,_,_Args,Desc),
901 (test_failed(Id) -> OK = '*FAILED*' ; OK = ' OK '),
902 format(Stream,' ~w~w~w~w~w~w~w~w~w~w~w~n',
903 [Id,Sep,OK,Sep,WTime,Sep,RTime,Sep,NrTargetCoverage,Sep,Desc]),
904 bb_get(test_counter_nr,Nr), Nr1 is Nr+1, bb_put(test_counter_nr,Nr1),
905 (NrTargetCoverage>0
906 -> bb_get(test_target_coverage_nr,TN), TN1 is TN+1, bb_put(test_target_coverage_nr,TN1),
907 bb_get(test_target_coverage_count,NC), NC1 is NC+NrTargetCoverage,
908 bb_put(test_target_coverage_count,NC1)
909 ),
910 fail.
911 print_current_stats(_,_) :-
912 bb_get(test_counter_nr,NrT),
913 format('Number of tests: ~w~n',[NrT]),
914 (bb_get(test_target_coverage_nr,Nr), Nr>0 ->
915 bb_get(test_target_coverage_count,NrC),
916 Perc is (100.0 * Nr) / NrT,
917 format('Number of tests reaching test source code target: ~w (~2f %, total hits: ~w)~n',[Nr, Perc, NrC])
918 ; true).
919
920 test_took(Time,WTime) :- test_took_aux(Time,WTime), !.
921 test_took(0,0) :-
922 format_warning_nl('test_took/2 called before test_finished/2, this should not happen!',[]).
923
print_junit_skip(Arguments) :-
    (  prob_junit_dir(Dir)
    -> set_junit_dir(Dir),
       create_and_print_junit_result(['Integration Tests'],Arguments,0,skip)
    ;  true).
929
% if the test expects a time_out error, the timeout is left unchanged;
% otherwise, the timeout is increased to allow coverage analysis / junit / etc. to finish
932 :- use_module(probsrc(tools_meta), [no_time_out_value/1]).
933 multiply_and_truncate_timeout(OrigTimeout, Factor, NewTimeout) :-
934 no_time_out_value(NoTimeout),
935 % Make sure that the new timeout never exceeds the special "no timeout" value,
936 % otherwise ProB prints lots of warnings about the timeout being too high.
937 NewTimeout is min(round(Factor * OrigTimeout), NoTimeout). % Factor could be float
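% Worked example (assuming the result stays below no_time_out_value):
% a factor of 2.5 applied to an original timeout of 1000 ms gives
% round(2.5*1000) = 2500 ms.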
938
939 modify_timeout(_,OldOptions,New) :-
940 append(_,['-expcterr','time_out'|_],OldOptions), !, New=OldOptions. % we expect a time_out
941 modify_timeout(Factor,[],['-p','TIME_OUT',NVal]) :-
942 % timeout was not set at all - set it to Factor*Default
943 % Note: there is a potential problem when the time_out is set inside the machine and not in the test !! TO DO: fix
944 preferences:preference_default_value(time_out,DEFAULT),
945 multiply_and_truncate_timeout(DEFAULT, Factor, NVal).
modify_timeout(Factor,[GTC,OLD|T],[GTC,NewT|MT]) :- is_global_time_out_cmd(GTC),!,
    tools:arg_is_number(OLD,OLDT),
    multiply_and_truncate_timeout(OLDT, Factor, NewT),
    modify_timeout(Factor,T,MT).
modify_timeout(Factor,['-p','TIME_OUT',OLD|T],['-p','TIME_OUT',NewT|T]) :-
    tools:arg_is_number(OLD,OLDT), !,
    multiply_and_truncate_timeout(OLDT, Factor, NewT). % TODO: we currently assume any global_time_out option appears before -p TIME_OUT in the argument list
modify_timeout(Factor,[H|T],[H|MT]) :- modify_timeout(Factor,T,MT).
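% For illustration (values are placeholders, assuming 1000 is below the
% no-timeout value): modify_timeout(2,['-p','TIME_OUT','500','-mc','1000'],R)
% binds R = ['-p','TIME_OUT',1000,'-mc','1000']; the '-mc' argument is not
% a timeout and stays untouched.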
954
955 is_global_time_out_cmd('-global_time_out').
956 is_global_time_out_cmd('--global_time_out').
957 is_global_time_out_cmd('-time_out'). % old version
958
959 full_flush :- flush_output(user_output), flush_output(user_error).
960
961 print_args([]).
962 print_args([H|T]) :- print(H), print(' '), print_args(T).
963
964 print_test_failed(Id,Msg) :-
965 print_test_failed(Id,Msg,'').
966 print_test_failed(Id,Msg1,Msg2) :-
967 (Msg1 = user_interrupt_signal -> assertz(user_interrupt_signal_received) ; true),
968 test_took(RunTime,WallTime),
969 cli_testcase(Id,Categories,_Infos,Arguments,Comment),
970 Categories = [FirstCat|_], !,
971 ErrorMessage = ['Test with Id',Id,'failed.','Test Categories: ',Categories,'Test Arguments: ',Arguments,'Test Comment: ',Comment,'Error Messages in Test Runner: ',Msg1,Msg2,'Content of Error Manager: '],
972 (get_all_errors(AllErrors) -> true ; AllErrors = []),
973 append(ErrorMessage,AllErrors,FullErrorMessage),
974 create_and_print_junit_result(['Integration Tests',FirstCat],Id,RunTime,error(FullErrorMessage)),
975 assert_test_failed(Id),
976 start_terminal_colour(red,user_error),
977 format(user_error,'*** Test ~w FAILED (after ~w ms runtime, ~w ms walltime): ~w~w~n',
978 [Id,RunTime,WallTime,Msg1,Msg2]),
979 write('***'),print_memory_used_wo_gc,nl,
980 reset_terminal_colour(user_error).
981
982 assert_test_failed(Id) :-
983 assertz(test_failed(Id)),
984 retractall(last_test_failed(_)),
985 assertz(last_test_failed(Id)).
986
987 diff_failed(Id,F1,F2,DiffOutputText,CmpOutputText) :-
988 test_took(Time,_),
989 atom_codes(DiffOutputAtom,DiffOutputText),
990 atom_codes(CmpOutputAtom,CmpOutputText),
991 ErrMsg = ['Diff for test with Id\n',Id,'\nfailed:\n','Output file\n',F1,'\ndoes not correspond to stored version\n',F2,
992 '\nOutput of Diff:\n',DiffOutputAtom,
993 '\nOutput of Cmp:\n',CmpOutputAtom],
994 create_and_print_junit_result(['Diff Checking'],Id,Time,error(ErrMsg)),
995 assert_test_failed(Id),
996 assertz(test_diff_failed(Id,F1,F2)),
997 start_terminal_colour(red,user_error),
998 format(user_error,'*** Test ~w FAILED: Diff failed:~nOutput file ~w~ndoes not correspond to stored version~n~w~n', [Id,F1,F2]),
999 format(user_error,'Diff:~n~s~n',[DiffOutputText]),
1000 format(user_error,'Cmp:~n~s~n',[CmpOutputText]),
1001 reset_terminal_colour(user_error).
1002
1003 :- use_module(probsrc(tools_commands),[diff_files_with_editor/2]).
1004 diff_in_editor :- findall(I,test_diff_failed(I,_,_),LI), sort(LI,SI),
1005 format('Opening failed diff files in editor: ~w~n',[SI]),
1006 test_diff_failed(Id,F1,F2),
1007 format('Test ~w~n~w ~w~n',[Id,F1,F2]),
1008 diff_files_with_editor(F1,F2),
1009 fail.
1010 diff_in_editor.
1011
1012
1013 clear_diff_output(Id) :- % clear all files that should be generated
1014 get_testcase_diff_check_output(Id,GeneratedFile,_StoredReferenceFile),
1015 safe_file_exists(GeneratedFile),
1016 (get_testcase_do_not_delete(Id,GeneratedFile) -> formatsilent(user_output,'% Keeping: ~w~n',[GeneratedFile])
1017 ; formatsilent(user_output,'% Deleting: ~w~n',[GeneratedFile]),
1018 delete_file(GeneratedFile)
1019 ),fail.
1020 clear_diff_output(_).
1021
1022 check_diff_output(Id) :-
1023 findall(diff(Id,GeneratedFile,StoredReferenceFile),
1024 get_testcase_diff_check_output(Id,GeneratedFile,StoredReferenceFile),
1025 ListOfDiffsToCheck),
1026 maplist(check_diff_output2, ListOfDiffsToCheck).
1027
check_diff_output2(diff(Id,GeneratedFile,StoredReferenceFile)) :-
    (  \+ safe_file_exists(GeneratedFile) -> print_test_failed(Id,'Output file does not exist:',GeneratedFile)
    ;  \+ safe_file_exists(StoredReferenceFile) -> print_test_failed(Id,'Stored file does not exist:',StoredReferenceFile)
    ;  diff(Id,GeneratedFile,StoredReferenceFile)
    ).
1032
1033 diff(Id,F1,F2) :-
1034 formatsilent(user_output,'% Checking: diff / cmp ~w ~w~n',[F1,F2]),
1035 get_command_path(diff,DiffPath),
1036 get_command_path(cmp,CmpPath),
1037 (system_call(DiffPath,['-b',F1,F2],DiffOutputText,_ErrTextDiff,ExitDiff) % use -q for quiet
1038 -> true
1039 ; DiffOutputText = "*** CALLING DIFF FAILED !", ExitDiff = fail
1040 ),
1041 formatsilent(user_output,'% Checking: cmp ~w ~w~n',[F1,F2]),
1042 (system_call(CmpPath,['-b',F1,F2],CmpOutputText,_ErrTextCmp,_ExitCmp)
1043 -> true
1044 ; CmpOutputText = "*** CALLING CMP FAILED !"
1045 ),
1046 (ExitDiff = exit(0)%, ExitCmp = exit(0)
1047 -> true
1048 ; diff_failed(Id,F1,F2,DiffOutputText,CmpOutputText)).
1049
1050
1051 % ----------------
1052 :- volatile logxml_file/1.
1053 % logxml file used automatically when started by test_runner
1054 :- dynamic logxml_file/1.
1055 clear_logxml_output(_Id) :- logxml_file(GeneratedFile),
1056 safe_file_exists(GeneratedFile),
1057 delete_file(GeneratedFile),
1058 fail.
1059 clear_logxml_output(_).
1060
1061 set_logxml_file(File) :- retractall(logxml_file(_)), assertz(logxml_file(File)),
1062 format('Adding -logxml ~w.~n',[File]),
1063 add_additional_arguments(['-logxml',File]).
1064
1065 :- use_module(probsrc(logger),[read_xml_log_file/2]).
1066 check_logxml(Id) :-
1067 logxml_file(File),!,
1068 xmllint(Id,File),
1069 check_logxml(Id,File).
1070 check_logxml(_).
1071
1072 xmllint(Id,File) :-
1073 (call_xmllint(File,ErrText,ExitLint)
1074 -> (ExitLint = exit(0) -> true
1075 ; print_test_failed(Id,'xmllint reported errors for file:',File),
1076 formatsilent(user_error,'% xmllint result~n~s~n',[ErrText])
1077 )
1078 ; print_test_failed(Id,'Calling xmllint failed for file:',File)
1079 ).
1080
1081 :- dynamic use_logxml_schema/0.
1082 set_use_logxml_schema :- (use_logxml_schema -> true ; assertz(use_logxml_schema)).
1083 call_xmllint(File,ErrText,ExitLint) :- use_logxml_schema,
1084 absolute_file_name(prob_home('doc/logxml_xsd.xml'),SchemaFile),
1085 file_exists(SchemaFile),!,
1086 get_command_path(xmllint,LintPath),
1087 format('Calling ~w with schema ~w for logxml file ~w~n',[LintPath,SchemaFile,File]),
1088 system_call(LintPath,['--schema', SchemaFile, File, '-noout'],_OutputText,ErrText,ExitLint).
1089 call_xmllint(File,ErrText,ExitLint) :-
1090 get_command_path(xmllint,LintPath),
1091 format('Calling ~w without schema for logxml file ~w~n',[LintPath,File]),
1092 system_call(LintPath,[File],_OutputText,ErrText,ExitLint).
1093
1094 check_logxml(Id,File) :-
1095 catch(read_xml_log_file(File,Infos), E,
1096 print_test_failed(Id,'Exception trying to read logxml file:',E)),
1097 !,
1098 format_progress_nl('Read logxml file ~w, contains ~w.',[File,Infos]),
check_error_count(Id,Infos).
1100 check_logxml(Id,File) :-
1101 print_test_failed(Id,'Logxml file could not be read:',File).
1102
check_error_count(Id,Infos) :-
    testcase_expects_errors(Id),!,
    (member(expected_errors/E,Infos),E>0 -> true ; print_test_failed(Id,'Logxml file does not report errors:',Infos)).
check_error_count(Id,Infos) :-
    (member(errors/0,Infos),member(expected_errors/0,Infos) -> true
    ; print_test_failed(Id,'Logxml file reports errors:',Infos)).
1109
1110 testcase_expects_errors(Id) :-
1111 cli_testcase(Id,_Cat,_Infos,Arguments,_Comment),
1112 member(A,Arguments), expect_error_arg(A).
1113
1114 expect_error_arg('-expcterr').
1115 expect_error_arg('-expecterr').
1116 expect_error_arg('-expect').
1117 expect_error_arg('-expcterrpos').
1118
1119 % ------------------
1120
1121 :- volatile repl_mode/0.
1122 :- dynamic repl_mode/0.
1123
1124 :- use_module(test_paths, [get_prob_examples_override/1, set_prob_examples_override/1]).
1125
1126 set_prob_examples_location(Dir) :-
1127 (atom(Dir) -> DirAtom = Dir ; atom_codes(DirAtom, Dir)),
format('Setting location of prob_examples directory to: ~w~n',[DirAtom]),
1129 set_prob_examples_override(DirAtom).
1130
1131 % Prefixes of arguments that contain file paths,
1132 % which may need to be patched to an alternate prob_examples location.
1133 path_arg_prefix("#file ").
1134 path_arg_prefix(":cdclt #file ").
1135 path_arg_prefix(":cdclt-file ").
1136 path_arg_prefix(":cdclt-free #file ").
1137 path_arg_prefix(":cdclt-double-check #file ").
1138 path_arg_prefix(":cdclt-free-double-check #file ").
1139 path_arg_prefix(":prob-file ").
1140 path_arg_prefix(":z3-file ").
1141 path_arg_prefix(":z3-free-file ").
1142 path_arg_prefix("").
1143
1144 % update path to prob_examples if necessary:
1145 patch_prob_examples_loc_0(NewLoc,Arg,PatchedArg) :-
1146 path_arg_prefix(Prefix),
1147 append(Prefix, ArgTail, Arg),
1148 append("../prob_examples", PathTail, ArgTail),
1149 !,
1150 append(NewLoc, PathTail, PatchedArgTail),
1151 append(Prefix, PatchedArgTail, PatchedArg).
1152
1153 patch_prob_examples_loc(Arg,PatchedArg) :-
1154 get_prob_examples_override(NewLocAtom),
1155 atom_codes(NewLocAtom, NewLoc),
1156 atom(Arg),
1157 atom_codes(Arg,ArgC),
1158 patch_prob_examples_loc_0(NewLoc,ArgC,PatchedArgC),
1159 !,
1160 atom_codes(PatchedArg,PatchedArgC),
1161 format('Patched ~w to ~w~n',[Arg,PatchedArg]).
1162 patch_prob_examples_loc(A,A).
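% For illustration (the paths are placeholders): after
% set_prob_examples_location('/opt/prob_examples'), an argument such as
% '../prob_examples/public_examples/B/Foo.mch' is patched to
% '/opt/prob_examples/public_examples/B/Foo.mch'; the same rewriting is
% applied behind any of the path_arg_prefix/1 prefixes above.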
1163
1164 get_testcase_do_not_delete(Id,PF) :-
1165 cli_testcase_do_not_delete(Id,F),
1166 patch_prob_examples_loc(F,PF).
1167
1168 get_testcase_diff_check_output(Id,PF1,PF2) :-
1169 cli_testcase_diff_check_output(Id,F1,F2),
1170 patch_prob_examples_loc(F1,PF1),
1171 patch_prob_examples_loc(F2,PF2).
1172
1173 :- use_module(extension('counter/counter'),[counter_init/0]).
1174 :- use_module(probsrc(prob_startup), [startup_prob/0]).
1175 init_test_runner :- startup_prob,counter_init.
1176 :- use_module(library(lists),[maplist/2]).
1177 % a minimal shell to execute tests:
1178 test_repl :-
1179 init_test_runner,
1180 format_prob_version(user_output),nl,
1181 assertz(repl_mode),
1182 current_prolog_flag(argv,ArgV), treat_argv(ArgV),
1183 test_repl_loop,
1184 retractall(repl_mode).
1185
1186
1187 treat_argv(['-prob-examples',Dir|T]) :- !, set_prob_examples_location(Dir),
1188 treat_argv(T).
1189 treat_argv(Args) :- maplist(eval_argv,Args).
1190
1191 % execute tests provided on the command-line:
1192 eval_argv(Cmd) :- format('ARGV ==> ~w~n',[Cmd]),
1193 atom_codes(Cmd,C), safe_number_codes(Nr,C), !, test_eval(Nr).
1194 eval_argv(Cmd) :- test_eval(Cmd),!.
1195
1196 test_repl_loop :- safe_read(T), test_eval(T), !, test_repl_loop.
1197 test_repl_loop.
1198
1199 safe_read(T) :-
1200 catch(
1201 (prompt(OldPrompt, 'TEST ==> '), call_cleanup(read(T), prompt(_, OldPrompt))),
1202 error(syntax_error(E),_),
1203 (format_warning_nl('*** Syntax error: ~w~n*** Type Prolog term followed by a dot(.) and enter.',[E]),
1204 safe_read(T))).
1205
1206 :- meta_predicate wall(0).
1207 wall(Call) :-
1208 statistics(walltime,[Start,_]),
1209 call(Call),
1210 statistics(walltime,[Stop,_]), WT is Stop-Start,
1211 format('Walltime: ~w ms~n',[WT]),
1212 print_memory_used_wo_gc,nl.
1213
1214 % -------------------------
1215
1216 :- use_module(library(file_systems)).
1217 :- use_module(probsrc(tools),[get_options/5]).
1218 % true for test_files
1219 test_file(Id,File,AbsFileName) :- cli_testcase(Id,_Cat,_Infos,Arguments,_Comment),
1220 get_options(Arguments,prob_cli:recognised_cli_option,_Options,Files,fail),
1221 member(File,Files),
1222 is_existing_file(File),
1223 absolute_file_name(File,AbsFileName).
1224
is_existing_file(X) :- \+ number(X), atom(X),
    atom_codes(X,Codes), [Slash] = "/", memberchk(Slash,Codes),
    file_exists(X).
1228
1229 % obtain a list of all files used in tests
1230 all_files(Files) :- findall(F,test_file(_,_,F),A), sort(A,Files).
1231
1232 % a test file that can be loaded:
1233 valid_test_file(Id,File,AbsFileName,XT) :-
1234 test_file(Id,File,AbsFileName),
1235 cli_testcase(Id,_Cat,_Infos,Arguments,_Comment),
1236 \+ append(_,['-expcterr', load_main_file |_],Arguments),
1237 tools:get_filename_extension(File,XT).
1238 all_valid_files(Files,Mode) :-
1239 findall(F, (valid_test_file(_,_,F,XT),
1240 relevant_extension(XT,Mode)),
1241 A),
1242 sort(A,Files).
1243
1244 % traverse a directory and indicate which specification files are used in tests and which ones not
1245 traverse :- traverse('../prob_examples/public_examples/').
1246 traverse(SD) :- all_files(Files), absolute_file_name(SD,StartDir),
1247 format('Examining files in ~w~n + means file is used in some test~n~n',[StartDir]),
1248 traverse(StartDir,Files).
1249
1250 traverse(Dir,AllFiles) :- file_member_of_directory(Dir,_,FullFile),
1251 tools:get_filename_extension(FullFile,XT),
1252 (member(FullFile,AllFiles) -> format(' + ~w~n',[FullFile])
1253 ; relevant_extension(XT,_) -> format('--- ~w~n',[FullFile])),
1254 fail.
1255 traverse(Dir,AllFiles) :- directory_member_of_directory(Dir,_,SubDir),
1256 %format('~nSTART ~w~n',[SubDir]),
1257 traverse(SubDir,AllFiles),
1258 %format('~n END ~w~n',[SubDir]),
1259 fail.
1260 traverse(_,_).
1261
1262 relevant_extension('mch',b).
1263 relevant_extension('ref',b).
1264 relevant_extension('imp',b).
1265 relevant_extension('tla',tla).
1266 relevant_extension('fuzz',z).
1267 relevant_extension('tex',z).
1268 relevant_extension('csp',csp).
1269 relevant_extension('cspm',csp).
1270 relevant_extension('eventb',eventb).
1271
1272 % --------------------------
1273
1274 test_eval(quit) :- !,fail.
1275 test_eval(q) :- !,fail.
1276 test_eval(end_of_file) :- !,fail. % Ctrl-D
1277 test_eval(Cmd) :- test_eval1(Cmd),!.
1278 test_eval(Cmd) :-
1279 format_warning_nl('Error executing command: ~w',[Cmd]).
1280
1281 test_eval1(N) :- number(N),!, wall(run_id(N)).
1282 test_eval1(last) :- !, wall(run_last_test).
1283 test_eval1(N-M) :- number(N), number(M),!, wall(run_tests_by_id(N-M)).
1284 test_eval1('..'(N,M)) :- !, test_eval1(N-M).
1285 test_eval1(repeat(ID,M)) :- !, repeat_id(ID,M).
1286 test_eval1(r) :- !, run_random_tests(25).
1287 test_eval1(v) :- !,v.
1288 test_eval1(verbose) :- !,v.
1289 test_eval1(all_files) :- !, all_files(Files), length(Files,Len),
1290 format('~nFiles = ~n~w~n # Files = ~w~n',[Files,Len]).
1291 test_eval1(valid_files(Mode)) :- !, all_valid_files(Files,Mode), length(Files,Len),
1292 format('~nValid ~w Files = ~n~w~n # Files = ~w~n',[Mode,Files,Len]).
1293 test_eval1(files) :- !, traverse.
1294 test_eval1(files(Dir)) :- !, traverse(Dir).
1295 test_eval1(ex(Dir)) :- !, set_prob_examples_location(Dir).
1296 test_eval1(cache) :- !,print('Enabling cache'),nl,
1297 cache.
1298 test_eval1(debug) :- !,print('Enabling Prolog debugging mode (use -v or -vv for ProB debugging info)'),nl,
1299 debug,
1300 retractall(multiply_timeout(_)),
1301 assertz(multiply_timeout(10)).
1302 test_eval1(factor(X)) :- !,
1303 retractall(multiply_timeout(_)),
1304 format('Setting timeout factor to ~w~n',[X]),
1305 assertz(multiply_timeout(X)).
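% e.g. factor(10) runs every subsequent test with ten times its normal timeout
% (useful on slow machines).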
1306 test_eval1(timeout(X)) :- !,
format('Adding -timeout ~w for model checking and disproving.~nUse factor(X) to set the TIME_OUT factor.~n',[X]),
1308 add_additional_arguments(['-timeout',X]).
1309 test_eval1(coverage) :- !,
1310 format('Adding -coverage to all commands.~n',[]),
1311 add_additional_arguments(['-coverage']).
1312 test_eval1(opreuse) :- !,
1313 format('Adding operation reuse to all commands.~n',[]),
1314 add_additional_arguments(['-p', 'OPERATION_REUSE',true]).
1315 test_eval1(opc) :- !,
1316 format('Adding operation reuse and compression to all commands.~n',[]),
1317 add_additional_arguments(['-p', 'COMPRESSION', 'TRUE', '-p', 'OPERATION_REUSE',true]).
1318 test_eval1(opcf) :- !,
format('Adding full operation reuse and compression to all commands.~n',[]),
1320 add_additional_arguments(['-p', 'COMPRESSION', 'TRUE', '-p', 'OPERATION_REUSE',full]).
1321 test_eval1(reset) :- !, reset_additional_arguments.
1322 test_eval1(logxml) :- !, File = './test_runner_logxml.xml',
1323 set_logxml_file(File).
1324 test_eval1(xsd) :- !, File = './test_runner_logxml.xml',
1325 set_logxml_file(File), set_use_logxml_schema.
1326 test_eval1(debug_off) :- !,print('Disabling Prolog debugging mode'),nl,
1327 nodebug,
1328 retractall(multiply_timeout(_)).
1329 test_eval1(fast) :- !,print('Enabling jvm_parser_fastrw'),nl,
1330 % TODO: only works at the very beginning; we need to be able to switch parser?
1331 add_additional_preference('jvm_parser_fastrw','true'),
1332 add_additional_preference('jvm_parser_force_parsing','true').
1333 test_eval1(force) :- !,print('Setting jvm_parser_force_parsing'),nl,
1334 add_additional_preference('jvm_parser_force_parsing','true').
1335 test_eval1(clpfd) :- !,print('Enabling CLPFD'),nl,
1336 add_additional_preference('CLPFD','TRUE').
1337 test_eval1(clpfd_off) :- !,print('Disabling CLPFD'),nl,
1338 add_additional_preference('CLPFD','FALSE').
1339 test_eval1(smt) :- !,print('Enabling SMT'),nl,
1340 add_additional_preference('SMT','TRUE').
1341 test_eval1(smt_off) :- !,print('Disabling SMT'),nl,
1342 add_additional_preference('SMT','FALSE').
1343 test_eval1(chr) :- !,print('Enabling CHR'),nl,
1344 add_additional_preference('CHR','TRUE').
1345 test_eval1(chr_off) :- !,print('Disabling CHR'),nl,
1346 add_additional_preference('CHR','FALSE').
1347 test_eval1(cse_off) :- !,print('Disabling CSE'),nl,
1348 add_additional_preference('CSE','FALSE').
1349 test_eval1(cse) :- !,print('Enabling CSE'),nl,
1350 add_additional_preference('CSE','TRUE').
1351 test_eval1(cse_subst) :- !,print('Enabling CSE_SUBST'),nl,
1352 add_additional_preference('CSE','TRUE'),
1353 add_additional_preference('CSE_SUBST','TRUE').
1354 test_eval1(trace_info) :- !,print('Enabling TRACE_INFO'),nl,
1355 add_additional_preference('TRACE_INFO','TRUE').
1356 %
1357 test_eval1(p(PREF)) :- !,
1358 get_preference_name(PREF,PName),
1359 print('Enabling Preference '),print(PName),nl,
1360 add_additional_preference(PName,'true').
1361 test_eval1(p(PREF,VAL)) :- !,
1362 get_preference_name(PREF,PName),
1363 print('Setting Preference '),print(PName),nl,
1364 add_additional_preference(PName,VAL).
1365 test_eval1(random) :- !,print('Enabling RANDOMISE_ENUMERATION_ORDER'),nl,
1366 add_additional_preference('RANDOMISE_ENUMERATION_ORDER','TRUE').
test_eval1(random_off) :- !,print('Disabling RANDOMISE_ENUMERATION_ORDER'),nl,
1368 add_additional_preference('RANDOMISE_ENUMERATION_ORDER','FALSE').
1369 test_eval1(safe) :- !,print('Setting PROB_SAFE_MODE'),nl,
1370 run_safely.
1371 test_eval1(sanity_check) :- !, sanity_check(false).
1372 test_eval1(sc) :- !, sanity_check(false).
1373 test_eval1(trace) :- !, print('Enabling TRACE_UPON_ERROR'),nl,
1374 add_additional_preference('TRACE_UPON_ERROR','TRUE').
1375 test_eval1(trace_off) :- !, print('Disabling TRACE_UPON_ERROR'),nl,
1376 add_additional_preference('TRACE_UPON_ERROR','FALSE').
1377 test_eval1(raise) :- !,print('Enabling STRICT_RAISE_ENUM_WARNINGS'),nl,
1378 add_additional_preference('STRICT_RAISE_ENUM_WARNINGS','TRUE').
1379 test_eval1(nopt) :- !,print('Disabling OPTIMIZE_AST'),nl,
1380 add_additional_preference('OPTIMIZE_AST','FALSE').
1381 test_eval1(vv) :- !,vv.
1382 test_eval1(version(X)) :- !, print_version(X). %cpp, java, full, lib
1383 test_eval1(version) :- !, print_version(full_verbose).
1384 test_eval1(silent) :- !, run_silently.
1385 test_eval1(nostrict) :- !, run_no_strict.
1386 test_eval1(no_strict) :- !, run_no_strict.
1387 test_eval1(strict) :- !, retractall(no_strict_running).
1388 test_eval1(skip) :- !, (retract(skip_all_tests) -> X=false ; assertz(skip_all_tests),X=true),
1389 format('Marking all tests as skipped: ~w~n',[X]).
1390 test_eval1(x) :- !,halt.
1391 test_eval1(reload) :- !,use_module(probsrc(test_runner)), use_module(probsrc(testcases)).
1392 :- if(predicate_property(make, _)).
1393 test_eval1(make) :- !, make.
1394 :- else.
1395 test_eval1(make) :- !,
1396 print('make/0 is only supported on SWI-Prolog - reloading just the testcases instead.'),nl,
1397 test_eval1(reload).
1398 :- endif.
1399 test_eval1(edit) :- last_testcase_run(Id),
1400 cli_testcase(Id,_,_Infos,Arguments,_Comment),
1401 member(File,Arguments), safe_file_exists(File),!,
1402 edit_file(File).
1403 test_eval1(e) :- !, test_eval1(edit).
1404 test_eval1(diff) :- !, diff_in_editor.
1405 test_eval1(halt) :- !,halt.
1406 test_eval1(info) :- !, get_total_number_of_errors(X), format('~nTotal number of errors: ~w~n~n',[X]).
1407 test_eval1(cat) :- !, print('Categories: '),
1408 findall(Cat,
1409 (cli_testcase(_, TestCategories, _, _, _), member(Cat, TestCategories)), List),
1410 sort(List,SL), print(SL),nl,
format('Type cat(Cat) or the name of a category to run it.~n',[]),
1412 format('Note: priv is an alias for private_source_not_available.~n',[]).
1413 test_eval1(all) :- !, wall(run_tests_all).
1414 test_eval1(cata) :- !, category_analysis.
1415 test_eval1(cat(Category)) :- !,
1416 wall(run_tests_by_category(Category,all)).
1417 test_eval1(priv) :- !, test_eval1(cat(private_source_not_available)).
1418 test_eval1(first(Category)) :- !,
1419 wall(run_tests_by_first_category(Category)).
1420 test_eval1(list(Category)) :- !,
1421 get_tests_for_categories([Category],TList),
1422 findall(Id,member(testcase(Id, _, _, _, _),TList),List),
1423 format('Tests for category: ~w~n',[List]).
1424 test_eval1(make(Categories)) :- !,
1425 wall(makefile_by_category(user_output,Categories)).
1426 test_eval1(make(File,Categories)) :- !,
1427 wall(makefile_by_category(File,Categories)).
1428 test_eval1(files(Category)) :- !, show_files(Category).
1429 test_eval1(uses(Command)) :- !, wall(run_tests_using_command(Command)).
1430 test_eval1(uses(Pref,Val)) :- !, wall(run_tests_using_preference(Pref,Val)).
1431 test_eval1(p) :- !, test_eval1(profile).
1432 test_eval1(ps) :- !, test_eval1(print_profile).
1433 test_eval1(pc) :- !, test_eval1(print_coverage).
1434 test_eval1(delta) :- !, print_delta_stats.
1435 test_eval1(stats) :- !, print_current_stats.
1436 test_eval1(statscsv) :- !, print_current_stats(user_output,',').
1437 test_eval1(start) :- !, performance_session_start.
1438 test_eval1(stop) :- !, performance_session_end('log/test_runner_performance_log.pl').
1439 test_eval1(codespeed) :- !,
1440 performance_session_start,
1441 test_eval1(cat(codespeed)),
1442 performance_session_end('log/test_runner_performance_codespeed_log.pl').
1443 test_eval1(prob_profile) :- !,
1444 cli_print_statistics(prob_profile),
1445 cli_print_statistics(disprover_profile).
test_eval1(profile) :- !, print('PROFILING : '), %spy([avl:avl_size/2]),
    (current_prolog_flag(profiling,on)
     -> set_prolog_flag(profiling,off), print('OFF')
     ;  set_prolog_flag(profiling,on), print('ON')),
    nl,print('USE ps to print_profile or pc to print_coverage info'),nl.
1451 test_eval1(profile_stats) :- !, test_eval1(print_profile).
1452 test_eval1(print_profile) :- !, nl,print('PROFILE INFORMATION:'), nl,
1453 catch(print_profile,
1454 error(existence_error(_,_),_),
1455 print('CAN ONLY BE USED WHEN RUNNING PROB FROM SOURCE')),
1456 nl, debug:timer_statistics.
1457 test_eval1(print_coverage) :- !, nl,print('COVERAGE INFORMATION:'), nl,
1458 (current_prolog_flag(source_info,on) -> true ; format_warning_nl('Only useful when current_prolog_flag(source_info,on)!',[])),
1459 catch(print_coverage,
1460 error(existence_error(_,_),_),
1461 print('CAN ONLY BE USED WHEN RUNNING PROB FROM SOURCE')),
1462 nl.
1463 test_eval1(profile_reset) :- !, nl,print('RESETTING PROFILE INFORMATION'), nl,
1464 catch(profile_reset,
1465 error(existence_error(_,_),_),
1466 print('CAN ONLY BE USED WHEN RUNNING PROB FROM SOURCE')),
1467 nl.
1468 test_eval1(perf) :- !, toggle_perfmessages.
1469 test_eval1(Category) :- valid_category(Category),!,
1470 wall(run_tests_by_category(Category,all)).
1471 test_eval1(codespeed(Id)) :- !, codespeed_id(Id).
1472 test_eval1(cs(Id)) :- !, codespeed_id(Id).
1473 test_eval1('-'(ProBCLICommand)) :- atom(ProBCLICommand),
1474 atom_concat('-',ProBCLICommand,Atom),
1475 (prob_cli:recognised_option(Atom,Call) -> true % from probcli.pl
1476 ; format('Unknown probcli command: -~w~n',[ProBCLICommand]),fail),
1477 !,
1478 format('Executing probcli command ~w~n',[Call]),
1479 call(prob_cli:Call).
1480 test_eval1([H|T]) :- number(H),!, wall(run_tests_by_id([H|T],_,no_skipping)).
1481 test_eval1([Category|T]) :- valid_category(Category),!,
1482 wall(run_tests_by_category([Category|T],all)).
1483 test_eval1(C) :-
1484 (C=help -> true ; format('*** Unknown command ~w~n',[C])),
1485 print(' Commands: Nr, Nr-Nr, last, q, x, v, vv, uses(Cmd), uses(Pref,Val),'),nl,
1486 print(' repeat(id,nr), timeout(ms),factor(f), e,edit,diff, r (for 25 random tests),'),nl,
1487 print(' cat, cat(Cats),make(Cats),make(File,Cats),files(Cat),'),nl,
1488 print(' profile, profile_stats, (to turn Prolog profiling on and print info)'),nl,
1489 print(' debug,debug_off, (to turn Prolog debug mode on or off)'),nl,
print(' reload, sc,'),nl,
1491 print(' -CMD, (for probcli commands like -profile)'),nl,
1492 print(' * for setting preferences:'),nl,
1493 print(' p(PREF), p(PREF,VAL),'),nl,
1494 print(' clpfd,clpfd_off, smt,smt_off, chr,chr_off, cse,cse_subst,cse_off,'),nl,
1495 print(' random,random_off, trace_info, nopt,'),nl,
1496 print(' cache, perf, (turn ProB caching or performance messages on)'),nl,
1497 print(' trace,trace_off, (set TRACE_UPON_ERROR preference)'),nl,
1498 print(' * statistics:'),nl,
1499 print(' delta, stats, info.'),nl.
1500
1501 safe_file_exists(F) :- atom(F), file_exists(F).
1502
1503 valid_category(Cat) :-
1504 cli_testcase(_Id, TestCategories, _Infos, _Arguments, _Comment),
1505 member(Cat, TestCategories).
1506
1507
1508 % ---------------------------
1509
1510 % run test using binary
1511 % binaries are registered in a file probcli_bak/codespeed_versions.pl
1512 % results for test ID are written to probcli_bak/codespeed/res_ID.csv
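% A codespeed_versions.pl entry is expected to match the probcli_binary/8
% pattern used below, e.g. (illustrative values):
%   probcli_binary(1,11,1,final,'versions/probcli-1.11.1','abcdef012345',
%                  sicstus(4,7,1),date(2022,6,15)).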
1513
1514 :- use_module(probsrc(tools), [ajoin/2,ajoin_with_sep/3]).
1515 codespeed_id(Ids) :- codespeed_id(Ids,[min_date(date(2020,1,1))]).
1516 codespeed_id([],_) :- !.
1517 codespeed_id([H|T],Options) :- !, codespeed_id(H,Options),!, codespeed_id(T,Options).
1518 codespeed_id(Cat,Options) :- atom(Cat),!,
1519 get_tests_for_categories([Cat],List),
1520 codespeed_id(List,Options).
1521 codespeed_id(testcase(Id, _, _, _, _),Options) :- !, codespeed_id(Id,Options).
1522 codespeed_id(Id,Options) :-
1523 use_module('probcli_bak/codespeed_versions.pl'), % contains probcli_binary
1524 cli_testcase(Id,_TestCategories,_TestInfos,Arguments,Comment),
1525 format('Benchmarking test ~w using probcli binaries ~w~nTest Description: ~w~n',[Id,Options,Comment]),
1526 maplist(convert_arg_to_atom,Arguments,Args2),
1527 findall(probcli_binary(V1,V2,V3,F,Path,Sics,Date),
1528 get_probcli_binary_info(Options,V1,V2,V3,F,Path,_Hash,Sics,Date),Binaries),
1529 Repeats=3,
1530 maplist(bench_probcli_binary_testcase(Id,Args2,Repeats),Binaries,Walltimes,Oks),
1531
1532 ajoin(['codespeed/res_',Id,'.csv'],FName),
1533 absolute_file_name(FName,AF,[relative_to(probcli_bak)]),
1534 format('Writing codespeed results for ~w to ~w~n',[Id,AF]),
1535 open(AF,write,Stream),
1536 format(Stream,'\"Test ~w\"~n',[Id]),
1537 format(Stream,'\"~w\"~n',[Comment]),
1538 ajoin_with_sep(Arguments,' ',ArgsStr),
1539 format(Stream,'Command:,\"probcli ~w\"~n~n',[ArgsStr]),
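% CSV header row naming the columns written by print_codespeed_results/5 below: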
1540 format(Stream,'\"~w.~w.~w-~w\",~w,~w,~w,~w,~w,~w,\"~w\"~n',[v1,v2,v3,f,sics,date,avg,min,max,ok,walltimes]),
1541 maplist(print_codespeed_results(Stream,Repeats),Binaries,Walltimes,Oks),
1542 close(Stream),
1543 maplist(print_codespeed_results(user_output,Repeats),Binaries,Walltimes,Oks).
1544
1545 get_probcli_binary_info(Options,V1,V2,V3,F,Path,Hash,Sics,Date) :-
1546 probcli_binary(V1,V2,V3,F,Path,Hash,Sics,Date), % from codespeed_versions.pl
1547 (( member(min_date(D2),Options), D2 @> Date
1548 ; member(max_date(D3),Options), D3 @< Date
1549 ; member(min_version(V12,V22,V32),Options), v(V12,V22,V32) @> v(V1,V2,V3)
1550 )
1551 -> format('Excluding version ~w.~w.~w-~w ~w~n',[V1,V2,V3,F,Options]),
1552 fail
1553 ; % format('Including version ~w.~w.~w-~w ~w~n',[V1,V2,V3,F,Options]),
1554 true).
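% e.g. codespeed_id(1000,[min_date(date(2021,1,1)),min_version(1,11,0)])
% benchmarks test 1000 (a hypothetical id) only on binaries built in 2021
% or later with version at least 1.11.0.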
1555
1556
1557 %:- use_module(library(statistics),[min_max/3]).
1558 print_codespeed_results(Stream,Repeats,probcli_binary(V1,V2,V3,F,_Path,sicstus(S1,S2,S3),date(Y,M,D)),Walltimes,Ok) :-
1559 sumlist(Walltimes,Sum),
1560 Average is Sum / Repeats,
1561 min_member(Min,Walltimes),
1562 max_member(Max,Walltimes),
1563 ajoin([S1,'.',S2,'.',S3],Sics),
1564 ajoin([Y,'/',M,'/',D],Date),
1565 format(Stream,'\"~w.~w.~w-~w\",~w,~w,~w,~w,~w,~w,\"~w\"~n',[V1,V2,V3,F,Sics,Date,Average,Min,Max,Ok,Walltimes]).
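% yields CSV lines such as (illustrative values):
%   "1.11.1-final",4.7.1,2022/6/15,1244.67,1200,1300,true,"[1200,1234,1300]"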
1566
1567 %bench_probcli_binary_testcase(Id,_Args,_Repeats,probcli_binary(V1,V2,V3,F,Path,_,_),Walltimes,Ok) :-
1568 % format('~nDry run test ~w using version ~w.~w.~w-~w (~w)~n',[Id,V1,V2,V3,F,Path]),!, Ok=skipped, Walltimes=[0].
1569 bench_probcli_binary_testcase(Id,Args,Repeats,probcli_binary(V1,V2,V3,F,Path,_,_),Walltimes,Ok) :-
1570 format('~nRunning test ~w using version ~w.~w.~w-~w (~w)~n',[Id,V1,V2,V3,F,Path]),
1571 run_probcli_binary_testcase(Id,Path,Args,_WT,Ok), % run once for parser
1572 rep_bench(Repeats,Id,Path,Args,Walltimes).
1573
1574 rep_bench(0,_,_,_,[]).
1575 rep_bench(Nr,Id,Path,Args,[WT1|WTR]) :- Nr>0,
1576 run_probcli_binary_testcase(Id,Path,Args,WT1,_),
1577 N1 is Nr-1,
1578 rep_bench(N1,Id,Path,Args,WTR).
1579
1580 :- use_module(probsrc(system_call), [system_call/5]).
1581 run_probcli_binary_testcase(Id,Path,Arguments,WT,Ok) :-
1582 statistics(walltime,[Start,_]),
1583 % absolute_file_name('probcli_bak/',BakPath),
1584 % atom_concat(BakPath,Path,Cmd),
1585 absolute_file_name(Path,Cmd,[relative_to(probcli_bak)]),
1586 format(' Test ~w :: ~w ~w~n',[Id,Cmd,Arguments]),
1587 system_call(Cmd,Arguments,_OutputText,ErrText,Exit),
1588 statistics(walltime,[Stop,_]), WT is Stop-Start,
1589 format(' Walltime: ~w ms; ~w~n',[WT,Exit]),
1590 (Exit=exit(0), ErrText = [] -> Ok=true
1591 ; format_error_nl('STD-ERROR (~w):~n~s',[Exit,ErrText]), Ok=false
1592 ).
1593
1594 % convert for system_call/process_create which do not accept numbers:
1595 convert_arg_to_atom(Nr,Atom) :- number(Nr), number_codes(Nr,C),!,atom_codes(Atom,C).
1596 convert_arg_to_atom(A,A).
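% e.g. convert_arg_to_atom(500,A) binds A to the atom '500';
% non-numeric arguments are passed through unchanged.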
1597
1598
1599 % --------------------
1600
1601 category_analysis :-
1602 format('Analysing test categories:~n',[]),
1603 findall(Cat-Id,(cli_testcase(Id,Categories,_,_Args,_Comm1),member(Cat,Categories)),L),
1604 sort(L,SL),
1605 keyclumped(SL,Groups), % Groups = [ Cat1 - [TestNr1, ...], Cat2 - [...], ...]
1606 maplist(print_cat_group,Groups).
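% prints one summary line per category, e.g. (illustrative values):
%   laws : 42 tests : [100 .. 1400] : 57 files, 40 distinct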
1607
% count the files used by the tests of category Cat (Nr with, DistinctNr without
% duplicates); Id can be bound to restrict the count to a single test:
nr_of_files(Cat,Id,Nr,DistinctNr) :-
1609 findall(File,(cli_testcase(Id,Categories,_,Args,_Comm1),
1610 member(Cat,Categories),
1611 file_in_arguments(File,Args)),Files),
1612 length(Files,Nr),
1613 sort(Files,SFile),
1614 length(SFile,DistinctNr).
1615
1616 print_cat_group(Cat-Tests) :-
1617 Tests = [First|T],
1618 nr_of_files(Cat,_,NrFiles,Distinct),
1619 (T=[]
1620 -> format(' ~w : 1 test : [~w] : ~w files, ~w distinct~n',[Cat,First,NrFiles,Distinct])
1621 ; length(Tests,Len),
1622 last(T,Last),
1623 format(' ~w : ~w tests : [~w .. ~w] : ~w files, ~w distinct~n',[Cat,Len,First,Last,NrFiles,Distinct])
1624 ).
1625