1 | % (c) 2009-2019 Lehrstuhl fuer Softwaretechnik und Programmiersprachen, | |
2 | % Heinrich Heine Universitaet Duesseldorf | |
3 | % This software is licenced under EPL 1.0 (http://www.eclipse.org/org/documents/epl-v10.html) | |
4 | ||
5 | :- ensure_loaded(prob_cli). % we call go_cli | |
6 | %:- use_module('../extensions/profiler/profiler.pl'). | |
7 | ||
8 | :- module(test_runner, [ | |
9 | test_repl/0, | |
10 | run_silently/0, | |
11 | run_last_test/0, last/0, | |
12 | run_tests_by_id/1, id/1, ids/1, | |
13 | run_tests_all/0, run_tests_all_except/1, | |
14 | run_tests_by_category/1, run_tests_by_category/2, category/1, | |
15 | run_tests_by_first_category/1, | |
16 | makefile_by_category/2, copy/1, | |
17 | generate_makefile/2, | |
18 | generate_makefile_all_tests/0, | |
19 | full_flush/0, | |
20 | jenkins_sanity_check/0, | |
21 | sanity_check/1, | |
22 | v/0, vv/0, | |
23 | cache/0, % enable caching | |
24 | all_categories/1, | |
25 | all_ids/1, | |
26 | ids_in_category/2, | |
27 | set_prob_examples_location/1 | |
28 | ]). | |
29 | ||
% disable the "clauses are not together" warning, because
% cli_testcase/4 and cli_testcase/5 are sorted by id
32 | :- set_prolog_flag(discontiguous_warnings,off). | |
33 | ||
34 | %:- prolog_flag(compiling,_,debugcode). | |
35 | %:- prolog_flag(source_info,_,on). | |
36 | %:- prolog_flag(profiling,_,on). | |
37 | ||
38 | /* | |
39 | :- use_module('../extensions/profiler/profiler.pl'). % (adds the profiler himself) | |
40 | :- use_module('../extensions/profiler/profiler_te.pl'). % (adds the term expander) | |
41 | %:- use_module('../extensions/profiler/profiler_gui.pl'). % (adds the term expander) | |
42 | :- enable_profiling_all(b_expression_sharing). | |
43 | :- enable_profiling(bsyntaxtree:transform_bexpr_with_acc/5). | |
44 | :- enable_profiling(bsyntaxtree:transform_bexpr/3). | |
45 | :- enable_profiling(b_ast_cleanup:recompute_used_ids_info/2). | |
46 | :- enable_profiling(bsyntaxtree:sub_expression_contains_wd_condition/1). | |
47 | */ | |
48 | ||
49 | ||
50 | :- use_module(library(system)). | |
51 | :- use_module(library(lists)). | |
52 | :- use_module(library(sets), [intersect/2]). | |
53 | :- use_module(library(file_systems)). | |
54 | ||
55 | :- use_module(junit_tests,[print_junit/2,set_junit_dir/1,create_junit_result/6]). | |
56 | :- use_module(testcases). | |
57 | :- use_module(system_call). | |
58 | :- use_module(tools,[safe_number_codes/2,get_options/5]). | |
59 | :- use_module(tools_commands,[edit_file/1]). | |
60 | :- use_module(error_manager,[get_total_number_of_errors/1, get_all_errors/1]). | |
61 | :- use_module(performance_messages,[toggle_perfmessages/0]). | |
62 | ||
63 | :- use_module(module_information). | |
64 | :- module_info(group,testing). | |
65 | :- module_info(description,'This module runs the tests stored in testcases.pl.'). | |
66 | ||
67 | :- use_module(library(sets)). | |
68 | ||
% generate_makefile(+Filename, :WriteMakeTargetPredicate)
% Write a Makefile to Filename: a 'testtarget' rule depending on all test
% categories, followed by one rule per category generated by calling
% WriteMakeTargetPredicate(Category).
% Note: output redirection is done via tell/told, so the callback must write
% to the *current* output stream (as write_makefile_target does).
:- meta_predicate generate_makefile(-,1).
generate_makefile(Filename,WriteMakeTargetPredicate) :-
    tell(Filename), % redirect current output to Filename
    all_categories(AllTestCategories),
    write('testtarget: '),
    write_subtargets(AllTestCategories), nl, nl,
    maplist(WriteMakeTargetPredicate,AllTestCategories),
    told. % close Filename and restore the previous output stream
77 | ||
% write_subtargets(+Categories)
% Write the category names to the current output, separated by single spaces.
% Fixes: the singleton clause lacked a cut (leaving a choicepoint that could
% re-print via the second clause on backtracking), and there was no clause
% for [], so an empty category list made generate_makefile/2 fail with the
% output still redirected.
write_subtargets([]). % nothing to write for an empty category list
write_subtargets([G]) :- !, write(G).
write_subtargets([G|Gs]) :-
    write(G), write(' '),
    write_subtargets(Gs).
82 | ||
% Generate 'Makefile_All_Tests' with one target per test category.
generate_makefile_all_tests :-
    generate_makefile('Makefile_All_Tests', write_makefile_target).
% Write one Makefile rule that runs all tests whose *first* category is G.
write_makefile_target(G) :-
    % run tests where category G is the first category to avoid running tests multiple times
    % also avoids concurrency issues when test run multiple times in parallel with diff checking of output
    format('~w:\n\tsicstus -l src/test_runner.pl --goal "run_tests_by_first_category(\'~w\'), halt ; halt."',[G,G]),nl,nl.
89 | ||
% all_categories(-Categories): every category occurring in some checked-in
% testcase, duplicates removed (first-occurrence order preserved).
all_categories(Categories) :-
    findall(Cats,
            ( cli_testcase(_Id,Cats,_Conf,_Args,_Desc),
              % these tests are not checked in; cannot be run on Jenkins, ...
              Cats \= [private_source_not_available|_] ),
            CatsPerTest),
    append(CatsPerTest,AllCats),
    list_to_set(AllCats,Categories).

% all_ids(-Ids): the ids of all known testcases, duplicates removed.
all_ids(Ids) :-
    findall(TestId,
            cli_testcase(TestId,_Groups,_Conf,_Args,_Desc),
            RawIds),
    list_to_set(RawIds,Ids).

% ids_in_category(+Category,-IDs): ids of all testcases that list Category
% among their groups.
ids_in_category(Category,IDs) :-
    findall(TestId,
            ( cli_testcase(TestId,Cats,_Conf,_Args,_Desc),
              member(Category,Cats) ),
            RawIds),
    list_to_set(RawIds,IDs).
109 | ||
% prob_junit(-ArgList): extra probcli arguments for JUnit reporting.
% Yields ['-junit',Dir] when the PROB_JUNIT_DIR environment variable is set,
% otherwise []. Consequently prob_junit([]) succeeds only when JUnit is off.
prob_junit(PROB_JUNIT) :-
    environ('PROB_JUNIT_DIR', Var), !,
    PROB_JUNIT = ['-junit', Var].
prob_junit([]).

% dontstop(-Value): value of the DONTSTOP environment variable, or false.
dontstop(Var) :-
    environ('DONTSTOP', Var2), !, Var=Var2.
dontstop(false).

% halt1_allowed: the runner may halt(1) on failure, i.e. we are not in the
% interactive REPL (repl_mode — presumably asserted by test_repl/0; defined
% elsewhere — TODO confirm), not producing JUnit output, and DONTSTOP is unset.
halt1_allowed :- \+ repl_mode,
    prob_junit([]), dontstop(false).

% tests_multiply_timeout(-Factor): timeout multiplier, taken from the
% TESTS_MULTIPLY_TIMEOUT environment variable or else a multiply_timeout/1 fact.
tests_multiply_timeout(Var) :-
    environ('TESTS_MULTIPLY_TIMEOUT', Var2), !, tools:arg_is_number(Var2,Var).
tests_multiply_timeout(Var) :- multiply_timeout(Var).

:- dynamic multiply_timeout/1.
127 | ||
% call updir from within Spider, when you are in the src subdirectory
% Ensure the current directory is the ProB root: if ./src exists we are
% already there; if ./cia exists we are inside src/ and move one level up;
% otherwise complain (but succeed) and print the current directory.
set_cur_dir :- (directory_exists('./src') -> true /* we seem to be in the right directory */
     ; directory_exists('./cia') -> current_directory(_Old,'./..')
     ; print('Please start tests from ProB directory or ProB/src directory'),nl),
    print('Current directory: '),
    current_directory(CD), print(CD),nl.

% Reset all bookkeeping (test counter, skip/failure records) before a run.
reset_test_runner :- reset_nr_of_tests, set_cur_dir,
    retractall(test_skipped(_)), retractall(test_failed(_)), retractall(test_diff_failed(_,_,_)).

% ids(+Nr): print the probcli command line of testcase Nr.
ids(Nr) :- show_command(Nr).
show_command(Nr) :- cli_testcase(Nr,_,_,L,Desc),
    print('# '), print(Desc),nl, l_pr([probcli|L]).
% l_pr(+Args): print each argument preceded by a space, then a newline.
l_pr([]) :- nl.
l_pr([H|T]) :- print(' '),print(H), l_pr(T).
143 | ||
:- volatile sanity_check_failed/0.
:- dynamic sanity_check_failed/0.
% Jenkins entry point: run all sanity checks and halt with exit code 1 if
% any of them flagged a problem, 0 otherwise.
jenkins_sanity_check :-
    sanity_check(true),
    (sanity_check_failed -> halt(1) ; halt(0)).
% sanity_check(+Jenkins): each clause is a failure-driven loop checking one
% consistency property of the testcase database; violations are reported and
% recorded by asserting sanity_check_failed/0.
% Check 1: every testcase must use the -strict option.
sanity_check(_Jenkins) :-
    cli_testcase(Id,Categories,_,Args,_Comm1),
    \+ member('-strict',Args),
    format_warning('*** test does not use -strict option: ',[]), print_test(Id,Categories),nl,
    assert(sanity_check_failed), fail.
% Check 2 (skipped on Jenkins): a test with a diff-checked output file should
% belong to exactly one category, otherwise parallel per-category runs can
% write the same output file concurrently.
sanity_check(Jenkins) :-
    Jenkins == false,
    cli_testcase_diff_check_output(Id,_F1,_F2),
    cli_testcase(Id,Categories,_I1,_A1,_Comm1),
    (Categories = [_]
     -> true
     ;  format_warning('*** multiple categories for test with output file; can generate race conditions when run in parallel: ',[]),
        print_test(Id,Categories),
        nl /* can generate race conditions when run in parallel */),
    assert(sanity_check_failed), fail.
% Check 3: every diff-output entry must refer to an existing testcase.
sanity_check(_Jenkins) :-
    cli_testcase_diff_check_output(Id,_F1,_F2),
    \+ (cli_testcase(Id,_Categories,_I1,_A1,_Comm1)),
    format_warning('*** no cli_testcase, but diff output defined for id: ~w~n',[Id]),
    assert(sanity_check_failed), fail.
% Check 4: an id must not have two differing testcase entries.
sanity_check(_Jenkins) :-
    cli_testcase(Id,C1,I1,A1,Comm1),
    cli_testcase(Id,C2,I2,A2,Comm2),
    [C1,I1,A1,Comm1] \= [C2,I2,A2,Comm2],
    format_warning('*** multiple cli_testcases for id: ~w~n',[Id]),
    assert(sanity_check_failed), fail.
sanity_check(_Jenkins). % always succeed; failures are recorded as a side effect
176 | ||
177 | ||
% format_warning(+Fmt,+Args): like format/2 on user_output, in red.
format_warning(F,Args) :- format_colour(red,F,Args).
% format_colour(+Colour,+Fmt,+Args): print with the given terminal colour,
% resetting the colour afterwards.
format_colour(Col,F,Args) :-
    start_terminal_colour(Col,user_output),
    format(user_output,F,Args),
    reset_terminal_colour(user_output).

% print_test(+Id,+Comment): one-line "Id : Comment" summary (no newline).
print_test(Id,Comment) :- print(Id),print(' : '), print(Comment).
185 | ||
% run the latest test (with highest id)
last :- run_last_test.
run_last_test :- get_last_test_id(Id),
    run_tests_by_id(Id).

% get_last_test_id(-Id): the numerically highest testcase id.
get_last_test_id(Id) :- set_last_test_id, last_test_id(Id).

% Failure-driven loop over all testcases tracking the maximum id in the
% dynamic fact last_test_id/1; also warns about duplicate differing entries.
set_last_test_id :-
    retractall(last_test_id(_)), assert(last_test_id(-1)),
    cli_testcase(Id,C1,I1,A1,Comm1),
    ((cli_testcase(Id,C2,I2,A2,Comm2),(C1,I1,A1,Comm1) \= (C2,I2,A2,Comm2))
     -> format_warning('~n*** multiple entries for test id: ~w~n',[Id])
     ; true),
    update_last(Id),
    fail.
set_last_test_id.

:- volatile last_test_id/1.
:- dynamic last_test_id/1.
% update_last(+Id): raise the stored maximum to Id if Id is larger.
update_last(Id) :- number(Id),!, % debug_print(9,Id), debug_print(9,' '),
    retract(last_test_id(L)),
    (Id>L -> Max = Id ; Max = L),
    assert(last_test_id(Max)).
update_last(Id) :- format_warning('~n*** test id not a number: ~w~n',[Id]).
210 | ||
% repeat_id(+ID,+Nr): run testcase ID Nr times and report min/max walltime
% and the overall status (success unless some run failed).
% Bugfix: the info/3 accumulator's second field was unified with the loop
% counter Nr in the clause head, so after one iteration the recursive call
% (counter N1, info field still Nr) failed head unification and fell straight
% into the report clause — only ONE run ever happened. The accumulator now
% carries the total count independently of the counter.
repeat_id(ID,Nr) :- repeat_id_aux(ID,Nr,info(ID,Nr,success),10000000,0).
% repeat_id_aux(+ID,+Counter,+Info,+MinSoFar,+MaxSoFar)
repeat_id_aux(ID,Nr,info(ID2,Tot,Status1),Min,Max) :- Nr>0, !, N1 is Nr-1,
    statistics(walltime,[Start,_]),
    run_tests_by_id(ID,Status), (Status=success -> Status2=Status1 ; Status2=Status),
    statistics(walltime,[Stop,_]),
    WT is Stop - Start,
    NewMin is min(Min,WT), NewMax is max(Max,WT),
    repeat_id_aux(ID,N1,info(ID2,Tot,Status2),NewMin,NewMax).
repeat_id_aux(_,_,info(ID,Tot,Status),Min,Max) :- format('Test ~w repeated ~w times, Walltime Minimum = ~w ms, Maximum = ~w ms, Status=~w~n',[ID,Tot,Min,Max,Status]).
220 | ||
% run a test with a specific id
id(X) :- run_tests_by_id(X,_).
run_tests_by_id(X) :- run_tests_by_id(X,_).
% run_tests_by_id(+Spec,-Status): Spec is a single id, a list of specs, or an
% inclusive numeric range From-To; Status is success or failure overall.
run_tests_by_id(X,Status) :- reset_test_runner,
    run_tests_by_id_aux(X),
    check_failed(Status).
run_tests_by_id_aux([]).
run_tests_by_id_aux(From-To) :- !, % inclusive numeric range
    (From>To -> true
     ; run_tests_by_id_aux(From), F1 is From+1,
       run_tests_by_id_aux(F1-To)).
run_tests_by_id_aux([Id | Ids]) :- !,
    run_tests_by_id_aux(Id),
    run_tests_by_id_aux(Ids).
run_tests_by_id_aux(Id) :-
    cli_testcase(Id,_TestCategories,TestInfos,Arguments,Comment), !,
    run_list([testcase(Id,TestInfos,Arguments,Comment)]).
% Unknown id: warn but keep going (important when iterating over ranges).
run_tests_by_id_aux(Id) :-
    \+(cli_testcase(Id,_Category,_Infos,_Arguments,_Comment)), !,
    format_warning('*** No testcase with ID ~w found~n', [Id]).
241 | ||
:- use_module(library(random),[random_select/3]).
% run_random_tests(+Nr): run Nr randomly chosen testcases
% (all of them if fewer than Nr exist).
run_random_tests(Nr) :-
    findall(testcase(Id,TestInfos,Arguments,Comment),
            cli_testcase(Id,_TestCategories,TestInfos,Arguments,Comment),AllTests),
    select_random_tests(Nr,AllTests,Tests),
    run_list(Tests).
% select_random_tests(+N,+All,-Selected): pick up to N elements at random,
% without repetition (stops early when All is exhausted).
select_random_tests(N,_,[]) :- N<1,!.
select_random_tests(_,[],[]) :- !.
select_random_tests(N,All,[X|Tests]) :-
    random_select(X,All,Rest),
    N1 is N-1,
    select_random_tests(N1,Rest,Tests).
254 | ||
255 | ||
% Run every testcase (run_tests_all), or every testcase whose category list
% does not intersect the given exclusion list (run_tests_all_except/1).
run_tests_all :- run_tests_all_except([]).

:- use_module(library(ordsets)).
run_tests_all_except(Categories) :- reset_test_runner,
    sort(Categories,SC), % sorted for ord_intersect/2
    cli_testcase(Id,TestCategories,Infos,Arguments,Comment), sort(TestCategories,TC),
    \+ ord_intersect(TC,SC), % skip tests in an excluded category
    run_list([testcase(Id,Infos,Arguments,Comment)]),
    fail. % failure-driven loop over all testcases
run_tests_all_except(_) :- check_failed(_).
266 | ||
267 | ||
% run all tests with a specific category
run_silently :- add_additional_arguments(['-silent']).
category(X) :- run_tests_by_category(X).
run_tests_by_category(X) :- run_tests_by_category(X,all).
run_tests_by_first_category(X) :- % used as entry in Jenkins when running all tests in parallel by category
    run_silently,run_tests_by_category(X,first).
% run_tests_by_category(+CatOrList,+FirstOnly): FirstOnly=first selects only
% tests whose *first* category matches (avoids running a test in several
% parallel category jobs); any other value matches any category.
run_tests_by_category(X,FirstOnly) :-
    reset_test_runner,
    (is_list(X) -> run_tests_by_category_aux(X,FirstOnly) ; run_tests_by_category_aux([X],FirstOnly)),
    check_failed(_).
run_tests_by_category_aux(Categories,FirstOnly) :-
    findall(testcase(Id, Infos, Arguments, Comment),
            (cli_testcase(Id, TestCategories, Infos, Arguments, Comment),
             (FirstOnly=first -> TestCategories=[C1|_], member(C1,Categories)
              ; intersect(Categories, TestCategories)) ),
            List),
    ( List=[] -> format_warning('*** No testcase with categories ~w found~n', [Categories])
     ; otherwise -> run_list(List)).
286 | ||
% Run all testcases whose argument list contains the given cli command.
run_tests_using_command(Command) :- reset_test_runner,
    findall(testcase(Id, Infos, Arguments, Comment),
            (cli_testcase(Id, _TestCategories, Infos, Arguments, Comment),
             member(Command, Arguments)),
            List),
    ( List=[] -> format_warning('*** No testcase using command ~w found~n', [Command])
     ; otherwise -> run_list(List), check_failed(_)).

% Run all testcases that set preference Pref to Val via '-p Pref Val'.
run_tests_using_preference(Pref,Val) :- reset_test_runner,
    findall(testcase(Id, Infos, Arguments, Comment),
            (cli_testcase(Id, _TestCategories, Infos, Arguments, Comment),
             append(_,['-p',Pref,Val|_],Arguments) ),
            List),
    ( List=[] -> format_warning('*** No testcase using preference ~w with value ~w found~n', [Pref,Val])
     ; otherwise -> run_list(List), check_failed(_)).
302 | ||
% Generate file list (for copying) of a category :
% Prints (space-separated) every file referenced by the tests of Cat,
% including diff reference files.
show_files(Cat) :-
    cli_testcase(Id, TestCategories, _Infos, Arguments, _Comment),
    member(Cat, TestCategories), % print(Arguments),nl,
    ( file_in_arguments(File,Arguments)
     ;
      get_testcase_diff_check_output(Id,_GeneratedFile,File)),
    format('~w ',[File]),
    fail. % failure-driven loop over all matching tests/files
show_files(_) :- nl.

% file_in_arguments(-File,+Args): enumerate the arguments that look like file
% paths, skipping over preference values and the operands of binary commands.
file_in_arguments(F,['-p',_,_|T]) :- !, file_in_arguments(F,T).
file_in_arguments(F,[C,_|T]) :- binary_command(C),!, file_in_arguments(F,T).
file_in_arguments(F,[H|T]) :- is_file(H),H=F ; file_in_arguments(F,T).
% commands whose following argument is not a file
binary_command(eval). binary_command(evalt). binary_command(evalf).
binary_command('-mc').
% an atom containing a slash (character code 47 = '/') is taken to be a path
is_file(F) :- atom(F), atom_codes(F,Codes), member(47,Codes),!.
320 | ||
321 | ||
% Generate a Makefile for all listed Categories
% example: makefile_by_category('AlstomMakefile',[animate,history,sptxt,rel_fnc])
% the Makefile can be run to perform the listed tests on a compiled version of probcli
% File may be the atom user_output to print to the terminal (see my_open/2).
makefile_by_category(File, Categories) :-
    (is_list(Categories) -> Cats = Categories ; Cats = [Categories]),
    my_open(File, Stream),
    format(Stream, 'PROBCLI=probcli~n',[]),
    format(Stream, 'test_all:', []),
    makefile_write_categories(Stream, Cats),
    makefile_by_category_aux(Stream, Cats),
    my_close(Stream).
% Write one Makefile rule per category.
makefile_by_category_aux(_Stream, []).
makefile_by_category_aux(Stream, [Cat | Cats]) :-
    makefile_by_category_single(Stream, Cat),
    makefile_by_category_aux(Stream, Cats).
makefile_by_category_single(Stream, Cat) :-
    findall(testcase(Id, Infos, Arguments, Comment),
            (cli_testcase(Id, TestCategories, Infos, Arguments, Comment),
             member(Cat, TestCategories)),
            List),
    (List=[] -> format_warning('*** No testcase with category ~w found~n', [Cat])
     ; format(Stream, '~n~w:~n', [Cat]), makefile_write_calls(Stream, List)).
344 | ||
% my_open(+FileOrUserOutput,-Stream): open File for writing; the special
% atom user_output is passed through unchanged (no file is opened).
my_open(File,Stream) :-
    ( File = user_output -> Stream = user_output
    ; open(File,write,Stream)
    ).
% my_close(+Stream): close Stream unless it is user_output.
my_close(Stream) :-
    ( Stream = user_output -> true
    ; close(Stream)
    ).
349 | ||
% Write the category names as dependencies of the test_all target.
makefile_write_categories(Stream, []) :-
    format(Stream, '~n', []).
makefile_write_categories(Stream, [Cat | Cats]) :-
    format(Stream, ' ~a', [Cat]),
    makefile_write_categories(Stream, Cats).

% Write one probcli invocation (echo, command, optional diff check) per test.
makefile_write_calls(_Stream, []).
makefile_write_calls(Stream, [testcase(Id, _Infos, Arguments, Comment) | Tests]) :-
    (Comment='' -> true ; format(Stream,'\techo \"Test ~w : ~w\"~n',[Id,Comment])),
    format(Stream, '\t$(PROBCLI)', []),
    makefile_write_arguments(Stream, Arguments),
    makefile_write_diff(Stream, Id),
    makefile_write_calls(Stream, Tests).
363 | ||
% Write the probcli arguments; the argument following one of the
% quote_next_arg/1 options is wrapped in double quotes for the shell.
makefile_write_arguments(Stream, []) :-
    format(Stream, '~n', []).
makefile_write_arguments(Stream, [Arg | Args]) :-
    format(Stream, ' ~w', [Arg]),
    (quote_next_arg(Arg) -> makefile_write_arguments_quoted(Stream,Args)
     ; makefile_write_arguments(Stream, Args)).

% Options whose following argument may contain spaces and must be quoted:
quote_next_arg('-goal'). % this will contain spaces ,... surround in "" for shell
quote_next_arg('--check_goal').
quote_next_arg('-check_goal').
quote_next_arg('-cbc_deadlock_pred').
quote_next_arg('-eval').
quote_next_arg('-evalt').
quote_next_arg('-evalf').
quote_next_arg('-cbc_sequence_with_target'). % actually quotes next two !
quote_next_arg('-cbc_sequence_with_target_all'). % ditto <- TO DO
380 | ||
381 | ||
% Write the next argument surrounded by double quotes, then fall back to the
% unquoted writer. NOTE(review): only ONE following argument is quoted here,
% although the quote_next_arg/1 table above claims -cbc_sequence_with_target
% should quote the next two — confirm intended behaviour.
makefile_write_arguments_quoted(Stream, []) :-
    format(Stream, '~n', []).
makefile_write_arguments_quoted(Stream, [Arg | Args]) :-
    format(Stream, ' \"~w\"', [Arg]),
    makefile_write_arguments(Stream, Args).

% Emit a 'diff -b' line for every registered output file of testcase ID
% (failure-driven loop; succeeds even if there is none).
makefile_write_diff(Stream, ID) :-
    get_testcase_diff_check_output(ID, File1, File2),
    format(Stream, '\tdiff -b ~w ~w~n', [File1, File2]),
    fail.
makefile_write_diff(_Stream, _ID).
393 | ||
394 | % ------------------------- | |
395 | ||
% copy(+CatOrList): print and execute shell commands copying all files needed
% by the tests of the given categories into testarchive/.
copy(Cat) :- (Cat=[_|_] -> C=Cat ; C=[Cat]),
    generate_copy_commands(C,'testarchive/').

:- use_module(b_trace_checking,[get_default_trace_file/2]).
% Failure-driven loop: for each matching test copy its main file and every
% additional file it depends on.
generate_copy_commands(Categories,Dest) :-
    cli_testcase(ID, TestCategories, _Infos, Arguments, _Comment),
    non_empty_inter(Categories,TestCategories), %print(ID),nl,
    Arguments=[MainFile|_], generate_copy_command(MainFile,Dest), % print(MainFile),nl,
    additional_testcase_file(ID,MainFile,Arguments,ExtraFile),
    generate_copy_command(ExtraFile,Dest),
    fail.
generate_copy_commands(_,_).

% additional_testcase_file(+ID,+MainFile,+Args,-ExtraFile): enumerate further
% files a testcase needs: declared extra files, the default trace file when
% '-t' is used, and diff reference files.
additional_testcase_file(ID,_,_,EFile) :- extra_testcase_file(ID,EFile).
additional_testcase_file(_ID,File,Arguments,TFile) :- member('-t',Arguments),
    get_default_trace_file(File,TFile).
additional_testcase_file(ID,_,_,RefFile2) :- get_testcase_diff_check_output(ID,_File1,RefFile2).

% true if the two lists share at least one element
non_empty_inter(A,B) :- member(X,A), member(X,B),!.
415 | ||
:- use_module(tools_strings,[string_concatenate/3]).
:- use_module(tools,[get_parent_directory/2]).
% generate_copy_command(+File,+Dest): mkdir -p the destination directory and
% cp File below Dest, echoing both commands; silently succeeds for files that
% do not exist or have no parent directory.
generate_copy_command(File,Dest) :-
    safe_file_exists(File),
    get_parent_directory(File,Dir),!,
    string_concatenate(Dest,Dir,DestDir),
    string_concatenate(Dest,File,DestFile),
    format(user_output,'\tmkdir -p ~w~n',[DestDir]),
    get_command_path(mkdir,MkCmdPath),
    system_call(MkCmdPath, ['-p',DestDir],_Text1,_JExit1),
    format(user_output,'\tcp ~w ~w~n',[File,DestFile]),
    get_command_path(cp,CpCmdPath),
    system_call(CpCmdPath, [File,DestFile],_Text2,_JExit2).
generate_copy_command(_,_). % file missing: nothing to copy
430 | ||
431 | % ------------------------- | |
432 | ||
:- dynamic test_failed/1, test_diff_failed/3, test_skipped/1.
:- use_module(tools_printing,[start_terminal_colour/2, reset_terminal_colour/1]).
% check_failed(-Status): summarise the finished run. Status=failure if any
% test failed (may halt(1) unless in junit/dontstop/repl mode), else success.
check_failed(failure) :- test_failed(X),!,print_failed_tests,
    print('Use the following command to run individual tests: '),nl,
    print('    sicstus -l src/test_runner.pl --goal "id('),print(X),print(')."'),nl,
    (halt1_allowed -> halt(1) ; print('halt(1) :: junit / dontstop mode'),nl).
check_failed(success) :- number_of_tests_run(Nr),
    start_terminal_colour([green,bold],user_output),
    findall(Y,test_skipped(Y),Skips), length(Skips,NrSkips),
    (Nr=1,NrSkips=0 -> format(user_output,'Test successful.~n',[])
     ; NrSkips>0 -> format(user_output,'All ~w tests successful, ~w skipped.~n',[Nr,NrSkips])
     ; format(user_output,'All ~w tests successful.~n',[Nr])),
    reset_terminal_colour(user_output).
% Print the failing test ids with their descriptions on user_error (in red).
print_failed_tests :- number_of_tests_run(Nr),
    findall(Y,test_failed(Y),Fails), length(Fails,NrFails),
    start_terminal_colour([red,bold],user_error),
    format(user_error,'** Tests run: ~w, failed: ~w **~n** Failed tests:~n',[Nr,NrFails]),
    test_failed(X), lookup_test_description(X,Desc),
    format(user_error,'~w ~w~n',[X,Desc]),
    fail. % failure-driven loop over all failed tests
print_failed_tests :- nl(user_error),
    (user_interrupt_signal_received -> format(user_error,'Tests were interrupted by CTRL-C (user_interrupt)~n',[])
     ; true),
    reset_terminal_colour(user_error).

lookup_test_description(Id,Desc) :- cli_testcase(Id,_,_,_,Desc).
459 | ||
:- dynamic user_interrupt_signal_received/0.
% run_list(+Testcases): run each testcase(Id,Infos,Args,Comment) in order;
% after a CTRL-C the remaining tests are recorded as skipped instead of run.
run_list(List) :- retractall(user_interrupt_signal_received),
    length(List,Len),
    maplist(run_single_testcase_list_aux(Len),List).
run_single_testcase_list_aux(Len,TC) :-
    (user_interrupt_signal_received
     -> TC=testcase(Id,_,_,_), assert(test_skipped(Id)) % skip rest after CTRL-C
     ; print_progress_stats(Len),run_single_testcase(TC)).

% Print a blue progress line (only once at least one test has run).
print_progress_stats(All) :- number_of_tests_run(Nr), Nr>0,!,
    findall(Y,test_failed(Y),Fails), length(Fails,NrFails),
    findall(Y,test_skipped(Y),Skips), length(Skips,NrSkips),
    format_colour([blue],'Progress: ~w/~w tests run, ~w skipped, ~w failed~n',[Nr,All,NrSkips,NrFails]).
print_progress_stats(_).
474 | ||
% Convenience toggles adding extra probcli arguments to every subsequent run:
cache :- add_additional_arguments(['-cache','/Users/leuschel/svn_root/NewProB/examples/cache/']).
v :- add_additional_arguments(['-v']). % verbose
vv :- add_additional_arguments(['-vv']). % very_verbose

:- volatile additional_arguments/1.
:- dynamic additional_arguments/1.
% add additional cli arguments when running tests:
% (appends List to the single stored additional_arguments/1 fact)
add_additional_arguments(List) :-
    (retract(additional_arguments(Old)) -> true ; Old=[]),
    append(Old,List,New),
    assert(additional_arguments(New)).
486 | ||
% add additional preference when running tests:
% replaces any previously stored setting of the same preference.
add_additional_preference(PREF,PREFVAL) :-
    (retract(additional_arguments(Old0)) -> remove_matching_pref(Old0,PREF,Old) ; Old=[]),
    New = ['-p',PREF,PREFVAL|Old],
    format('New additional arguments: ~w~n',[New]),
    assert(additional_arguments(New)).

% remove all preferences conflicting with PREF
% remove_matching_pref(+Args,+PREF,-ArgsWithoutPref): drop the '-p PREF Val'
% triple from Args (if present), keeping all other arguments.
remove_matching_pref([],_PREF,[]).
remove_matching_pref([H|T],PREF,[H|TT]) :- non_pref_cli_arg(H),!,
    remove_matching_pref(T,PREF,TT).
remove_matching_pref(['-p',P,OLD|T],PREF,Res) :- !,
    (P=PREF -> Res=T ; Res = ['-p',P,OLD|RT], remove_matching_pref(T,PREF,RT)).
remove_matching_pref([H|T],PREF,[H|RT]) :- remove_matching_pref(T,PREF,RT).

% cli arguments that are not preference settings (take no value):
non_pref_cli_arg('-v').
non_pref_cli_arg('-vv').
non_pref_cli_arg('-silent').
505 | ||
% remove all preferences conflicting with the given list of extra arguments
% remove_matching_prefs(+ExtraArgs,+InPrefs,-Res): for every '-p PREF Val' in
% ExtraArgs, remove the matching preference from InPrefs; other extra
% arguments are skipped. Fixes: there was no catch-all clause, so any extra
% argument other than -v/-vv/-silent/-cache/-p made the predicate (and hence
% run_single_testcase/1) fail; a cut was added to the '-p' clause so the new
% catch-all cannot fire for it on backtracking.
remove_matching_prefs([],P,P).
remove_matching_prefs([H|T],InPrefs,Res) :- non_pref_cli_arg(H),!,
    remove_matching_prefs(T,InPrefs,Res).
remove_matching_prefs(['-cache',_File|T],InPrefs,Res) :- !,
    remove_matching_prefs(T,InPrefs,Res).
remove_matching_prefs(['-p',PREF,_|T],InPrefs,Res) :- !,
    remove_matching_pref(InPrefs,PREF,In2),
    remove_matching_prefs(T,In2,Res).
remove_matching_prefs([_|T],InPrefs,Res) :- % unknown extra argument: ignore
    remove_matching_prefs(T,InPrefs,Res).
515 | ||
516 | ||
% RUNNING SINGLE TESTCASE:
% ------------------------
% First clause: the test's Infos mark it as skipped; record the skip and
% report it to JUnit (if enabled) without running probcli.
run_single_testcase(testcase(Id,Infos,Arguments,_Comment)) :-
    skip_test(Infos), !,
    full_flush,
    format_colour([blue,bold],'Skipping test ~w ~n', [Id]), print_junit_skip(Arguments),
    assert(test_skipped(Id)),
    full_flush.
% Main clause: assemble the full probcli argument list (JUnit options, extra
% user arguments with conflicting preferences removed, scaled timeouts), run
% user:go_cli/1 under catch/3 and classify the outcome.
run_single_testcase(testcase(Id,_Infos,Arguments,Comment)) :-
    full_flush,
    format_colour([blue],'Running test ~w ~n ~w~n',[Id,Comment]),
    print('testcase: probcli '), print_args(Arguments),nl,
    clear_diff_output(Id),
    prob_junit(JUnit),
    maplist(patch_prob_examples_loc,Arguments,Arguments0), % update path to prob_examples if necessary
    append(Arguments0, JUnit, Arguments1),
    (additional_arguments(ExtraArgs)
     -> remove_matching_prefs(ExtraArgs,Arguments1,RemArguments1), % remove conflicting arguments now overriden
        append(ExtraArgs,RemArguments1,Arguments2)
     ; Arguments1=Arguments2),
    (tests_multiply_timeout(Factor) -> modify_timeout(Factor,Arguments2,Arguments3) ; Arguments3 = Arguments2),
    print('executed: probcli '), print_args(Arguments3),nl,
    full_flush,
    test_started(Id),
    catch(user:go_cli(Arguments3), Exception, true), !,
    test_finished(Id),
    (
     Exception == halt(0) -> check_diff_output(Id) ;
     var(Exception) -> check_diff_output(Id) ; % normal termination
     Exception = error(X,Y) -> (halt1_allowed -> format_warning('error on test execution: error(~w,~w)',[X,Y]), halt(1) ; test_failed(Id,Exception)) ;
     Exception = enumeration_warning(_,_,_,_,_) -> (halt1_allowed -> format_warning('exception on test execution: ~w)',[Exception]), halt(1) ; test_failed(Id,Exception)) ;
     Exception = solver_and_provers_too_weak -> (halt1_allowed -> format_warning('exception on test execution: ~w)',[Exception]), halt(1) ; test_failed(Id,Exception)) ;
     Exception == halt(1) -> test_failed(Id, Exception) ;
     % bugfix: was 'Exception == parse_errors(_PE)', which can never succeed
     % (==/2 is structural identity and _PE is a fresh variable), so parse
     % errors fell through to the catch-all below and halted the runner.
     Exception = parse_errors(_PE) -> test_failed(Id, Exception) ;
     Exception == user_interrupt_signal -> assert(user_interrupt_signal_received),
             format_warning('CTRL-C received, aborting tests~n',[]),
             test_failed(Id, Exception) ;
     otherwise -> test_failed(Id, Exception), format_warning('Exception not caught in test_runner: ~w~n',[Exception]), halt(1)
    ), !,
    (test_failed(Id) -> true % messages already printed above
    ; format_colour([green],'Test ~w completed successfully~n~n',[Id])),
    user:reset_cli,
    full_flush.
560 | ||
:- dynamic test_took_aux/1.
:- dynamic last_testcase_run/1, number_of_tests_run/1.
% counter of tests executed in the current run (single fact, starts at 0)
number_of_tests_run(0).
reset_nr_of_tests :- retractall(number_of_tests_run(_)), assert(number_of_tests_run(0)).
565 | ||
:- use_module(library(system),[now/1, datime/2]).
:- dynamic performance_session_running/1, performance_session_stats/4.

% Start a Codespeed performance monitoring session; subsequent test timings
% are recorded under the session's start timestamp (see test_finished/1).
performance_session_start :-
    now(When),
    datime(When,datime(Year,Month,Day,Hour,Min,Sec)),
    format('~nStarting Codespeed Performance Monitoring session ~w:~w:~w:~w:~w:~w~n',[Year,Month,Day,Hour,Min,Sec]),
    retractall(performance_session_running(_)),
    assert(performance_session_running(When)).
575 | ||
:- use_module(parsercall,[get_parser_version/1]).
:- use_module(version, [version_str/1, revision/1, lastchangeddate/1]).
% performance_session_end(+FilePrefix): append the session's version info and
% all recorded per-test timings as Prolog facts to the file FilePrefix.
performance_session_end(FilePrefix) :-
    performance_session_running(When),
    datime(When,datime(Year,Month,Day,Hour,Min,Sec)),
    %tools:ajoin([FilePrefix,':',Year,Month,Day,Hour,Min,Sec],FileName),
    format('~nFinishing Codespeed session ~w:~w:~w:~w:~w:~w~n -> File : ~w~n',[Year,Month,Day,Hour,Min,Sec,FilePrefix]),
    open(FilePrefix,append,S),
    format(S,'~n/* Codespeed session ~w:~w:~w:~w:~w:~w */~n',[Year,Month,Day,Hour,Min,Sec]),
    version_str(Vers), portray_clause(S, session_prob_version(When,Vers)),
    revision(Rev), portray_clause(S, session_prob_revision(When,Rev)),
    lastchangeddate(DD), portray_clause(S, session_prob_lastchangeddate(When,DD)),
    get_parser_version(PV), portray_clause(S, session_prob_parser_version(When,PV)),
    write_perf_data(When,S).

% Failure-driven loops writing the stored arguments and per-test stats;
% the final clause closes the stream.
write_perf_data(When,S) :- additional_arguments(New),
    portray_clause(S, stored_additional_arguments(When,New)),
    fail.
write_perf_data(When,S) :- performance_session_stats(When,Id,Time,WTime),
    portray_clause(S, stored_performance_test_stats(When,Id,Time,WTime)),
    fail.
write_perf_data(_When,S) :- nl(S), nl(S), close(S).
598 | ||
599 | ||
% test_started(+Id): record Id as the test currently running and store the
% start runtime/walltime in the blackboard for test_finished/1 and test_took/1.
test_started(Id) :-
    retractall(last_testcase_run(_)), assert(last_testcase_run(Id)),
    retractall(test_took_aux(_)), % invalidate any cached duration
    statistics(runtime,[Start,_]),
    statistics(walltime,[WStart,_]),
    bb_put(test_started,Start),
    bb_put(test_started_wtime,WStart).
607 | ||
:- dynamic test_stats/5.
% test_finished(+Id): compute the elapsed runtime/walltime for Id, bump the
% test counter, store the stats (keeping the previous run's times for the
% delta report) and record them in the performance session if one is running.
test_finished(Id) :-
    statistics(runtime,[End,_]),
    statistics(walltime,[WEnd,_]),
    bb_get(test_started,Start),
    bb_get(test_started_wtime,WStart),
    Time is End - Start, WTime is WEnd- WStart,
    (retract(number_of_tests_run(Nr)) -> N1 is Nr+1 ; N1=1),
    assert(number_of_tests_run(N1)),
    (retract(test_stats(Id,PrevTime,PrevWTime,_,_))
     -> assert(test_stats(Id,Time,WTime,PrevTime,PrevWTime))
     ;  assert(test_stats(Id,Time,WTime,-1,-1)) % -1 marks "no previous run"
    ),
    (performance_session_running(When)
     -> assert(performance_session_stats(When,Id,Time,WTime))
     ; true).
624 | ||
% Compare the walltimes of this run with the previous run (if any) and print
% a table sorted by percentage change.
% Fixes: the header line used print/1 with '~n' inside the atom — print/1
% does not interpret format directives, so a literal "~n" was printed;
% replaced by format/2 (which also makes the following nl redundant).
print_delta_stats :- print('Comparing walltimes with previous test run: '),nl,
    findall(delta(DeltaPerc,DeltaWTime,Id),test_delta_stat(Id,DeltaPerc,DeltaWTime),L),
    (L=[] -> print('No previous run information available'),nl
    ; format(' ID | % (delta absolute) | walltime (runtime)~n',[]),
      sort(L,SL), % sorted by percentage change (first arg of delta/3)
      maplist(print_delta,SL)).
% test_delta_stat(-Id,-DeltaPerc,-DeltaWTime): walltime change of test Id
% relative to the previous run (only for tests with a recorded previous run).
test_delta_stat(Id,DeltaPerc,DeltaWTime) :-
    test_stats(Id,_RTime,WTime,_PrevRTime,PrevWTime),
    PrevWTime>0,
    DeltaWTime is WTime - PrevWTime,
    DeltaPerc is (100*DeltaWTime) / PrevWTime.
% Print one table row for a delta/3 entry.
print_delta(delta(DeltaPerc,DeltaWTime,Id)) :-
    test_stats(Id,RTime,WTime,_PrevRTime,PrevWTime),
    format(' ~w | ~2f % (~w ms) | ~w ms (~w ms runtime) [~w walltime ms previously]~n',
           [Id,DeltaPerc,DeltaWTime,WTime,RTime,PrevWTime]).
640 | ||
:- use_module(tools,[print_memory_used_wo_gc/0]).
% Print memory usage plus one line per recorded test with its status and
% timings (failure-driven loop over test_stats/5).
print_current_stats :-
    print_memory_used_wo_gc,nl,
    format(' ID | OK | WALLTIME | RUNTIME~n',[]),
    test_stats(Id,RTime,WTime,_PrevRTime,_PrevWTime),
    (test_failed(Id) -> OK = '*FAILED*' ; OK = '   OK   '),
    format(' ~w | ~w | ~w ms | ~w ms runtime~n',
           [Id,OK,WTime,RTime]),
    fail.
print_current_stats.
651 | ||
% test_took(-Seconds): runtime of the current test so far, in seconds;
% the first computed value is cached in test_took_aux/1 (cleared by
% test_started/1) so repeated calls return the same duration.
test_took(X) :- test_took_aux(X), !.
test_took(XInSeconds) :-
    statistics(runtime,[End,_]),
    bb_get(test_started,Start),
    X is End - Start,
    XInSeconds is X / 1000,
    asserta(test_took_aux(XInSeconds)).
659 | ||
% print_junit_skip(+Arguments): when JUnit reporting is enabled, emit a
% 'skip' result for the test with the given arguments; otherwise do nothing.
print_junit_skip(Arguments) :-
    prob_junit(['-junit', Dir])
    -> set_junit_dir(Dir), test_took(X),
       create_junit_result(Arguments,'Integration Tests','Integration_Test',X,skip,Result),
       print_junit([Result],'Integration_Test')
    ;  true.
666 | ||
% if the test expects a time_out error, the timeout is not expanded
% otherwise, timeout is increased to allow coverage analysis / junit / etc to finish
% modify_timeout(+Factor,+Options,-NewOptions): scan a test's argument list
% and multiply the TIME_OUT preference value by Factor. Clause order matters:
% clause 1 (expected time_out) and clause 3 (numeric TIME_OUT found) commit
% via cut; clause 2 fires only when the scan reaches the end of the list
% without finding a TIME_OUT, appending Factor * default at the tail.
modify_timeout(_,OldOptions,OldOptions) :- segment(OldOptions,['-expcterr','time_out']), !.
modify_timeout(Factor,[],['-p','TIME_OUT',NVal]) :- % timeout was not set at all - set it to Factor*Default
	% Note: there is a potential problem when the time_out is set inside the machine and not in the test !! TO DO: fix
	preferences:preference_default_value(time_out,DEFAULT),
	NVal is Factor * DEFAULT.
% an explicit numeric TIME_OUT was found: scale it in place
modify_timeout(Factor,['-p','TIME_OUT',OLD|T],['-p','TIME_OUT',NewT|T]) :-
	tools:arg_is_number(OLD,OLDT), !,
	% preferences:preference_default_value(time_out,DEFAULT),
	% OLDT < DEFAULT,!, % we explicitly set a TIME_OUT lower than the default value
	NewT is OLDT * Factor.
% any other argument: keep it and continue scanning the rest of the list
modify_timeout(Factor,[H|T],[H|MT]) :- modify_timeout(Factor,T,MT).
680 | ||
681 | full_flush :- flush_output(user_output), flush_output(user_error). | |
682 | ||
% print a list of arguments, each followed by a single space
print_args([]).
print_args([Arg|Rest]) :-
	format('~p ',[Arg]),   % ~p behaves like print/1 (portray-aware)
	print_args(Rest).
685 | ||
% test_failed(+Id,+Msg): convenience wrapper with an empty second message part
test_failed(Id,Msg) :-
	test_failed(Id,Msg,'').
% test_failed(+Id,+Msg1,+Msg2): record and report a failed test:
% writes a junit error result (including the messages accumulated in the
% error manager), remembers the failure via the dynamic test_failed/1 fact,
% and prints a red failure message on user_error.
% NOTE(review): this clause fails silently when the test's category list is
% empty (Categories = [FirstCat|_] fails) - confirm every cli_testcase/5
% fact has at least one category.
test_failed(Id,Msg1,Msg2) :-
	test_took(X), % seconds since the test started (cached by test_took/1)
	cli_testcase(Id,Categories,_Infos,Arguments,Comment),
	Categories = [FirstCat|_], !,
	ErrorMessage = ['Test with Id',Id,'failed.','Test Categories: ',Categories,'Test Arguments: ',Arguments,'Test Comment: ',Comment,'Error Messages in Test Runner: ',Msg1,Msg2,'Content of Error Manager: '],
	(get_all_errors(AllErrors) -> true ; AllErrors = []), % best effort: no errors recorded
	append(ErrorMessage,AllErrors,FullErrorMessage),
	create_junit_result(Id,'Integration Tests',FirstCat,X,error(FullErrorMessage),Result),
	print_junit([Result],'Integration_Test'),
	assert(test_failed(Id)),
	start_terminal_colour(red,user_error),
	format(user_error,'*** Test ~w FAILED: ~w~w~n', [Id,Msg1,Msg2]),
	reset_terminal_colour(user_error).
701 | ||
702 | ||
% diff_failed(+Id,+GeneratedFile,+ReferenceFile,+DiffOutputText,+CmpOutputText):
% record and report a failed diff check: writes a junit error result with the
% captured diff/cmp output, remembers the failure via test_failed/1 and
% test_diff_failed/3 (the latter is used by diff_in_editor/0), and prints a
% red failure message on user_error.
diff_failed(Id,F1,F2,DiffOutputText,CmpOutputText) :-
	test_took(X), % seconds since the test started (cached by test_took/1)
	atom_codes(DiffOutputAtom,DiffOutputText), % code lists -> atoms for the junit message
	atom_codes(CmpOutputAtom,CmpOutputText),
	ErrMsg = ['Diff for test with Id\n',Id,'\nfailed:\n','Output file\n',F1,'\ndoes not correspond to stored version\n',F2,
	          '\nOutput of Diff:\n',DiffOutputAtom,
	          '\nOutput of Cmp:\n',CmpOutputAtom],
	create_junit_result(Id,'Diff Checking','Diff_checking',X,
	                    error(ErrMsg),
	                    Result),
	print_junit([Result],'Diff_checking'),
	assert(test_failed(Id)),
	assert(test_diff_failed(Id,F1,F2)),
	start_terminal_colour(red,user_error),
	format(user_error,'*** Test ~w FAILED: Diff failed:~nOutput file ~w~ndoes not correspond to stored version~n~w~n', [Id,F1,F2]),
	format(user_error,'Diff:~n~s~n',[DiffOutputText]),
	format(user_error,'Cmp:~n~s~n',[CmpOutputText]),
	reset_terminal_colour(user_error).
721 | ||
722 | :- use_module(tools_commands,[diff_files_with_editor/2]). | |
% open every failed diff pair (recorded via test_diff_failed/3) in the
% external diff editor; implemented as a failure-driven loop, the final
% clause makes the predicate always succeed
diff_in_editor :-
	findall(Id,test_diff_failed(Id,_,_),Ids),
	sort(Ids,SortedIds),
	format('Opening failed diff files in editor: ~w~n',[SortedIds]),
	test_diff_failed(TestId,GeneratedFile,ReferenceFile),
	format('Test ~w~n~w ~w~n',[TestId,GeneratedFile,ReferenceFile]),
	diff_files_with_editor(GeneratedFile,ReferenceFile),
	fail.
diff_in_editor.
730 | ||
731 | ||
% clear_diff_output(+Id): delete all output files test Id is expected to
% (re-)generate, so a stale file from a previous run cannot make the diff
% check pass spuriously. Files marked via cli_testcase_do_not_delete/2 are
% kept. Failure-driven loop; the final clause makes it always succeed.
clear_diff_output(Id) :- % clear all files that should be generated
	get_testcase_diff_check_output(Id,GeneratedFile,_StoredReferenceFile),
	safe_file_exists(GeneratedFile),
	(cli_testcase_do_not_delete(Id,GeneratedFile) -> format(user_output,'% Keeping: ~w~n',[GeneratedFile])
	 ; format(user_output,'% Deleting: ~w~n',[GeneratedFile]),
	   delete_file(GeneratedFile)
	),fail.
clear_diff_output(_).
740 | ||
% check_diff_output(+Id): compare every generated output file of test Id
% against its stored reference version
check_diff_output(Id) :-
	findall(diff(Id,Generated,Reference),
	        get_testcase_diff_check_output(Id,Generated,Reference),
	        DiffsToCheck),
	maplist(check_diff_output2,DiffsToCheck).
746 | ||
% check one diff(Id,Generated,Reference) triple: report a test failure when
% either file is missing, otherwise run the actual diff comparison
check_diff_output2(diff(Id,GeneratedFile,StoredReferenceFile)) :-
	( \+ safe_file_exists(GeneratedFile) ->
	    test_failed(Id,'Output file does not exist:',GeneratedFile)
	; \+ safe_file_exists(StoredReferenceFile) ->
	    test_failed(Id,'Stored file does not exist:',StoredReferenceFile)
	;   diff(Id,GeneratedFile,StoredReferenceFile)
	).
751 | ||
% diff(+Id,+GeneratedFile,+ReferenceFile): compare the two files with the
% external diff (-b: ignore changes in whitespace amount) and cmp commands.
% Only the diff exit code decides success; the cmp output is collected
% purely to enrich the failure message (see the commented-out ExitCmp check).
diff(Id,F1,F2) :-
	format(user_output,'% Checking: diff / cmp ~w ~w~n',[F1,F2]),
	get_command_path(diff,DiffPath),
	get_command_path(cmp,CmpPath),
	(system_call(DiffPath,['-b',F1,F2],DiffOutputText,_ErrTextDiff,ExitDiff) % use -q for quiet
	 -> true
	 ; DiffOutputText = "*** CALLING DIFF FAILED !", ExitDiff = fail % running diff itself failed
	),
	format(user_output,'% Checking: cmp ~w ~w~n',[F1,F2]),
	(system_call(CmpPath,['-b',F1,F2],CmpOutputText,_ErrTextCmp,_ExitCmp)
	 -> true
	 ; CmpOutputText = "*** CALLING CMP FAILED !"
	),
	(ExitDiff = exit(0)%, ExitCmp = exit(0)
	 -> true
	 ; diff_failed(Id,F1,F2,DiffOutputText,CmpOutputText)).
768 | ||
769 | :- volatile repl_mode/0. | |
770 | :- dynamic repl_mode/0. | |
771 | ||
772 | :- dynamic prob_examples_location/1. | |
% set_prob_examples_location(+Dir): register Dir (atom or code list) as the
% root of the prob_examples tree; stored as a code list so that
% patch_prob_examples_loc/2 can splice it into path arguments later.
set_prob_examples_location(Dir) :-
	retractall(prob_examples_location(_)),
	( atom(Dir) -> atom_codes(Dir,DirCodes)
	;              DirCodes = Dir
	),
	format('Setting location of prob_examples directory to: ~s~n',[DirCodes]),
	assert(prob_examples_location(DirCodes)).
778 | ||
% update path to prob_examples if necessary:
% patch_prob_examples_loc(+Arg,-PatchedArg): if Arg is an atom starting with
% "../prob_examples" and a custom location was registered, rewrite the prefix
% to that location; otherwise pass Arg through unchanged (second clause).
patch_prob_examples_loc(Arg,PatchedArg) :-
	atom(Arg),
	prob_examples_location(NewRoot),
	atom_codes(Arg,ArgCodes),
	append("../prob_examples",Suffix,ArgCodes),
	!,
	append(NewRoot,Suffix,PatchedCodes),
	atom_codes(PatchedArg,PatchedCodes).
patch_prob_examples_loc(Arg,Arg).
788 | ||
% like cli_testcase_diff_check_output/3, but with both file names patched
% to the configured prob_examples location
get_testcase_diff_check_output(Id,PatchedGenerated,PatchedReference) :-
	cli_testcase_diff_check_output(Id,Generated,Reference),
	patch_prob_examples_loc(Generated,PatchedGenerated),
	patch_prob_examples_loc(Reference,PatchedReference).
793 | ||
794 | :- use_module(library(lists),[maplist/2]). | |
% a minimal shell to execute tests:
% first runs any commands given on the Prolog command line (argv), then
% enters the interactive read-eval loop; repl_mode is asserted so other
% predicates can detect that they run interactively
test_repl :-
	assert(repl_mode),
	prolog_flag(argv,ArgV),
	treat_argv(ArgV),
	test_repl_loop,
	retractall(repl_mode).
800 | ||
% process the command-line arguments: '-prob-examples Dir' sets the examples
% location; every remaining argument is evaluated as a test command
treat_argv(['-prob-examples',Dir|Rest]) :- !,
	set_prob_examples_location(Dir),
	treat_argv(Rest).
treat_argv(Commands) :- maplist(eval_argv,Commands).
804 | ||
% execute tests provided on the command-line:
% Note: the 'ARGV ==>' trace line is printed by the first clause even when
% the argument is not a number (the clause only fails after printing);
% the second clause then evaluates the non-numeric command without printing.
eval_argv(Cmd) :- format('ARGV ==> ~w~n',[Cmd]),
	atom_codes(Cmd,C), safe_number_codes(Nr,C), !, test_eval(Nr).
eval_argv(Cmd) :- test_eval(Cmd),!.
809 | ||
% read-eval loop of the test shell; terminates (second clause) as soon as
% test_eval/1 fails, i.e. on q, quit, halt or end_of_file (Ctrl-D)
test_repl_loop :-
	print('TEST ==> '),
	read(Command),
	test_eval(Command),
	!,
	test_repl_loop.
test_repl_loop.
812 | ||
:- meta_predicate wall(0).
% wall(+Goal): call Goal once and report the walltime it consumed
wall(Goal) :-
	statistics(walltime,[T0,_]),
	call(Goal),
	statistics(walltime,[T1,_]),
	Elapsed is T1 - T0,
	format('Walltime: ~w ms~n',[Elapsed]).
819 | ||
820 | % ------------------------- | |
821 | ||
822 | :- use_module(library(file_systems)). | |
% test_file(?Id,?File,-AbsFileName): enumerate existing files referenced in
% the argument list of a test case, i.e. the non-option arguments (as split
% off by get_options/5) that contain a '/' and exist on disk.
test_file(Id,File,AbsFileName) :- cli_testcase(Id,_Cat,_Infos,Arguments,_Comment),
	get_options(Arguments,user:recognised_cli_option,_Options,Files,fail),
	member(File,Files),
	is_existing_file(File),
	absolute_file_name(File,AbsFileName).
828 | ||
% succeeds for an atom that looks like a path (contains at least one '/')
% and denotes an existing file
is_existing_file(Candidate) :-
	\+ number(Candidate),
	atom(Candidate),
	atom_codes(Candidate,Codes),
	[Slash] = "/",
	( member(Slash,Codes) -> true ; fail ),  % commit to first '/' found
	file_exists(Candidate).
832 | ||
833 | all_files(Files) :- findall(F,test_file(_,_,F),A), sort(A,Files). | |
834 | ||
% traverse the public examples directory (or a given directory) and report
% which files are used by some test (+) and which relevant ones are not (---)
traverse :- traverse('../prob_examples/public_examples/').
traverse(StartSpec) :-
	all_files(UsedFiles),
	absolute_file_name(StartSpec,StartDir),
	format('Examining files in ~w~n + means file is used in some test~n~n',[StartDir]),
	traverse(StartDir,UsedFiles).
839 | ||
% traverse(+Dir,+AllFiles): failure-driven walk over the files in Dir
% (first clause) and, recursively, over all of its subdirectories (second
% clause). Prints ' + File' for files used in some test and '--- File' for
% unused files with a relevant extension. Always succeeds (third clause).
traverse(Dir,AllFiles) :- file_member_of_directory(Dir,_,FullFile),
	tools:get_filename_extension(FullFile,XT),
	(member(FullFile,AllFiles) -> format(' + ~w~n',[FullFile])
	 ; relevant_extension(XT) -> format('--- ~w~n',[FullFile])),
	fail.
traverse(Dir,AllFiles) :- directory_member_of_directory(Dir,_,SubDir),
	%format('~nSTART ~w~n',[SubDir]),
	traverse(SubDir,AllFiles),
	%format('~n END ~w~n',[SubDir]),
	fail.
traverse(_,_).
851 | ||
% relevant_extension(+Ext): file extensions considered relevant when scanning
% prob_examples for files not yet used by any test (see traverse/2);
% covers B machines/refinements/implementations, TLA+, fuzz and CSP(M) files
relevant_extension('mch').
relevant_extension('ref').
relevant_extension('imp').
relevant_extension('tla').
relevant_extension('fuzz').
relevant_extension('csp').
relevant_extension('cspm').
relevant_extension('eventb').
860 | % -------------------------- | |
861 | ||
% test_eval(+Command): evaluate one command of the test shell (see the help
% text printed by the last clause for an overview). Fails - on purpose - for
% q, quit, halt and end_of_file so that test_repl_loop/0 terminates.
test_eval(N) :- number(N),!, wall(id(N)).
test_eval(last) :- !, wall(run_last_test).
test_eval(N-M) :- number(N), number(M),!, wall(id(N-M)).
test_eval(repeat(ID,M)) :- !, repeat_id(ID,M).
test_eval(r) :- !, run_random_tests(25).
test_eval(v) :- !,v.
test_eval(verbose) :- !,v.
test_eval(all_files) :- !, all_files(Files), format('~nFiles = ~n~w~n',[Files]).
test_eval(files) :- !, traverse.
test_eval(files(Dir)) :- !, traverse(Dir).
test_eval(ex(Dir)) :- !, set_prob_examples_location(Dir).
test_eval(cache) :- !,print('Enabling cache'),nl,
	cache.
test_eval(debug) :- !,print('Enabling Prolog debugging mode (use -v or -vv for ProB debugging info)'),nl,
	debug,
	retractall(multiply_timeout(_)),
	assert(multiply_timeout(10)). % debugging slows tests down; scale timeouts
test_eval(timeout(X)) :- !,
	retractall(multiply_timeout(_)),
	assert(multiply_timeout(X)).
test_eval(debug_off) :- !,print('Disabling Prolog debugging mode'),nl,
	nodebug,
	retractall(multiply_timeout(_)).
% preference toggles (applied to all subsequently run tests):
test_eval(clpfd) :- !,print('Enabling CLPFD'),nl,
	add_additional_preference('CLPFD','TRUE').
test_eval(clpfd_off) :- !,print('Disabling CLPFD'),nl,
	add_additional_preference('CLPFD','FALSE').
test_eval(smt) :- !,print('Enabling SMT'),nl,
	add_additional_preference('SMT','TRUE').
test_eval(smt_off) :- !,print('Disabling SMT'),nl,
	add_additional_preference('SMT','FALSE').
test_eval(chr) :- !,print('Enabling CHR'),nl,
	add_additional_preference('CHR','TRUE').
test_eval(chr_off) :- !,print('Disabling CHR'),nl,
	add_additional_preference('CHR','FALSE').
test_eval(cse_off) :- !,print('Disabling CSE'),nl,
	add_additional_preference('CSE','FALSE').
test_eval(cse) :- !,print('Enabling CSE'),nl,
	add_additional_preference('CSE','TRUE').
test_eval(cse_subst) :- !,print('Enabling CSE_SUBST'),nl,
	add_additional_preference('CSE','TRUE'),
	add_additional_preference('CSE_SUBST','TRUE').
test_eval(trace_info) :- !,print('Enabling TRACE_INFO'),nl,
	add_additional_preference('TRACE_INFO','TRUE').
test_eval(p(PREF)) :- !,print('Enabling Preference '),print(PREF),nl,
	add_additional_preference(PREF,'TRUE').
test_eval(p(PREF,VAL)) :- !,print('Setting Preference '),print(PREF),nl,
	add_additional_preference(PREF,VAL).
test_eval(random) :- !,print('Enabling RANDOMISE_ENUMERATION_ORDER'),nl,
	add_additional_preference('RANDOMISE_ENUMERATION_ORDER','TRUE').
test_eval(random_off) :- !,print('Disabling RANDOMISE_ENUMERATION_ORDER'),nl, % bugfix: message used to say 'Enabling'
	add_additional_preference('RANDOMISE_ENUMERATION_ORDER','FALSE').
test_eval(sanity_check) :- !, sanity_check(false).
test_eval(sc) :- !, sanity_check(false).
test_eval(trace) :- !, print('Enabling TRACE_UPON_ERROR'),nl,
	add_additional_preference('TRACE_UPON_ERROR','TRUE').
test_eval(trace_off) :- !, print('Disabling TRACE_UPON_ERROR'),nl,
	add_additional_preference('TRACE_UPON_ERROR','FALSE').
test_eval(raise) :- !,print('Enabling STRICT_RAISE_ENUM_WARNINGS'),nl,
	add_additional_preference('STRICT_RAISE_ENUM_WARNINGS','TRUE').
test_eval(nopt) :- !,print('Disabling OPTIMIZE_AST'),nl,
	add_additional_preference('OPTIMIZE_AST','FALSE').
test_eval(vv) :- !,vv.
test_eval(silent) :- !,add_additional_arguments(['-silent']).
test_eval(q) :- !,fail. % quit the shell
test_eval(x) :- !,halt. % exit Prolog entirely
test_eval(reload) :- !,use_module(probsrc(test_runner)), use_module(probsrc(testcases)).
test_eval(edit) :- last_testcase_run(Id), % open first existing file of last run test
	cli_testcase(Id,_,_Infos,Arguments,_Comment),
	member(File,Arguments), safe_file_exists(File),!,
	edit_file(File).
test_eval(e) :- !, test_eval(edit).
test_eval(diff) :- !, diff_in_editor.
test_eval(quit) :- !,fail.
test_eval(halt) :- !,fail.
test_eval(info) :- !, get_total_number_of_errors(X), format('~nTotal number of errors: ~w~n~n',[X]).
test_eval(end_of_file) :- !,fail. % Ctrl-D
test_eval(cat) :- !, print('Categories: '),
	findall(Cat,
	        (cli_testcase(_, TestCategories, _, _, _), member(Cat, TestCategories)), List),
	sort(List,SL), print(SL),nl.
test_eval(cat(Category)) :- !,
	wall(run_tests_by_category(Category,all)).
test_eval(make(Categories)) :- !,
	wall(makefile_by_category(user_output,Categories)).
test_eval(make(File,Categories)) :- !,
	wall(makefile_by_category(File,Categories)).
% NOTE(review): the following clause is unreachable - files(Dir) is already
% caught (and cut) by the earlier test_eval(files(Dir)) clause above;
% kept for now as removing/reordering it would change observable behavior.
test_eval(files(Category)) :- !, show_files(Category).
test_eval(uses(Command)) :- !, wall(run_tests_using_command(Command)).
test_eval(uses(Pref,Val)) :- !, wall(run_tests_using_preference(Pref,Val)).
test_eval(p) :- !, test_eval(profile).
test_eval(ps) :- !, test_eval(profile_stats).
test_eval(delta) :- !, print_delta_stats.
test_eval(stats) :- !, print_current_stats.
test_eval(start) :- !, performance_session_start.
test_eval(stop) :- !, performance_session_end('log/test_runner_performance_log.pl').
test_eval(codespeed) :- !,
	performance_session_start,
	test_eval(cat(codespeed)),
	performance_session_end('log/test_runner_performance_codespeed_log.pl').
test_eval(profile) :- !, print('PROFILING : '), %spy([avl:avl_size/2]),
	(current_prolog_flag(profiling,on)
	 -> set_prolog_flag(profiling,off), print('OFF') ;
	    set_prolog_flag(profiling,on), print('ON')),
	nl,print('USE ps to print profile info'),nl.
test_eval(profile_stats) :- !, nl,print('PROFILE INFORMATION:'), nl,
	on_exception(error(existence_error(_,_),_),print_profile, print('CAN ONLY BE USED WHEN RUNNING PROB FROM SOURCE')),
	nl, debug:timer_statistics.
test_eval(perf) :- !, toggle_perfmessages.
% fall-through: a category name, a list of test ids, a list of categories,
% or (anything else) print the help text
test_eval(Category) :- valid_category(Category),!,
	wall(run_tests_by_category(Category,all)).
test_eval([H|T]) :- number(H),!, wall(run_tests_by_id([H|T],_)).
test_eval([Category|T]) :- valid_category(Category),!,
	wall(run_tests_by_category([Category|T],all)).
test_eval(C) :-
	(C=help -> true ; print('*** Unknown command'), nl),
	print(' Commands: Nr, Nr-Nr, last, q, x, v, vv, uses(Cmd), uses(Pref,Val),'),nl,
	print(' repeat(id,nr), timeout(factor), e,edit,diff, r,'),nl,
	print(' cat, cat(Cats),make(Cats),make(File,Cats),files(Cat),'),nl,
	print(' profile, profile_stats, (to turn Prolog profiling on and print info)'),nl,
	print(' debug,debug_off, (to turn Prolog debug mode on or off)'),nl,
	print(' perf, reload, sc,'),nl,
	print(' * for setting preferences:'),nl,
	print(' p(PREF), p(PREF,VAL),'),nl,
	print(' clpfd,clpfd_off, smt,smt_off, chr,chr_off, cse,cse_subst,cse_off,'),nl,
	print(' random,random_off, trace_info, nopt,'),nl,
	print(' cache, perf, (turn ProB caching or performance messages on)'),nl,
	print(' trace,trace_off, (set TRACE_UPON_ERROR preference)'),nl,
	print(' * statistics:'),nl,
	print(' delta, stats, info.'),nl.
992 | ||
993 | safe_file_exists(F) :- atom(F), file_exists(F). | |
994 | ||
% a category is valid if at least one test case mentions it
valid_category(Category) :-
	cli_testcase(_Id, Categories, _Infos, _Arguments, _Comment),
	member(Category, Categories).