Turbo Constraint Solver
Loading...
Searching...
No Matches
common_solving.hpp
Go to the documentation of this file.
1// Copyright 2023 Pierre Talbot
2
3#ifndef TURBO_COMMON_SOLVING_HPP
4#define TURBO_COMMON_SOLVING_HPP
5
6#ifdef _WINDOWS
7#include <atomic>
8#endif
9#include <algorithm>
10#include <chrono>
11#include <thread>
12#include <csignal>
13
14#include "config.hpp"
15#include "statistics.hpp"
16
17#include "battery/utility.hpp"
18#include "battery/allocator.hpp"
19#include "battery/vector.hpp"
20#include "battery/shared_ptr.hpp"
21
22#include "lala/simplifier.hpp"
23#include "lala/vstore.hpp"
24#include "lala/cartesian_product.hpp"
25#include "lala/interval.hpp"
26#include "lala/pc.hpp"
27#include "lala/terms.hpp"
28#include "lala/fixpoint.hpp"
29#include "lala/search_tree.hpp"
30#include "lala/bab.hpp"
31#include "lala/split_strategy.hpp"
32#include "lala/interpretation.hpp"
33
34#include "lala/flatzinc_parser.hpp"
35
36#ifdef WITH_XCSP3PARSER
37 #include "lala/XCSP3_parser.hpp"
38#endif
39
40using namespace lala;
41
42#ifdef _WINDOWS
43//
44// The Microsoft Windows API does not support sigprocmask() or sigpending(),
45// so we have to fall back to traditional signal handling.
46//
// Set by signal_handler when SIGINT/SIGTERM is received; polled by must_quit().
static std::atomic<bool> got_signal;
// Handlers that were installed before ours; chained from signal_handler.
static void (*prev_sigint)(int);
static void (*prev_sigterm)(int);

// Traditional Windows signal handler: re-arms itself (classic signal()
// semantics may reset the disposition to SIG_DFL on delivery), records that a
// quit was requested, then chains to any previously installed handler.
void signal_handler(int signum)
{
  signal(SIGINT, signal_handler); // re-arm
  signal(SIGTERM, signal_handler); // re-arm

  got_signal = true; // std::atomic store (safe to set from a signal handler)

  // Chain to the pre-existing handlers, unless they were the default or
  // "ignore" dispositions (which are not callable function pointers).
  if (signum == SIGINT && prev_sigint != SIG_DFL && prev_sigint != SIG_IGN) {
    (*prev_sigint)(signum);
  }
  if (signum == SIGTERM && prev_sigterm != SIG_DFL && prev_sigterm != SIG_IGN) {
    (*prev_sigterm)(signum);
  }
}
65
// Installs signal_handler for SIGINT/SIGTERM, remembering the previous
// handlers so signal_handler can chain to them.
void block_signal_ctrlc() {
  prev_sigint = signal(SIGINT, signal_handler);
  prev_sigterm = signal(SIGTERM, signal_handler);
}
70
// Returns `true` once a SIGINT or SIGTERM has been delivered (see signal_handler).
bool must_quit() {
  return static_cast<bool>(got_signal);
}
74
75#else
/** Blocks SIGINT and SIGTERM so they stay *pending* instead of killing the
 * process; must_quit() later polls the pending set. On failure we warn that
 * the latest solution cannot be printed before quitting.
 * NOTE(review): the signature line was lost in the documentation extraction
 * and is reconstructed from the member index (definition at line 77). */
void block_signal_ctrlc() {
  sigset_t ctrlc;
  sigemptyset(&ctrlc);
  sigaddset(&ctrlc, SIGINT);
  sigaddset(&ctrlc, SIGTERM);
  if(sigprocmask(SIG_BLOCK, &ctrlc, NULL) != 0) {
    printf("%% ERROR: Unable to deal with CTRL-C. Therefore, we will not be able to print the latest solution before quitting.\n");
    perror(NULL);
    return;
  }
}
88
/** Polls (without consuming) the process's pending-signal set and reports
 * whether an interruption (SIGINT) or termination (SIGTERM) request is
 * waiting. Only meaningful after block_signal_ctrlc() blocked those signals. */
bool must_quit() {
  sigset_t pending;
  sigemptyset(&pending);
  sigpending(&pending);
  const bool interrupted = sigismember(&pending, SIGINT) == 1;
  const bool terminated = sigismember(&pending, SIGTERM) == 1;
  return interrupted || terminated;
}
95#endif
96
97/** Check if the timeout of the current execution is exceeded and returns `false` otherwise.
98 * It also update the statistics relevant to the solving duration and the exhaustive flag if we reach the timeout.
99 */
100template <class A, class Timepoint>
101bool check_timeout(A& a, const Timepoint& start) {
102 auto now = std::chrono::high_resolution_clock::now();
103 a.stats.duration = std::chrono::duration_cast<std::chrono::milliseconds>(now - start).count();
104 if(a.config.timeout_ms == 0) {
105 return true;
106 }
107 if(a.stats.duration >= static_cast<int64_t>(a.config.timeout_ms)) {
108 if(a.config.verbose_solving) {
109 printf("%% CPU: Timeout reached.\n");
110 }
111 a.stats.exhaustive = false;
112 return false;
113 }
114 return true;
115}
116
117/** This is a simple wrapper aimed at giving a unique type to the allocator, to use them in AbstractDeps. */
118template <class Alloc, size_t n>
121 UniqueAlloc() = default;
122 CUDA UniqueAlloc(const Alloc& alloc): allocator(alloc) {}
123 UniqueAlloc(const UniqueAlloc& alloc) = default;
124 UniqueAlloc(UniqueAlloc&& alloc) = default;
125 UniqueAlloc& operator=(const UniqueAlloc& alloc) = default;
126 UniqueAlloc& operator=(UniqueAlloc&& alloc) = default;
127 CUDA void* allocate(size_t bytes) {
128 return allocator.allocate(bytes);
129 }
130 CUDA void deallocate(void* data) {
131 allocator.deallocate(data);
132 }
133};
134
135template <class Alloc, size_t n>
137 CUDA void* allocate(size_t bytes) {
138 return Alloc{}.allocate(bytes);
139 }
140 CUDA void deallocate(void* data) {
141 Alloc{}.deallocate(data);
142 }
143};
144
145/** This class is parametrized by a universe of discourse, which is the domain of the variables in the store and various allocators:
146 * - BasicAllocator: default allocator, used to allocate abstract domains, the environment, storing intermediate results, etc.
147 * - PropAllocator: allocator used for the PC abstract domain, to allocate the propagators.
148 * - StoreAllocator: allocator used for the store, to allocate the variables.
149 *
150 * Normally, you should use the fastest memory for the store, then for the propagators and then for the rest.
151 */
152template <class Universe,
153 class BasicAllocator,
154 class PropAllocator,
155 class StoreAllocator>
157 using universe_type = typename Universe::local_type;
158
159 /** Version of the abstract domains with a simple allocator, to represent the best solutions. */
160 using LIStore = VStore<universe_type, BasicAllocator>;
161
162 using IStore = VStore<Universe, StoreAllocator>;
163 using IPC = PC<IStore, PropAllocator>; // Interval Propagators Completion
164 using ISimplifier = Simplifier<IPC, BasicAllocator>;
165 using Split = SplitStrategy<IPC, BasicAllocator>;
166 using IST = SearchTree<IPC, Split, BasicAllocator>;
167 using IBAB = BAB<IST, LIStore>;
168
169 using basic_allocator_type = BasicAllocator;
170 using prop_allocator_type = PropAllocator;
171 using store_allocator_type = StoreAllocator;
172
174
176
178
179 /** We copy `other` in a new element, and ignore every variable not used in a GPU block.
180 * This is because copying everything in each block is very slow.
181 *
182 * NOTE: It is not the allocation itself that is slow, I think it calling many copy constructors for atomic variables (note that in simplifier we have an atomic memory if the underlying domain has one).
183 */
184 template <class U2, class BasicAlloc2, class PropAllocator2, class StoreAllocator2>
186 bool enable_sharing, // `true` if the propagators are not in the shared memory.
188 const BasicAllocator& basic_allocator = BasicAllocator(),
189 const PropAllocator& prop_allocator = PropAllocator(),
190 const StoreAllocator& store_allocator = StoreAllocator())
196 , stats(other.stats)
206 {
207 AbstractDeps<BasicAllocator, PropAllocator, StoreAllocator> deps{enable_sharing, basic_allocator, prop_allocator, store_allocator};
208 store = deps.template clone<IStore>(other.store);
209 ipc = deps.template clone<IPC>(other.ipc);
210 split = deps.template clone<Split>(other.split);
211 eps_split = deps.template clone<Split>(other.eps_split);
212 search_tree = deps.template clone<IST>(other.search_tree);
213 bab = deps.template clone<IBAB>(other.bab);
214 best = bab->optimum_ptr();
215 }
216
217 template <class U2, class BasicAlloc2, class PropAllocator2, class StoreAllocator2>
219 const BasicAllocator& basic_allocator = BasicAllocator(),
220 const PropAllocator& prop_allocator = PropAllocator(),
221 const StoreAllocator& store_allocator = StoreAllocator(),
222 const tag_copy_cons& tag = tag_copy_cons{})
223 : AbstractDomains(tag_gpu_block_copy{}, false, other, basic_allocator, prop_allocator, store_allocator)
224 {
226 env = other.env;
227 simplifier = battery::allocate_shared<ISimplifier, BasicAllocator>(basic_allocator, *other.simplifier, typename ISimplifier::light_copy_tag{}, ipc, basic_allocator);
228 }
229
230 CUDA AbstractDomains(const this_type& other,
231 const BasicAllocator& basic_allocator = BasicAllocator(),
232 const PropAllocator& prop_allocator = PropAllocator(),
233 const StoreAllocator& store_allocator = StoreAllocator())
235 {}
236
237 template <class Alloc>
239 const BasicAllocator& basic_allocator = BasicAllocator(),
240 const PropAllocator& prop_allocator = PropAllocator(),
241 const StoreAllocator& store_allocator = StoreAllocator())
256 {
257 size_t num_subproblems = 1;
258 num_subproblems <<= config.subproblems_power;
259 stats.eps_num_subproblems = num_subproblems;
260 }
261
263
264 BasicAllocator basic_allocator;
265 PropAllocator prop_allocator;
266 StoreAllocator store_allocator;
267
268 abstract_ptr<IStore> store;
269 abstract_ptr<IPC> ipc;
270 abstract_ptr<ISimplifier> simplifier;
271 abstract_ptr<Split> split;
272 abstract_ptr<Split> eps_split;
273 abstract_ptr<IST> search_tree;
274 abstract_ptr<LIStore> best;
275 abstract_ptr<IBAB> bab;
276
277 // The environment of variables, storing the mapping between variable's name and their representation in the abstract domains.
278 VarEnv<BasicAllocator> env;
279
280 // Information about the output of the solutions expected by MiniZinc.
281 SolverOutput<BasicAllocator> solver_output;
282
285
286 CUDA void allocate(int num_vars) {
287 env = VarEnv<basic_allocator_type>{basic_allocator};
288 store = battery::allocate_shared<IStore, StoreAllocator>(store_allocator, env.extends_abstract_dom(), num_vars, store_allocator);
289 ipc = battery::allocate_shared<IPC, PropAllocator>(prop_allocator, env.extends_abstract_dom(), store, prop_allocator);
290 // If the simplifier is already allocated, it means we are currently reallocating the abstract domains after preprocessing.
291 if(!simplifier) {
292 simplifier = battery::allocate_shared<ISimplifier, BasicAllocator>(basic_allocator, env.extends_abstract_dom(), ipc, basic_allocator);
293 }
294 split = battery::allocate_shared<Split, BasicAllocator>(basic_allocator, env.extends_abstract_dom(), ipc, basic_allocator);
295 eps_split = battery::allocate_shared<Split, BasicAllocator>(basic_allocator, env.extends_abstract_dom(), ipc, basic_allocator);
296 search_tree = battery::allocate_shared<IST, BasicAllocator>(basic_allocator, env.extends_abstract_dom(), ipc, split, basic_allocator);
297 // Note that `best` must have the same abstract type then store (otherwise projection of the variables will fail).
298 best = battery::allocate_shared<LIStore, BasicAllocator>(basic_allocator, store->aty(), num_vars, basic_allocator);
299 bab = battery::allocate_shared<IBAB, BasicAllocator>(basic_allocator, env.extends_abstract_dom(), search_tree, best);
301 printf("%% Abstract domain allocated.\n");
302 }
303 }
304
  // This forces the deallocation of shared memory inside a kernel, by dropping
  // every shared pointer to the abstract domains and resetting the environment.
  // NOTE(review): `best` is not reset here — presumably kept so the optimum
  // survives deallocation; confirm against callers.
  CUDA void deallocate() {
    store = nullptr;
    ipc = nullptr;
    simplifier = nullptr;
    split = nullptr;
    eps_split = nullptr;
    search_tree = nullptr;
    bab = nullptr;
    env = VarEnv<BasicAllocator>{basic_allocator}; // this is to release the memory used by `VarEnv`.
  }
316
  // Mainly to interpret the IN constraint in IPC instead of only over-approximating in intervals.
  // Recursively walks the formula and types `x IN S` nodes for the IPC domain.
  template <class F>
  CUDA void typing(F& f) const {
    switch(f.index()) {
      case F::Seq:
        // `IN` with a set literal of more than one element: type the whole
        // constraint for IPC so membership is propagated exactly.
        if(f.sig() == ::lala::IN && f.seq(1).is(F::S) && f.seq(1).s().size() > 1) {
          f.type_as(ipc->aty());
          return;
        }
        // Otherwise recurse into every subformula of the sequence.
        for(int i = 0; i < f.seq().size(); ++i) {
          typing(f.seq(i));
        }
        break;
      case F::ESeq:
        // Extended sequences: recurse into each element.
        for(int i = 0; i < f.eseq().size(); ++i) {
          typing(f.eseq(i));
        }
        break;
    }
  }
337
338private:
339
340 // We first try to interpret, and if it does not work, we interpret again with the diagnostics mode turned on.
341 template <class F, class Env, class A>
342 CUDA bool interpret_and_diagnose_and_tell(const F& f, Env& env, A& a) {
343 IDiagnostics diagnostics;
344 if(!interpret_and_tell(f, env, a, diagnostics)) {
345 IDiagnostics diagnostics2;
346 interpret_and_tell<true>(f, env, a, diagnostics2);
347 diagnostics2.print();
348 return false;
349 }
350 return true;
351 }
352
353public:
354 template <class F>
355 CUDA bool interpret(const F& f) {
357 printf("%% Interpreting the formula...\n");
358 }
359 if(!interpret_and_diagnose_and_tell(f, env, *bab)) {
360 return false;
361 }
362 stats.variables = store->vars();
363 stats.constraints = ipc->num_deductions();
364 bool can_interpret = true;
365 if(split->num_strategies() == 0) {
366 can_interpret &= interpret_default_strategy<F>();
367 }
368 if(eps_split->num_strategies() == 0) {
369 can_interpret &= interpret_default_eps_strategy<F>();
370 }
371 return can_interpret;
372 }
373
374 template <class F>
375 CUDA bool prepare_simplifier(F& f) {
377 printf("%% Simplifying the formula...\n");
378 }
379 IDiagnostics diagnostics;
380 typename ISimplifier::template tell_type<basic_allocator_type> tell{basic_allocator};
381 if(top_level_ginterpret_in<IKind::TELL>(*simplifier, f, env, tell, diagnostics)) {
382 simplifier->deduce(std::move(tell));
383 return true;
384 }
385 else if(config.verbose_solving) {
386 printf("WARNING: Could not simplify the formula because:\n");
387 IDiagnostics diagnostics2;
388 top_level_ginterpret_in<IKind::TELL, true>(*simplifier, f, env, tell, diagnostics2);
389 diagnostics2.print();
390 }
391 return false;
392 }
393
394 template <class F>
397 printf("%% Typing the formula...\n");
398 }
399 typing(f);
400 if(config.print_ast) {
401 printf("%% Typed AST:\n");
402 f.print(true);
403 printf("\n");
404 }
405 if(!interpret(f)) {
406 exit(EXIT_FAILURE);
407 }
408
409 if(config.print_ast) {
410 printf("%% Interpreted AST:\n");
411 ipc->deinterpret(env).print(true);
412 printf("\n");
413 }
415 printf("%% Formula has been interpreted.\n");
416 }
417 }
418
419 using FormulaPtr = battery::shared_ptr<TFormula<basic_allocator_type>, basic_allocator_type>;
420
422 auto start = std::chrono::high_resolution_clock::now();
423
424 // I. Parse the FlatZinc model.
425 FormulaPtr f;
427 f = parse_flatzinc(config.problem_path.data(), solver_output);
428 }
429#ifdef WITH_XCSP3PARSER
431 f = parse_xcsp3(config.problem_path.data(), solver_output);
432 }
433#endif
434 if(!f) {
435 std::cerr << "Could not parse input file." << std::endl;
436 exit(EXIT_FAILURE);
437 }
438
440 printf("%% Input file parsed\n");
441 }
442
443 if(config.print_ast) {
444 printf("%% Parsed AST:\n");
445 f->print(true);
446 printf("\n");
447 }
448
449 *f = eval(*f);
451 printf("%% Formula syntactically simplified.\n");
452 }
453
454 allocate(num_quantified_vars(*f));
456
457 auto interpretation_time = std::chrono::high_resolution_clock::now();
458 stats.interpretation_duration += std::chrono::duration_cast<std::chrono::milliseconds>(interpretation_time - start).count();
459 return f;
460 }
461
462 void preprocess() {
463 auto raw_formula = prepare_solver();
464 auto start = std::chrono::high_resolution_clock::now();
465 if(prepare_simplifier(*raw_formula)) {
466 GaussSeidelIteration fp_engine;
467 fp_engine.fixpoint(*ipc);
468 fp_engine.fixpoint(*simplifier);
469 auto f = simplifier->deinterpret();
470 stats.eliminated_variables = simplifier->num_eliminated_variables();
471 stats.eliminated_formulas = simplifier->num_eliminated_formulas();
472 allocate(num_quantified_vars(f));
474 }
475 auto interpretation_time = std::chrono::high_resolution_clock::now();
476 stats.interpretation_duration += std::chrono::duration_cast<std::chrono::milliseconds>(interpretation_time - start).count();
477 }
478
479private:
480 template <class F>
481 CUDA bool interpret_default_strategy() {
483 printf("%% No split strategy provided, using the default one (first_fail, indomain_min).\n");
484 }
485 config.free_search = true;
486 typename F::Sequence seq;
487 seq.push_back(F::make_nary("first_fail", {}));
488 seq.push_back(F::make_nary("indomain_min", {}));
489 for(int i = 0; i < env.num_vars(); ++i) {
490 seq.push_back(F::make_avar(env[i].avars[0]));
491 }
492 F search_strat = F::make_nary("search", std::move(seq));
493 if(!interpret_and_diagnose_and_tell(search_strat, env, *bab)) {
494 return false;
495 }
496 return true;
497 }
498
499 template <class F>
500 CUDA bool interpret_default_eps_strategy() {
501 typename F::Sequence seq;
502 seq.push_back(F::make_nary("first_fail", {}));
503 seq.push_back(F::make_nary("indomain_min", {}));
504 for(int i = 0; i < env.num_vars(); ++i) {
505 seq.push_back(F::make_avar(env[i].avars[0]));
506 }
507 F search_strat = F::make_nary("search", std::move(seq));
508 if(!interpret_and_diagnose_and_tell(search_strat, env, *eps_split)) {
509 return false;
510 }
511 return true;
512 }
513
514public:
515 CUDA bool on_node() {
516 stats.nodes++;
517 stats.depth_max = battery::max(stats.depth_max, search_tree->depth());
519 prune();
520 return true;
521 }
522 return false;
523 }
524
526 return bab->is_satisfaction() || config.print_intermediate_solutions;
527 }
528
529 CUDA void print_solution() {
530 solver_output.print_solution(env, *best, *simplifier);
532 }
533
  /** Requests the search to stop early: marks the exploration as
   * non-exhaustive (so the final status cannot claim optimality/completeness). */
  CUDA void prune() {
    stats.exhaustive = false;
  }
537
538 /** Return `true` if the search state must be pruned. */
541 if(bab->is_satisfaction() && config.stop_after_n_solutions != 0 &&
543 {
544 prune();
545 return true;
546 }
547 return false;
548 }
549
550 CUDA bool on_solution_node() {
553 }
554 return update_solution_stats();
555 }
556
  /** Called whenever a node fails (is proven infeasible); counts the failure. */
  CUDA void on_failed_node() {
    stats.fails += 1;
  }
560
567
572 if(!bab->objective_var().is_untyped() && !best->is_top()) {
573 stats.print_mzn_objective(best->project(bab->objective_var()), bab->is_minimization());
574 }
576 }
577 }
578
579 /** Extract in `this` the content of `other`. */
580 template <class U2, class BasicAlloc2, class PropAlloc2, class StoreAlloc2>
582 if(bab->is_optimization() && !other.best->is_top() && bab->compare_bound(*other.best, *best)) {
583 other.best->extract(*best);
584 }
585 stats.meet(other.stats);
586 }
587};
588
589using Itv = Interval<local::ZLB>;
590
591template <class Universe, class Allocator = battery::standard_allocator>
592using CP = AbstractDomains<Universe,
593 battery::statistics_allocator<Allocator>,
594 battery::statistics_allocator<UniqueLightAlloc<Allocator, 0>>,
595 battery::statistics_allocator<UniqueLightAlloc<Allocator, 1>>>;
596
597#endif
Interval< local::ZLB > Itv
Definition common_solving.hpp:589
bool must_quit()
Definition common_solving.hpp:89
void block_signal_ctrlc()
Definition common_solving.hpp:77
bool check_timeout(A &a, const Timepoint &start)
Definition common_solving.hpp:101
Definition common_solving.hpp:175
Definition common_solving.hpp:177
Definition common_solving.hpp:156
abstract_ptr< IBAB > bab
Definition common_solving.hpp:275
void type_and_interpret(F &f)
Definition common_solving.hpp:395
SearchTree< IPC, Split, BasicAllocator > IST
Definition common_solving.hpp:166
Configuration< BasicAllocator > config
Definition common_solving.hpp:283
CUDA void typing(F &f) const
Definition common_solving.hpp:319
CUDA bool interpret(const F &f)
Definition common_solving.hpp:355
CUDA void deallocate()
Definition common_solving.hpp:306
VStore< universe_type, BasicAllocator > LIStore
Definition common_solving.hpp:160
CUDA void print_final_solution()
Definition common_solving.hpp:561
StoreAllocator store_allocator
Definition common_solving.hpp:266
CUDA AbstractDomains(const tag_gpu_block_copy &, bool enable_sharing, const AbstractDomains< U2, BasicAlloc2, PropAllocator2, StoreAllocator2 > &other, const BasicAllocator &basic_allocator=BasicAllocator(), const PropAllocator &prop_allocator=PropAllocator(), const StoreAllocator &store_allocator=StoreAllocator())
Definition common_solving.hpp:185
CUDA bool prepare_simplifier(F &f)
Definition common_solving.hpp:375
CUDA void allocate(int num_vars)
Definition common_solving.hpp:286
BasicAllocator basic_allocator
Definition common_solving.hpp:264
BAB< IST, LIStore > IBAB
Definition common_solving.hpp:167
CUDA void meet(AbstractDomains< U2, BasicAlloc2, PropAlloc2, StoreAlloc2 > &other)
Definition common_solving.hpp:581
CUDA bool on_solution_node()
Definition common_solving.hpp:550
CUDA bool on_node()
Definition common_solving.hpp:515
abstract_ptr< LIStore > best
Definition common_solving.hpp:274
battery::shared_ptr< TFormula< basic_allocator_type >, basic_allocator_type > FormulaPtr
Definition common_solving.hpp:419
AbstractDomains(AbstractDomains &&other)=default
abstract_ptr< IStore > store
Definition common_solving.hpp:268
VarEnv< BasicAllocator > env
Definition common_solving.hpp:278
CUDA bool is_printing_intermediate_sol()
Definition common_solving.hpp:525
Simplifier< IPC, BasicAllocator > ISimplifier
Definition common_solving.hpp:164
FormulaPtr prepare_solver()
Definition common_solving.hpp:421
abstract_ptr< IPC > ipc
Definition common_solving.hpp:269
PC< IStore, PropAllocator > IPC
Definition common_solving.hpp:163
CUDA AbstractDomains(const AbstractDomains< U2, BasicAlloc2, PropAllocator2, StoreAllocator2 > &other, const BasicAllocator &basic_allocator=BasicAllocator(), const PropAllocator &prop_allocator=PropAllocator(), const StoreAllocator &store_allocator=StoreAllocator(), const tag_copy_cons &tag=tag_copy_cons{})
Definition common_solving.hpp:218
abstract_ptr< IST > search_tree
Definition common_solving.hpp:273
CUDA void on_failed_node()
Definition common_solving.hpp:557
PropAllocator prop_allocator_type
Definition common_solving.hpp:170
BasicAllocator basic_allocator_type
Definition common_solving.hpp:169
SplitStrategy< IPC, BasicAllocator > Split
Definition common_solving.hpp:165
abstract_ptr< ISimplifier > simplifier
Definition common_solving.hpp:270
abstract_ptr< Split > eps_split
Definition common_solving.hpp:272
void preprocess()
Definition common_solving.hpp:462
abstract_ptr< Split > split
Definition common_solving.hpp:271
Statistics stats
Definition common_solving.hpp:284
SolverOutput< BasicAllocator > solver_output
Definition common_solving.hpp:281
CUDA bool update_solution_stats()
Definition common_solving.hpp:539
PropAllocator prop_allocator
Definition common_solving.hpp:265
typename Universe::local_type universe_type
Definition common_solving.hpp:157
VStore< Universe, StoreAllocator > IStore
Definition common_solving.hpp:162
StoreAllocator store_allocator_type
Definition common_solving.hpp:171
CUDA void print_solution()
Definition common_solving.hpp:529
CUDA void prune()
Definition common_solving.hpp:534
CUDA AbstractDomains(const this_type &other, const BasicAllocator &basic_allocator=BasicAllocator(), const PropAllocator &prop_allocator=PropAllocator(), const StoreAllocator &store_allocator=StoreAllocator())
Definition common_solving.hpp:230
CUDA AbstractDomains(const Configuration< Alloc > &config, const BasicAllocator &basic_allocator=BasicAllocator(), const PropAllocator &prop_allocator=PropAllocator(), const StoreAllocator &store_allocator=StoreAllocator())
Definition common_solving.hpp:238
CUDA void print_mzn_statistics()
Definition common_solving.hpp:568
Definition config.hpp:29
size_t stop_after_n_nodes
Definition config.hpp:33
battery::string< allocator_type > problem_path
Definition config.hpp:46
CUDA InputFormat input_format() const
Definition config.hpp:194
bool print_intermediate_solutions
Definition config.hpp:31
bool print_ast
Definition config.hpp:37
CUDA void print_mzn_statistics() const
Definition config.hpp:172
bool verbose_solving
Definition config.hpp:36
size_t stop_after_n_solutions
Definition config.hpp:32
bool free_search
Definition config.hpp:34
bool print_statistics
Definition config.hpp:35
size_t subproblems_power
Definition config.hpp:43
Definition statistics.hpp:26
size_t eps_num_subproblems
Definition statistics.hpp:37
size_t fails
Definition statistics.hpp:33
size_t depth_max
Definition statistics.hpp:35
CUDA void print_mzn_statistics() const
Definition statistics.hpp:92
size_t constraints
Definition statistics.hpp:28
CUDA void print_mzn_objective(const auto &obj, bool is_minimization) const
Definition statistics.hpp:118
CUDA void meet(const Statistics &other)
Definition statistics.hpp:62
int64_t interpretation_duration
Definition statistics.hpp:31
CUDA void print_mzn_end_stats() const
Definition statistics.hpp:114
size_t eliminated_variables
Definition statistics.hpp:42
CUDA void print_mzn_final_separator() const
Definition statistics.hpp:133
size_t nodes
Definition statistics.hpp:32
size_t eliminated_formulas
Definition statistics.hpp:43
size_t solutions
Definition statistics.hpp:34
size_t variables
Definition statistics.hpp:27
CUDA void print_mzn_separator() const
Definition statistics.hpp:129
size_t exhaustive
Definition statistics.hpp:36
Definition common_solving.hpp:119
CUDA void deallocate(void *data)
Definition common_solving.hpp:130
UniqueAlloc & operator=(UniqueAlloc &&alloc)=default
UniqueAlloc(const UniqueAlloc &alloc)=default
UniqueAlloc & operator=(const UniqueAlloc &alloc)=default
Alloc allocator
Definition common_solving.hpp:120
CUDA void * allocate(size_t bytes)
Definition common_solving.hpp:127
UniqueAlloc()=default
UniqueAlloc(UniqueAlloc &&alloc)=default
CUDA UniqueAlloc(const Alloc &alloc)
Definition common_solving.hpp:122
Definition common_solving.hpp:136
CUDA void * allocate(size_t bytes)
Definition common_solving.hpp:137
CUDA void deallocate(void *data)
Definition common_solving.hpp:140