Turbo Constraint Solver
Loading...
Searching...
No Matches
common_solving.hpp
Go to the documentation of this file.
1// Copyright 2023 Pierre Talbot
2
3#ifndef TURBO_COMMON_SOLVING_HPP
4#define TURBO_COMMON_SOLVING_HPP
5
6#ifdef _WINDOWS
7#include <atomic>
8#endif
9#include <algorithm>
10#include <chrono>
11#include <thread>
12#include <csignal>
13
14#include "config.hpp"
15#include "statistics.hpp"
16
17#include "battery/utility.hpp"
18#include "battery/allocator.hpp"
19#include "battery/vector.hpp"
20#include "battery/shared_ptr.hpp"
21
22#include "lala/simplifier.hpp"
23#include "lala/vstore.hpp"
24#include "lala/cartesian_product.hpp"
25#include "lala/interval.hpp"
26#include "lala/pc.hpp"
27#include "lala/terms.hpp"
28#include "lala/fixpoint.hpp"
29#include "lala/search_tree.hpp"
30#include "lala/bab.hpp"
31#include "lala/split_strategy.hpp"
32#include "lala/interpretation.hpp"
33
34#include "lala/flatzinc_parser.hpp"
35
36#ifdef WITH_XCSP3PARSER
37 #include "lala/XCSP3_parser.hpp"
38#endif
39
40using namespace lala;
41
42#ifdef _WINDOWS
43//
44// The Microsoft Windows API does not support sigprocmask() or sigpending(),
45// so we have to fall back to traditional signal handling.
46//
// Set by signal_handler when SIGINT/SIGTERM arrives; polled by must_quit().
static std::atomic<bool> got_signal;
// Handlers that were installed before ours, saved so signal_handler can chain to them.
static void (*prev_sigint)(int);
static void (*prev_sigterm)(int);

/** Windows fallback handler: records that a termination signal was received and
 * chains to any previously installed user handler.
 * @param signum The signal being delivered (SIGINT or SIGTERM). */
void signal_handler(int signum)
{
  // Re-arm both handlers, since the handler registration may have been reset
  // to SIG_DFL upon delivery under this signal model.
  signal(SIGINT, signal_handler); // re-arm
  signal(SIGTERM, signal_handler); // re-arm

  got_signal = true; // atomic store, read later by must_quit()

  // Chain to the previous handler only if it is a real function
  // (not the default disposition and not "ignore").
  if (signum == SIGINT && prev_sigint != SIG_DFL && prev_sigint != SIG_IGN) {
    (*prev_sigint)(signum);
  }
  if (signum == SIGTERM && prev_sigterm != SIG_DFL && prev_sigterm != SIG_IGN) {
    (*prev_sigterm)(signum);
  }
}
65
66void block_signal_ctrlc() {
67 prev_sigint = signal(SIGINT, signal_handler);
68 prev_sigterm = signal(SIGTERM, signal_handler);
69}
70
71bool must_quit() {
72 return static_cast<bool>(got_signal);
73}
74
75#else
76
78 sigset_t ctrlc;
79 sigemptyset(&ctrlc);
80 sigaddset(&ctrlc, SIGINT);
81 sigaddset(&ctrlc, SIGTERM);
82 if(sigprocmask(SIG_BLOCK, &ctrlc, NULL) != 0) {
83 printf("%% ERROR: Unable to deal with CTRL-C. Therefore, we will not be able to print the latest solution before quitting.\n");
84 perror(NULL);
85 return;
86 }
87}
88
/** Return `true` if a (blocked) SIGINT or SIGTERM is currently pending
 * delivery, i.e. the user asked us to quit. */
bool must_quit() {
  sigset_t pending;
  sigemptyset(&pending);
  sigpending(&pending);
  const bool ctrlc_pending = sigismember(&pending, SIGINT) == 1;
  const bool term_pending = sigismember(&pending, SIGTERM) == 1;
  return ctrlc_pending || term_pending;
}
95#endif
96
/** Return `false` when the configured timeout of the current execution has
 * been exceeded, `true` otherwise (a timeout of 0 means "no timeout").
 * As a side effect, it refreshes `a.stats.duration` and clears
 * `a.stats.exhaustive` when the timeout is reached. */
template <class A, class Timepoint>
bool check_timeout(A& a, const Timepoint& start) {
  const auto now = std::chrono::high_resolution_clock::now();
  a.stats.duration = std::chrono::duration_cast<std::chrono::milliseconds>(now - start).count();
  // Still within budget (or no budget at all)?
  if(a.config.timeout_ms == 0 || a.stats.duration < static_cast<int64_t>(a.config.timeout_ms)) {
    return true;
  }
  if(a.config.verbose_solving) {
    printf("%% CPU: Timeout reached.\n");
  }
  // We are stopping early, so the search cannot be exhaustive.
  a.stats.exhaustive = false;
  return false;
}
116
117/** This is a simple wrapper aimed at giving a unique type to the allocator, to use them in AbstractDeps. */
118template <class Alloc, size_t n>
121 UniqueAlloc() = default;
122 CUDA UniqueAlloc(const Alloc& alloc): allocator(alloc) {}
123 UniqueAlloc(const UniqueAlloc& alloc) = default;
124 UniqueAlloc& operator=(const UniqueAlloc& alloc) = default;
125 CUDA void* allocate(size_t bytes) {
126 return allocator.allocate(bytes);
127 }
128 CUDA void deallocate(void* data) {
129 allocator.deallocate(data);
130 }
131};
132
133template <class Alloc, size_t n>
135 CUDA void* allocate(size_t bytes) {
136 return Alloc{}.allocate(bytes);
137 }
138 CUDA void deallocate(void* data) {
139 Alloc{}.deallocate(data);
140 }
141};
142
143/** This class is parametrized by a universe of discourse, which is the domain of the variables in the store and various allocators:
144 * - BasicAllocator: default allocator, used to allocate abstract domains, the environment, storing intermediate results, etc.
145 * - PropAllocator: allocator used for the PC abstract domain, to allocate the propagators.
146 * - StoreAllocator: allocator used for the store, to allocate the variables.
147 *
148 * Normally, you should use the fastest memory for the store, then for the propagators and then for the rest.
149 */
150template <class Universe,
151 class BasicAllocator,
152 class PropAllocator,
153 class StoreAllocator>
155 using universe_type = typename Universe::local_type;
156
157 /** Version of the abstract domains with a simple allocator, to represent the best solutions. */
158 using LIStore = VStore<universe_type, BasicAllocator>;
159
160 using IStore = VStore<Universe, StoreAllocator>;
161 using IPC = PC<IStore, PropAllocator>; // Interval Propagators Completion
162 using ISimplifier = Simplifier<IPC, BasicAllocator>;
163 using Split = SplitStrategy<IPC, BasicAllocator>;
164 using IST = SearchTree<IPC, Split, BasicAllocator>;
165 using IBAB = BAB<IST, LIStore>;
166
167 using basic_allocator_type = BasicAllocator;
168 using prop_allocator_type = PropAllocator;
169 using store_allocator_type = StoreAllocator;
170
172
174
176
177 /** We copy `other` in a new element, and ignore every variable not used in a GPU block.
178 * This is because copying everything in each block is very slow.
179 *
180 * NOTE: It is not the allocation itself that is slow, I think it calling many copy constructors for atomic variables (note that in simplifier we have an atomic memory if the underlying domain has one).
181 */
182 template <class U2, class BasicAlloc2, class PropAllocator2, class StoreAllocator2>
184 bool enable_sharing, // `true` if the propagators are not in the shared memory.
186 const BasicAllocator& basic_allocator = BasicAllocator(),
187 const PropAllocator& prop_allocator = PropAllocator(),
188 const StoreAllocator& store_allocator = StoreAllocator())
194 , stats(other.stats)
204 {
205 AbstractDeps<BasicAllocator, PropAllocator, StoreAllocator> deps{enable_sharing, basic_allocator, prop_allocator, store_allocator};
206 store = deps.template clone<IStore>(other.store);
207 ipc = deps.template clone<IPC>(other.ipc);
208 split = deps.template clone<Split>(other.split);
209 eps_split = deps.template clone<Split>(other.eps_split);
210 search_tree = deps.template clone<IST>(other.search_tree);
211 bab = deps.template clone<IBAB>(other.bab);
212 best = bab->optimum_ptr();
213 }
214
215 template <class U2, class BasicAlloc2, class PropAllocator2, class StoreAllocator2>
217 const BasicAllocator& basic_allocator = BasicAllocator(),
218 const PropAllocator& prop_allocator = PropAllocator(),
219 const StoreAllocator& store_allocator = StoreAllocator(),
220 const tag_copy_cons& tag = tag_copy_cons{})
221 : AbstractDomains(tag_gpu_block_copy{}, false, other, basic_allocator, prop_allocator, store_allocator)
222 {
224 env = other.env;
225 simplifier = battery::allocate_shared<ISimplifier, BasicAllocator>(basic_allocator, *other.simplifier, typename ISimplifier::light_copy_tag{}, ipc, basic_allocator);
226 }
227
228 CUDA AbstractDomains(const this_type& other,
229 const BasicAllocator& basic_allocator = BasicAllocator(),
230 const PropAllocator& prop_allocator = PropAllocator(),
231 const StoreAllocator& store_allocator = StoreAllocator())
233 {}
234
235 template <class Alloc>
237 const BasicAllocator& basic_allocator = BasicAllocator(),
238 const PropAllocator& prop_allocator = PropAllocator(),
239 const StoreAllocator& store_allocator = StoreAllocator())
254 {}
255
257
258 BasicAllocator basic_allocator;
259 PropAllocator prop_allocator;
260 StoreAllocator store_allocator;
261
262 abstract_ptr<IStore> store;
263 abstract_ptr<IPC> ipc;
264 abstract_ptr<ISimplifier> simplifier;
265 abstract_ptr<Split> split;
266 abstract_ptr<Split> eps_split;
267 abstract_ptr<IST> search_tree;
268 abstract_ptr<LIStore> best;
269 abstract_ptr<IBAB> bab;
270
271 // The environment of variables, storing the mapping between variable's name and their representation in the abstract domains.
272 VarEnv<BasicAllocator> env;
273
274 // Information about the output of the solutions expected by MiniZinc.
275 SolverOutput<BasicAllocator> solver_output;
276
279
280 CUDA void allocate(int num_vars) {
281 env = VarEnv<basic_allocator_type>{basic_allocator};
282 store = battery::allocate_shared<IStore, StoreAllocator>(store_allocator, env.extends_abstract_dom(), num_vars, store_allocator);
283 ipc = battery::allocate_shared<IPC, PropAllocator>(prop_allocator, env.extends_abstract_dom(), store, prop_allocator);
284 // If the simplifier is already allocated, it means we are currently reallocating the abstract domains after preprocessing.
285 if(!simplifier) {
286 simplifier = battery::allocate_shared<ISimplifier, BasicAllocator>(basic_allocator, env.extends_abstract_dom(), ipc, basic_allocator);
287 }
288 split = battery::allocate_shared<Split, BasicAllocator>(basic_allocator, env.extends_abstract_dom(), ipc, basic_allocator);
289 eps_split = battery::allocate_shared<Split, BasicAllocator>(basic_allocator, env.extends_abstract_dom(), ipc, basic_allocator);
290 search_tree = battery::allocate_shared<IST, BasicAllocator>(basic_allocator, env.extends_abstract_dom(), ipc, split, basic_allocator);
291 // Note that `best` must have the same abstract type then store (otherwise projection of the variables will fail).
292 best = battery::allocate_shared<LIStore, BasicAllocator>(basic_allocator, store->aty(), num_vars, basic_allocator);
293 bab = battery::allocate_shared<IBAB, BasicAllocator>(basic_allocator, env.extends_abstract_dom(), search_tree, best);
295 printf("%% Abstract domain allocated.\n");
296 }
297 }
298
  // Release all abstract domains and the variable environment.
  // This forces the deallocation of shared memory inside a kernel.
  CUDA void deallocate() {
    store = nullptr;
    ipc = nullptr;
    simplifier = nullptr;
    split = nullptr;
    eps_split = nullptr;
    search_tree = nullptr;
    // NOTE(review): `best` is intentionally not reset here — presumably the
    // best solution must outlive the kernel; confirm against the callers.
    bab = nullptr;
    env = VarEnv<BasicAllocator>{basic_allocator}; // this is to release the memory used by `VarEnv`.
  }
310
  // Mainly to interpret the IN constraint in IPC instead of only over-approximating in intervals.
  /** Assign abstract types to (sub)formulas before interpretation.
   * Membership constraints `IN` whose right-hand side is a set literal with
   * more than one element are typed in the IPC abstract domain; every other
   * formula keeps its type and we simply recurse over the structure. */
  template <class F>
  CUDA void typing(F& f) const {
    switch(f.index()) {
      case F::Seq:
        // `IN` over a set literal of size > 1: force this constraint into IPC.
        if(f.sig() == ::lala::IN && f.seq(1).is(F::S) && f.seq(1).s().size() > 1) {
          f.type_as(ipc->aty());
          return;
        }
        for(int i = 0; i < f.seq().size(); ++i) {
          typing(f.seq(i));
        }
        break;
      case F::ESeq:
        for(int i = 0; i < f.eseq().size(); ++i) {
          typing(f.eseq(i));
        }
        break;
    }
  }
331
332private:
333
334 // We first try to interpret, and if it does not work, we interpret again with the diagnostics mode turned on.
335 template <class F, class Env, class A>
336 CUDA bool interpret_and_diagnose_and_tell(const F& f, Env& env, A& a) {
337 IDiagnostics diagnostics;
338 if(!interpret_and_tell(f, env, a, diagnostics)) {
339 IDiagnostics diagnostics2;
340 interpret_and_tell<true>(f, env, a, diagnostics2);
341 diagnostics2.print();
342 return false;
343 }
344 return true;
345 }
346
347public:
348 template <class F>
349 CUDA bool interpret(const F& f) {
351 printf("%% Interpreting the formula...\n");
352 }
353 if(!interpret_and_diagnose_and_tell(f, env, *bab)) {
354 return false;
355 }
356 stats.variables = store->vars();
357 stats.constraints = ipc->num_deductions();
358 bool can_interpret = true;
359 if(split->num_strategies() == 0) {
360 can_interpret &= interpret_default_strategy<F>();
361 }
362 if(eps_split->num_strategies() == 0) {
363 can_interpret &= interpret_default_eps_strategy<F>();
364 }
365 return can_interpret;
366 }
367
368 template <class F>
369 CUDA bool prepare_simplifier(F& f) {
371 printf("%% Simplifying the formula...\n");
372 }
373 IDiagnostics diagnostics;
374 typename ISimplifier::template tell_type<basic_allocator_type> tell{basic_allocator};
375 if(top_level_ginterpret_in<IKind::TELL>(*simplifier, f, env, tell, diagnostics)) {
376 simplifier->deduce(std::move(tell));
377 return true;
378 }
379 else if(config.verbose_solving) {
380 printf("WARNING: Could not simplify the formula because:\n");
381 IDiagnostics diagnostics2;
382 top_level_ginterpret_in<IKind::TELL, true>(*simplifier, f, env, tell, diagnostics2);
383 diagnostics2.print();
384 }
385 return false;
386 }
387
388 template <class F>
391 printf("%% Typing the formula...\n");
392 }
393 typing(f);
394 if(config.print_ast) {
395 printf("%% Typed AST:\n");
396 f.print(true);
397 printf("\n");
398 }
399 if(!interpret(f)) {
400 exit(EXIT_FAILURE);
401 }
402
403 if(config.print_ast) {
404 printf("%% Interpreted AST:\n");
405 ipc->deinterpret(env).print(true);
406 printf("\n");
407 }
409 printf("%% Formula has been interpreted.\n");
410 }
411 }
412
413 using FormulaPtr = battery::shared_ptr<TFormula<basic_allocator_type>, basic_allocator_type>;
414
416 auto start = std::chrono::high_resolution_clock::now();
417
418 // I. Parse the FlatZinc model.
419 FormulaPtr f;
421 f = parse_flatzinc(config.problem_path.data(), solver_output);
422 }
423#ifdef WITH_XCSP3PARSER
425 f = parse_xcsp3(config.problem_path.data(), solver_output);
426 }
427#endif
428 if(!f) {
429 std::cerr << "Could not parse input file." << std::endl;
430 exit(EXIT_FAILURE);
431 }
432
434 printf("%% Input file parsed\n");
435 }
436
437 if(config.print_ast) {
438 printf("%% Parsed AST:\n");
439 f->print(true);
440 printf("\n");
441 }
442
443 allocate(num_quantified_vars(*f));
445
446 auto interpretation_time = std::chrono::high_resolution_clock::now();
447 stats.interpretation_duration += std::chrono::duration_cast<std::chrono::milliseconds>(interpretation_time - start).count();
448 return f;
449 }
450
451 void preprocess() {
452 auto raw_formula = prepare_solver();
453 auto start = std::chrono::high_resolution_clock::now();
454 if(prepare_simplifier(*raw_formula)) {
455 GaussSeidelIteration fp_engine;
456 fp_engine.fixpoint(*ipc);
457 fp_engine.fixpoint(*simplifier);
458 auto f = simplifier->deinterpret();
459 stats.eliminated_variables = simplifier->num_eliminated_variables();
460 stats.eliminated_formulas = simplifier->num_eliminated_formulas();
461 allocate(num_quantified_vars(f));
463 }
464 auto interpretation_time = std::chrono::high_resolution_clock::now();
465 stats.interpretation_duration += std::chrono::duration_cast<std::chrono::milliseconds>(interpretation_time - start).count();
466 }
467
468private:
469 template <class F>
470 CUDA bool interpret_default_strategy() {
472 printf("%% No split strategy provided, using the default one (first_fail, indomain_min).\n");
473 }
474 config.free_search = true;
475 typename F::Sequence seq;
476 seq.push_back(F::make_nary("first_fail", {}));
477 seq.push_back(F::make_nary("indomain_min", {}));
478 for(int i = 0; i < env.num_vars(); ++i) {
479 seq.push_back(F::make_avar(env[i].avars[0]));
480 }
481 F search_strat = F::make_nary("search", std::move(seq));
482 if(!interpret_and_diagnose_and_tell(search_strat, env, *bab)) {
483 return false;
484 }
485 return true;
486 }
487
488 template <class F>
489 CUDA bool interpret_default_eps_strategy() {
490 typename F::Sequence seq;
491 seq.push_back(F::make_nary("first_fail", {}));
492 seq.push_back(F::make_nary("indomain_min", {}));
493 for(int i = 0; i < env.num_vars(); ++i) {
494 seq.push_back(F::make_avar(env[i].avars[0]));
495 }
496 F search_strat = F::make_nary("search", std::move(seq));
497 if(!interpret_and_diagnose_and_tell(search_strat, env, *eps_split)) {
498 return false;
499 }
500 return true;
501 }
502
503public:
504 CUDA bool on_node() {
505 stats.nodes++;
506 stats.depth_max = battery::max(stats.depth_max, search_tree->depth());
508 prune();
509 return true;
510 }
511 return false;
512 }
513
515 return bab->is_satisfaction() || config.print_intermediate_solutions;
516 }
517
518 CUDA void print_solution() {
519 solver_output.print_solution(env, *best, *simplifier);
521 }
522
  /** Record that the search is being cut short: clearing `exhaustive` means
   * the whole search space will not have been explored when we stop. */
  CUDA void prune() {
    stats.exhaustive = false;
  }
526
527 /** Return `true` if the search state must be pruned. */
530 if(bab->is_satisfaction() && config.stop_after_n_solutions != 0 &&
532 {
533 prune();
534 return true;
535 }
536 return false;
537 }
538
539 CUDA bool on_solution_node() {
542 }
543 return update_solution_stats();
544 }
545
546 CUDA void on_failed_node() {
547 stats.fails += 1;
548 }
549
556
561 if(!bab->objective_var().is_untyped() && !best->is_top()) {
562 stats.print_mzn_objective(best->project(bab->objective_var()), bab->is_minimization());
563 }
565 }
566 }
567
568 /** Extract in `this` the content of `other`. */
569 template <class U2, class BasicAlloc2, class PropAlloc2, class StoreAlloc2>
571 if(bab->is_optimization() && !other.best->is_top() && bab->compare_bound(*other.best, *best)) {
572 other.best->extract(*best);
573 }
574 stats.meet(other.stats);
575 }
576};
577
578using Itv = Interval<local::ZLB>;
579
580template <class Universe>
581using CP = AbstractDomains<Universe,
582 battery::statistics_allocator<battery::standard_allocator>,
583 battery::statistics_allocator<UniqueLightAlloc<battery::standard_allocator, 0>>,
584 battery::statistics_allocator<UniqueLightAlloc<battery::standard_allocator, 1>>>;
585
586#endif
Interval< local::ZLB > Itv
Definition common_solving.hpp:578
bool must_quit()
Definition common_solving.hpp:89
void block_signal_ctrlc()
Definition common_solving.hpp:77
bool check_timeout(A &a, const Timepoint &start)
Definition common_solving.hpp:101
Definition common_solving.hpp:173
Definition common_solving.hpp:175
Definition common_solving.hpp:154
abstract_ptr< IBAB > bab
Definition common_solving.hpp:269
void type_and_interpret(F &f)
Definition common_solving.hpp:389
SearchTree< IPC, Split, BasicAllocator > IST
Definition common_solving.hpp:164
Configuration< BasicAllocator > config
Definition common_solving.hpp:277
CUDA void typing(F &f) const
Definition common_solving.hpp:313
CUDA bool interpret(const F &f)
Definition common_solving.hpp:349
CUDA void deallocate()
Definition common_solving.hpp:300
VStore< universe_type, BasicAllocator > LIStore
Definition common_solving.hpp:158
CUDA void print_final_solution()
Definition common_solving.hpp:550
StoreAllocator store_allocator
Definition common_solving.hpp:260
CUDA AbstractDomains(const tag_gpu_block_copy &, bool enable_sharing, const AbstractDomains< U2, BasicAlloc2, PropAllocator2, StoreAllocator2 > &other, const BasicAllocator &basic_allocator=BasicAllocator(), const PropAllocator &prop_allocator=PropAllocator(), const StoreAllocator &store_allocator=StoreAllocator())
Definition common_solving.hpp:183
CUDA bool prepare_simplifier(F &f)
Definition common_solving.hpp:369
CUDA void allocate(int num_vars)
Definition common_solving.hpp:280
BasicAllocator basic_allocator
Definition common_solving.hpp:258
BAB< IST, LIStore > IBAB
Definition common_solving.hpp:165
CUDA void meet(AbstractDomains< U2, BasicAlloc2, PropAlloc2, StoreAlloc2 > &other)
Definition common_solving.hpp:570
CUDA bool on_solution_node()
Definition common_solving.hpp:539
CUDA bool on_node()
Definition common_solving.hpp:504
abstract_ptr< LIStore > best
Definition common_solving.hpp:268
battery::shared_ptr< TFormula< basic_allocator_type >, basic_allocator_type > FormulaPtr
Definition common_solving.hpp:413
AbstractDomains(AbstractDomains &&other)=default
abstract_ptr< IStore > store
Definition common_solving.hpp:262
VarEnv< BasicAllocator > env
Definition common_solving.hpp:272
CUDA bool is_printing_intermediate_sol()
Definition common_solving.hpp:514
Simplifier< IPC, BasicAllocator > ISimplifier
Definition common_solving.hpp:162
FormulaPtr prepare_solver()
Definition common_solving.hpp:415
abstract_ptr< IPC > ipc
Definition common_solving.hpp:263
PC< IStore, PropAllocator > IPC
Definition common_solving.hpp:161
CUDA AbstractDomains(const AbstractDomains< U2, BasicAlloc2, PropAllocator2, StoreAllocator2 > &other, const BasicAllocator &basic_allocator=BasicAllocator(), const PropAllocator &prop_allocator=PropAllocator(), const StoreAllocator &store_allocator=StoreAllocator(), const tag_copy_cons &tag=tag_copy_cons{})
Definition common_solving.hpp:216
abstract_ptr< IST > search_tree
Definition common_solving.hpp:267
CUDA void on_failed_node()
Definition common_solving.hpp:546
PropAllocator prop_allocator_type
Definition common_solving.hpp:168
BasicAllocator basic_allocator_type
Definition common_solving.hpp:167
SplitStrategy< IPC, BasicAllocator > Split
Definition common_solving.hpp:163
abstract_ptr< ISimplifier > simplifier
Definition common_solving.hpp:264
abstract_ptr< Split > eps_split
Definition common_solving.hpp:266
void preprocess()
Definition common_solving.hpp:451
abstract_ptr< Split > split
Definition common_solving.hpp:265
Statistics stats
Definition common_solving.hpp:278
SolverOutput< BasicAllocator > solver_output
Definition common_solving.hpp:275
CUDA bool update_solution_stats()
Definition common_solving.hpp:528
PropAllocator prop_allocator
Definition common_solving.hpp:259
typename Universe::local_type universe_type
Definition common_solving.hpp:155
VStore< Universe, StoreAllocator > IStore
Definition common_solving.hpp:160
StoreAllocator store_allocator_type
Definition common_solving.hpp:169
CUDA void print_solution()
Definition common_solving.hpp:518
CUDA void prune()
Definition common_solving.hpp:523
CUDA AbstractDomains(const this_type &other, const BasicAllocator &basic_allocator=BasicAllocator(), const PropAllocator &prop_allocator=PropAllocator(), const StoreAllocator &store_allocator=StoreAllocator())
Definition common_solving.hpp:228
CUDA AbstractDomains(const Configuration< Alloc > &config, const BasicAllocator &basic_allocator=BasicAllocator(), const PropAllocator &prop_allocator=PropAllocator(), const StoreAllocator &store_allocator=StoreAllocator())
Definition common_solving.hpp:236
CUDA void print_mzn_statistics()
Definition common_solving.hpp:557
Definition config.hpp:28
size_t stop_after_n_nodes
Definition config.hpp:32
battery::string< allocator_type > problem_path
Definition config.hpp:45
CUDA InputFormat input_format() const
Definition config.hpp:181
bool print_intermediate_solutions
Definition config.hpp:30
bool print_ast
Definition config.hpp:36
CUDA void print_mzn_statistics() const
Definition config.hpp:157
bool verbose_solving
Definition config.hpp:35
size_t stop_after_n_solutions
Definition config.hpp:31
bool free_search
Definition config.hpp:33
bool print_statistics
Definition config.hpp:34
Definition statistics.hpp:12
size_t fails
Definition statistics.hpp:19
size_t depth_max
Definition statistics.hpp:21
CUDA void print_mzn_statistics() const
Definition statistics.hpp:78
size_t constraints
Definition statistics.hpp:14
CUDA void print_mzn_objective(const auto &obj, bool is_minimization) const
Definition statistics.hpp:104
CUDA void meet(const Statistics &other)
Definition statistics.hpp:48
int64_t interpretation_duration
Definition statistics.hpp:17
CUDA void print_mzn_end_stats() const
Definition statistics.hpp:100
size_t eliminated_variables
Definition statistics.hpp:28
CUDA void print_mzn_final_separator() const
Definition statistics.hpp:119
size_t nodes
Definition statistics.hpp:18
size_t eliminated_formulas
Definition statistics.hpp:29
size_t solutions
Definition statistics.hpp:20
size_t variables
Definition statistics.hpp:13
CUDA void print_mzn_separator() const
Definition statistics.hpp:115
size_t exhaustive
Definition statistics.hpp:22
Definition common_solving.hpp:119
CUDA void deallocate(void *data)
Definition common_solving.hpp:128
UniqueAlloc(const UniqueAlloc &alloc)=default
UniqueAlloc & operator=(const UniqueAlloc &alloc)=default
Alloc allocator
Definition common_solving.hpp:120
CUDA void * allocate(size_t bytes)
Definition common_solving.hpp:125
UniqueAlloc()=default
CUDA UniqueAlloc(const Alloc &alloc)
Definition common_solving.hpp:122
Definition common_solving.hpp:134
CUDA void * allocate(size_t bytes)
Definition common_solving.hpp:135
CUDA void deallocate(void *data)
Definition common_solving.hpp:138