Lattice Land Powerdomains Library
split_strategy.hpp
// Copyright 2023 Pierre Talbot

#ifndef LALA_POWER_SPLIT_STRATEGY_HPP
#define LALA_POWER_SPLIT_STRATEGY_HPP

#include "battery/vector.hpp"
#include "battery/shared_ptr.hpp"
#include "branch.hpp"
#include "lala/logic/logic.hpp"
#include "lala/abstract_deps.hpp"

namespace lala {

enum class VariableOrder {
  INPUT_ORDER,
  FIRST_FAIL,
  ANTI_FIRST_FAIL,
  SMALLEST,
  LARGEST,
  // unsupported:
  // OCCURRENCE,
  // MOST_CONSTRAINED,
  // MAX_REGRET,
  // DOM_W_DEG,
  // RANDOM
};

enum class ValueOrder {
  MIN,
  MAX,
  MEDIAN,
  SPLIT,
  REVERSE_SPLIT,
  // unsupported:
  // INTERVAL,
  // RANDOM,
  // MIDDLE,
};
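// These enumerators mirror the FlatZinc-style annotation names accepted by
// `SplitStrategy::interpret_tell` below (e.g. `first_fail` for the variable
// order and `indomain_min` for the value order).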

template <class A, class Allocator = typename A::allocator_type>
class SplitStrategy {
public:
  using allocator_type = Allocator;
  using sub_type = A;
  using sub_allocator_type = typename sub_type::allocator_type;
  using sub_tell_type = sub_type::template tell_type<allocator_type>;
  using branch_type = Branch<sub_tell_type, allocator_type>;

  constexpr static const bool is_abstract_universe = false;
  constexpr static const bool sequential = sub_type::sequential;
  constexpr static const bool is_totally_ordered = false;
  constexpr static const bool preserve_bot = true;
  constexpr static const bool preserve_top = true;
  // The following properties should be checked more seriously; relying on the sub-domain might be unnecessarily restrictive.
  constexpr static const bool preserve_join = sub_type::preserve_join;
  constexpr static const bool preserve_meet = sub_type::preserve_meet;
  constexpr static const bool injective_concretization = sub_type::injective_concretization;
  constexpr static const bool preserve_concrete_covers = sub_type::preserve_concrete_covers;

  constexpr static const char* name = "SplitStrategy";

  template <class Alloc>
  struct snapshot_type {
    size_t num_strategies;
    int current_strategy;
    int next_unassigned_var;

    CUDA snapshot_type(size_t num_strategies, int current_strategy, int next_unassigned_var)
      : num_strategies(num_strategies)
      , current_strategy(current_strategy)
      , next_unassigned_var(next_unassigned_var)
    {}

    snapshot_type(const snapshot_type<Alloc>&) = default;
    snapshot_type(snapshot_type<Alloc>&&) = default;
    snapshot_type<Alloc>& operator=(const snapshot_type<Alloc>&) = default;
    snapshot_type<Alloc>& operator=(snapshot_type<Alloc>&&) = default;

    template<class SplitSnapshot>
    CUDA snapshot_type(const SplitSnapshot& other, const Alloc&)
      : num_strategies(other.num_strategies)
      , current_strategy(other.current_strategy)
      , next_unassigned_var(other.next_unassigned_var)
    {}
  };

  /** A split strategy consists of a variable order and a value order on a subset of the variables. */
  template <class Alloc2>
  struct strategy_type {
    VariableOrder var_order;
    ValueOrder val_order;
    battery::vector<AVar, Alloc2> vars;

    CUDA strategy_type(const Alloc2& alloc = Alloc2{}): var_order(VariableOrder::INPUT_ORDER), val_order(ValueOrder::MIN), vars(alloc) {}

    strategy_type(const strategy_type<Alloc2>&) = default;
    strategy_type(strategy_type<Alloc2>&&) = default;
    strategy_type& operator=(strategy_type<Alloc2>&&) = default;

    CUDA strategy_type(VariableOrder var_order, ValueOrder val_order, battery::vector<AVar, Alloc2>&& vars)
      : var_order(var_order), val_order(val_order), vars(std::move(vars)) {}

    using allocator_type = Alloc2;
    CUDA allocator_type get_allocator() const {
      return vars.get_allocator();
    }

    template <class StrategyType>
    CUDA strategy_type(const StrategyType& other, const Alloc2& alloc = Alloc2{})
      : var_order(other.var_order), val_order(other.val_order), vars(other.vars, alloc) {}

    template <class Alloc3>
    friend class strategy_type;
  };
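  // Illustrative sketch (hypothetical allocator `Alloc`, allocator instance
  // `alloc` and variables `x`, `y`): a strategy that branches on the variable
  // with the smallest domain first and tries its lower bound value first.
  //
  //   battery::vector<AVar, Alloc> vs({x, y}, alloc);
  //   strategy_type<Alloc> strat(VariableOrder::FIRST_FAIL, ValueOrder::MIN, std::move(vs));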

  template <class Alloc2>
  using tell_type = battery::vector<strategy_type<Alloc2>, Alloc2>;

  template <class A2, class Alloc2>
  friend class SplitStrategy;

private:
  using universe_type = typename A::universe_type;
  using LB = typename universe_type::LB;
  using UB = typename universe_type::UB;

  AType atype;
  abstract_ptr<A> a;
  battery::vector<strategy_type<allocator_type>, allocator_type> strategies;
  int current_strategy;
  int next_unassigned_var;

  CUDA const battery::vector<AVar, allocator_type>& current_vars() const {
    return strategies[current_strategy].vars;
  }

  CUDA NI void move_to_next_unassigned_var() {
    while(current_strategy < strategies.size()) {
      const auto& vars = current_vars();
      while(next_unassigned_var < vars.size()) {
        universe_type v{};
        a->project(vars[next_unassigned_var], v);
        if(v.lb().value() != v.ub().value()) {
          return;
        }
        next_unassigned_var++;
      }
      current_strategy++;
      next_unassigned_var = 0;
    }
  }

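  // Helper for the variable orders below: map each candidate variable through
  // `op` and keep the one whose mapped value strictly refines the running
  // `best` under the lattice `meet` (a changed `best` means a better candidate
  // was found). Variables with singleton domains are skipped.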
  template <class MapFunction>
  CUDA NI AVar var_map_fold_left(const battery::vector<AVar, allocator_type>& vars, MapFunction op) {
    int i = next_unassigned_var;
    int best_i = i;
    auto best = op(a->project(vars[i]));
    for(++i; i < vars.size(); ++i) {
      const auto& u = a->project(vars[i]);
      if(u.lb().value() != u.ub().value()) {
        if(best.meet(op(u))) {
          best_i = i;
        }
      }
    }
    return vars[best_i];
  }

  CUDA AVar select_var() {
    const auto& strat = strategies[current_strategy];
    const auto& vars = strat.vars;
    switch(strat.var_order) {
      case VariableOrder::INPUT_ORDER: return vars[next_unassigned_var];
      case VariableOrder::FIRST_FAIL: return var_map_fold_left(vars, [](const universe_type& u) { return u.width().ub(); });
      case VariableOrder::ANTI_FIRST_FAIL: return var_map_fold_left(vars, [](const universe_type& u) { return dual_bound<LB>(u.width().ub()); });
      case VariableOrder::LARGEST: return var_map_fold_left(vars, [](const universe_type& u) { return dual_bound<LB>(u.ub()); });
      case VariableOrder::SMALLEST: return var_map_fold_left(vars, [](const universe_type& u) { return dual_bound<UB>(u.lb()); });
      default: printf("BUG: unsupported variable order strategy\n"); assert(false); return AVar{};
    }
  }
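  // Note: ANTI_FIRST_FAIL, LARGEST and SMALLEST wrap the projected bound in
  // `dual_bound` to flip the direction of comparison, so the single
  // `meet`-based fold of `var_map_fold_left` serves all the variable orders.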

  template <class U>
  CUDA NI branch_type make_branch(AVar x, Sig left_op, Sig right_op, const U& u) {
    if((u.is_top() && U::preserve_top) || (u.is_bot() && U::preserve_bot)) {
      if(u.is_top()) {
        printf("%% WARNING: Cannot currently branch on unbounded variables.\n");
      }
      return branch_type(get_allocator());
    }
    using F = TFormula<allocator_type>;
    using branch_vector = battery::vector<sub_tell_type, allocator_type>;
    VarEnv<allocator_type> empty_env{};
    auto k = u.template deinterpret<F>();
    IDiagnostics diagnostics;
    sub_tell_type left(get_allocator());
    sub_tell_type right(get_allocator());
    bool res = a->interpret_tell(F::make_binary(F::make_avar(x), left_op, k, x.aty(), get_allocator()), empty_env, left, diagnostics);
    res &= a->interpret_tell(F::make_binary(F::make_avar(x), right_op, k, x.aty(), get_allocator()), empty_env, right, diagnostics);
    if(res) {
      return Branch(branch_vector({std::move(left), std::move(right)}, get_allocator()));
    }
    // Fall back on a more standard split search strategy.
    // We don't print anything here because printing without a lock might interfere with the output.
    else if(left_op != LEQ || right_op != GT) {
      return make_branch(x, LEQ, GT, u);
    }
    else {
      printf("%% WARNING: The subdomain does not support the underlying search strategy.\n");
      // Re-run the interpretation in diagnostics mode to explain the failure.
      a->template interpret_tell<true>(F::make_binary(F::make_avar(x), left_op, k, x.aty(), get_allocator()), empty_env, left, diagnostics);
      a->template interpret_tell<true>(F::make_binary(F::make_avar(x), right_op, k, x.aty(), get_allocator()), empty_env, right, diagnostics);
      diagnostics.print();
      return branch_type(get_allocator());
    }
  }
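  // For example, `make_branch(x, LEQ, GT, m)` attempts to build the two
  // branches `x <= m` and `x > m`; when the sub-domain cannot interpret the
  // requested pair of constraints, the code above first falls back on this
  // `<=`/`>` split before giving up with a diagnostic.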

public:
  CUDA SplitStrategy(AType atype, abstract_ptr<A> a, const allocator_type& alloc = allocator_type()):
    atype(atype), a(a), strategies(alloc), current_strategy(0), next_unassigned_var(0)
  {}

  template<class A2, class Alloc2, class... Allocators>
  CUDA SplitStrategy(const SplitStrategy<A2, Alloc2>& other, AbstractDeps<Allocators...>& deps)
    : atype(other.atype),
      a(deps.template clone<A>(other.a)),
      strategies(other.strategies, deps.template get_allocator<allocator_type>()),
      current_strategy(other.current_strategy),
      next_unassigned_var(other.next_unassigned_var)
  {}

  CUDA AType aty() const {
    return atype;
  }

  CUDA allocator_type get_allocator() const {
    return strategies.get_allocator();
  }

  template <class Alloc2 = allocator_type>
  CUDA snapshot_type<Alloc2> snapshot(const Alloc2& alloc = Alloc2()) const {
    return snapshot_type<Alloc2>{strategies.size(), current_strategy, next_unassigned_var};
  }

  template <class Alloc2 = allocator_type>
  CUDA void restore(const snapshot_type<Alloc2>& snap) {
    while(strategies.size() > snap.num_strategies) {
      strategies.pop_back();
    }
    current_strategy = snap.current_strategy;
    next_unassigned_var = snap.next_unassigned_var;
  }

  /** Restart the search from the first variable. */
  CUDA void reset() {
    current_strategy = 0;
    next_unassigned_var = 0;
  }

  /** This interpretation function expects `f` to be a predicate of the form `search(VariableOrder, ValueOrder, x_1, x_2, ..., x_n)`. */
  template <bool diagnose = false, class F, class Env, class Alloc2>
  CUDA NI bool interpret_tell(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    if(!(f.is(F::ESeq)
      && f.eseq().size() >= 2
      && f.esig() == "search"
      && f.eseq()[0].is(F::ESeq) && f.eseq()[0].eseq().size() == 0
      && f.eseq()[1].is(F::ESeq) && f.eseq()[1].eseq().size() == 0))
    {
      RETURN_INTERPRETATION_ERROR("SplitStrategy can only interpret predicates of the form `search(input_order, indomain_min, x1, ..., xN)`.");
    }
    strategy_type<Alloc2> strat{tell.get_allocator()};
    const auto& var_order_str = f.eseq()[0].esig();
    const auto& val_order_str = f.eseq()[1].esig();
    if(var_order_str == "input_order") { strat.var_order = VariableOrder::INPUT_ORDER; }
    else if(var_order_str == "first_fail") { strat.var_order = VariableOrder::FIRST_FAIL; }
    else if(var_order_str == "anti_first_fail") { strat.var_order = VariableOrder::ANTI_FIRST_FAIL; }
    else if(var_order_str == "smallest") { strat.var_order = VariableOrder::SMALLEST; }
    else if(var_order_str == "largest") { strat.var_order = VariableOrder::LARGEST; }
    else {
      RETURN_INTERPRETATION_ERROR("This variable order strategy is unsupported.");
    }
    if(val_order_str == "indomain_min") { strat.val_order = ValueOrder::MIN; }
    else if(val_order_str == "indomain_max") { strat.val_order = ValueOrder::MAX; }
    else if(val_order_str == "indomain_median") { strat.val_order = ValueOrder::MEDIAN; }
    else if(val_order_str == "indomain_split") { strat.val_order = ValueOrder::SPLIT; }
    else if(val_order_str == "indomain_reverse_split") { strat.val_order = ValueOrder::REVERSE_SPLIT; }
    else {
      RETURN_INTERPRETATION_ERROR("This value order strategy is unsupported.");
    }
    for(int i = 2; i < f.eseq().size(); ++i) {
      if(f.eseq(i).is(F::LV)) {
        strat.vars.push_back(AVar{});
        if(!env.interpret(f.eseq(i), strat.vars.back(), diagnostics)) {
          return false;
        }
      }
      else if(f.eseq(i).is(F::V)) {
        strat.vars.push_back(f.eseq(i).v());
      }
      else if(num_vars(f.eseq(i)) > 0) {
        RETURN_INTERPRETATION_ERROR("The predicate `search` only supports variables or constants, but an expression containing a variable was passed to it.");
      }
      // Ignore constant expressions.
      else {}
    }
    if(strat.vars.size() == 0) {
      RETURN_INTERPRETATION_WARNING("The predicate `search` has no variables and is thus ignored.");
    }
    tell.push_back(std::move(strat));
    return true;
  }
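  // For instance (hypothetical variables), the annotation
  // `search(first_fail, indomain_min, x, y)` is interpreted as a single
  // strategy with `var_order = FIRST_FAIL`, `val_order = MIN` and
  // `vars = {x, y}`, which is appended to `tell`.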

  template <IKind kind, bool diagnose = false, class F, class Env, class I>
  CUDA bool interpret(const F& f, Env& env, I& intermediate, IDiagnostics& diagnostics) const {
    static_assert(kind == IKind::TELL);
    return interpret_tell<diagnose>(f, env, intermediate, diagnostics);
  }

  /** This deduce method adds new strategies and therefore does not satisfy the PCCP model.
   * Calling this function multiple times creates multiple strategies, which are applied in sequence along a branch of the search tree.
   * @sequential
   */
  template <class Alloc2>
  CUDA local::B deduce(const tell_type<Alloc2>& t) {
    local::B has_changed = false;
    for(int i = 0; i < t.size(); ++i) {
      if(t[i].vars.size() > 0) {
        strategies.push_back(t[i]);
        has_changed = true;
      }
    }
    return has_changed;
  }
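  // E.g., deducing two `search` predicates one after the other yields two
  // strategies; the second is only used once all variables of the first are
  // assigned (see `move_to_next_unassigned_var`).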

  /** Split the next unassigned variable according to the current strategy.
   * If all variables of the current strategy are assigned, use the next strategy.
   * If no strategy remains, return an empty set of branches.
   *
   * If the next unassigned variable cannot be split, for instance because the value ordering strategy maps to `bot` or `top`, an empty set of branches is returned.
   * Hence, `split(a) = {}` does not necessarily mean `a` is at `bot`. */
  CUDA NI branch_type split() {
    if(a->is_bot()) {
      return branch_type(get_allocator());
    }
    move_to_next_unassigned_var();
    if(current_strategy < strategies.size()) {
      AVar x = select_var();
      switch(strategies[current_strategy].val_order) {
        case ValueOrder::MIN: return make_branch(x, EQ, GT, a->project(x).lb());
        case ValueOrder::MAX: return make_branch(x, EQ, LT, a->project(x).ub());
        case ValueOrder::MEDIAN: return make_branch(x, EQ, NEQ, a->project(x).median().lb());
        case ValueOrder::SPLIT: return make_branch(x, LEQ, GT, a->project(x).median().lb());
        case ValueOrder::REVERSE_SPLIT: return make_branch(x, GT, LEQ, a->project(x).median().lb());
        default: printf("BUG: unsupported value order strategy\n"); assert(false); return branch_type(get_allocator());
      }
    }
    else {
      // All variables are assigned, but the underlying abstract domain did not
      // detect the satisfiability or unsatisfiability of the problem.
      return branch_type(get_allocator());
    }
  }

  CUDA size_t num_strategies() const {
    return strategies.size();
  }
};
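
// Illustrative usage sketch, assuming a sub-domain `Sub`, an abstract type
// identifier `aty`, a variable environment `env` and an already interpreted
// search annotation `search_f` (all hypothetical names):
//
//   abstract_ptr<Sub> sub = ...;
//   SplitStrategy<Sub> split(aty, sub);
//   SplitStrategy<Sub>::tell_type<battery::standard_allocator> tell;
//   IDiagnostics diag;
//   if(split.interpret_tell(search_f, env, tell, diag)) {
//     split.deduce(tell);
//   }
//   auto branches = split.split();  // May be empty if nothing can be split.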

} // namespace lala

#endif