Lattice land propagators completion library
pc.hpp
Go to the documentation of this file.
1 // Copyright 2021 Pierre Talbot
2 
3 #ifndef LALA_PC_IPC_HPP
4 #define LALA_PC_IPC_HPP
5 
6 #include "battery/vector.hpp"
7 #include "battery/unique_ptr.hpp"
8 #include "battery/shared_ptr.hpp"
9 #include "battery/root_ptr.hpp"
10 #include "battery/allocator.hpp"
11 #include "battery/algorithm.hpp"
12 
13 #include "lala/logic/logic.hpp"
14 #include "lala/universes/arith_bound.hpp"
15 #include "lala/abstract_deps.hpp"
16 #include "lala/vstore.hpp"
17 
18 #include "terms.hpp"
19 #include "formula.hpp"
20 
21 namespace lala {
22 template <class A, class Alloc> class PC;
namespace impl {
  /** Type trait: `is_pc_like<T>::value` is `true` iff `T` is an instantiation of `PC`.
   * Used by `PC::extract` to detect whether the target element is itself a propagator completion. */
  template <class>
  struct is_pc_like {
    static constexpr bool value = false;
  };
  template<class A, class Alloc>
  struct is_pc_like<PC<A, Alloc>> {
    static constexpr bool value = true;
  };
}
33 
34 /** PC is an abstract transformer built on top of an abstract domain `A`.
35  It is expected that `A` has a projection function `u = project(x)`.
36  We also expect a `tell(x, u, has_changed)` function to join the abstract universe `u` in the domain of the variable `x`.
37  An example of abstract domain satisfying these requirements is `VStore<Interval<ZInc>>`. */
38 template <class A, class Allocator = typename A::allocator_type>
39 class PC {
40 public:
41  using sub_type = A;
42  using universe_type = typename A::universe_type;
43  using local_universe_type = typename universe_type::local_type;
44  using allocator_type = Allocator;
45  using sub_allocator_type = typename A::allocator_type;
47 
48  template <class Alloc>
50  {
51  using sub_snap_type = A::template snapshot_type<Alloc>;
52  int num_props;
54 
57  , sub_snap(std::move(sub_snap))
58  {}
59 
64 
65  template <class SnapshotType>
66  CUDA snapshot_type(const SnapshotType& other, const Alloc& alloc = Alloc{})
67  : num_props(other.num_props)
68  , sub_snap(other.sub_snap, alloc)
69  {}
70  };
71 
72  using sub_ptr = abstract_ptr<sub_type>;
73 
74  constexpr static const bool is_abstract_universe = false;
75  constexpr static const bool sequential = sub_type::sequential;
76  constexpr static const bool is_totally_ordered = false;
77  constexpr static const bool preserve_bot = true;
78  // The next properties should be checked more seriously, relying on the sub-domain might be uneccessarily restrictive.
79  constexpr static const bool preserve_top = sub_type::preserve_top;
80  constexpr static const bool preserve_join = sub_type::preserve_join;
81  constexpr static const bool preserve_meet = sub_type::preserve_meet;
82  constexpr static const bool injective_concretization = sub_type::injective_concretization;
83  constexpr static const bool preserve_concrete_covers = sub_type::preserve_concrete_covers;
84  constexpr static const char* name = "PC";
85 
86  template <class A2, class Alloc2>
87  friend class PC;
88 
89 private:
92  template<class Alloc> using term_ptr = battery::unique_ptr<pc::Term<A, Alloc>, Alloc>;
93 
94  AType atype;
95  sub_ptr sub;
96  using props_type = battery::vector<formula_type, allocator_type>;
97  battery::root_ptr<props_type, allocator_type> props;
98 
99 public:
100  template <class Alloc>
101  using formula_seq = battery::vector<pc::Formula<A, Alloc>, Alloc>;
102 
103  template <class Alloc>
104  using term_seq = battery::vector<pc::Term<A, Alloc>, Alloc>;
105 
106  template <class Alloc, class SubType>
108  SubType sub_value;
110 
114 
115  CUDA interpreted_type(const SubType& sub_value, const Alloc& alloc = Alloc{})
116  : sub_value(sub_value), props(alloc)
117  {}
118 
119  CUDA interpreted_type(const Alloc& alloc = Alloc{})
120  : sub_value(alloc), props(alloc) {}
121 
122  template <class InterpretedType>
123  CUDA interpreted_type(const InterpretedType& other, const Alloc& alloc = Alloc{})
124  : sub_value(other.sub_value, alloc)
125  , props(other.props, alloc)
126  {}
127 
128  template <class Alloc2, class SubType2>
129  friend struct interpreted_type;
130  };
131 
132  template <class Alloc>
134 
135  template <class Alloc>
137 
  /** Create a propagator completion of abstract type `atype` over the sub-domain `sub`, with an initially empty set of propagators allocated with `alloc`. */
  CUDA PC(AType atype, sub_ptr sub, const allocator_type& alloc = allocator_type{})
   : atype(atype), sub(std::move(sub))
   , props(battery::allocate_root<props_type, allocator_type>(alloc, alloc)) {}
141 
142  CUDA PC(PC&& other)
143  : atype(other.atype)
144  , props(std::move(other.props))
145  , sub(std::move(other.sub))
146  {}
147 
148 private:
149  // When activated (`deps.is_shared_copy()`), we avoid copying the propagators and share them with the ones of the root `other`.
150  // This allows to save up memory and to avoid contention on L2 cache among blocks.
151  template<class A2, class Alloc2, class... Allocators>
152  CUDA static battery::root_ptr<props_type, allocator_type> init_props(const PC<A2, Alloc2>& other, AbstractDeps<Allocators...>& deps) {
153  auto alloc = deps.template get_allocator<allocator_type>();
154  if constexpr(std::is_same_v<allocator_type, Alloc2>) {
155  if(deps.is_shared_copy()) {
156  return other.props;
157  }
158  }
159  auto r = battery::allocate_root<props_type, allocator_type>(alloc, *(other.props), alloc);
160  return std::move(r);
161  }
162 
163 public:
  /** Copy constructor used by `AbstractDeps` when cloning a hierarchy of abstract domains.
   * The sub-domain is cloned through `deps` (which may return a shared instance);
   * the propagators are copied or shared depending on `deps.is_shared_copy()` (see `init_props`). */
  template<class A2, class Alloc2, class... Allocators>
  CUDA PC(const PC<A2, Alloc2>& other, AbstractDeps<Allocators...>& deps)
   : atype(other.atype)
   , sub(deps.template clone<A>(other.sub))
   , props(init_props(other, deps))
  {}
170 
172  return props.get_allocator();
173  }
174 
  /** Returns the abstract type identifier of this abstract domain. */
  CUDA AType aty() const {
    return atype;
  }
178 
  /** Create a bottom element: a PC with no propagators over a bottom sub-domain.
   * @param atype abstract type of this PC; @param atype_sub abstract type of the sub-domain. */
  CUDA static this_type bot(AType atype = UNTYPED,
    AType atype_sub = UNTYPED,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    return PC{atype, battery::allocate_shared<sub_type>(sub_alloc, sub_type::bot(atype_sub, sub_alloc)), alloc};
  }
186 
  /** A special symbolic element representing top: a PC with no propagators over a top sub-domain. */
  CUDA static this_type top(AType atype = UNTYPED,
    AType atype_sub = UNTYPED,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    return PC{atype, battery::allocate_shared<sub_type>(sub_alloc, sub_type::top(atype_sub, sub_alloc)), alloc};
  }
195 
  /** Create a bottom element whose abstract types are registered in `env`.
   * NOTE(review): `extends_abstract_dom()` is called twice, presumably allocating one
   * fresh abstract type for the sub-domain and a distinct one for PC — confirm against `Env`'s contract. */
  template <class Env>
  CUDA static this_type bot(Env& env,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    AType atype_sub = env.extends_abstract_dom();
    AType atype = env.extends_abstract_dom();
    return bot(atype, atype_sub, alloc, sub_alloc);
  }
205 
  /** Create a top element whose abstract types are registered in `env`.
   * NOTE(review): as for `bot(Env&)`, two distinct abstract types are requested from `env` — confirm intended. */
  template <class Env>
  CUDA static this_type top(Env& env,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    AType atype_sub = env.extends_abstract_dom();
    AType atype = env.extends_abstract_dom();
    return top(atype, atype_sub, alloc, sub_alloc);
  }
215 
216 private:
217  template <bool diagnose, class F, class Alloc>
218  CUDA bool interpret_unary(const F& f, term_seq<Alloc>& intermediate, IDiagnostics& diagnostics, term_ptr<Alloc>&& x) const {
219  using T = pc::Term<A, Alloc>;
220  Alloc alloc = x.get_allocator();
221  switch(f.sig()) {
222  case NEG: intermediate.push_back(T::make_neg(std::move(x))); break;
223  case ABS: intermediate.push_back(T::make_abs(std::move(x))); break;
224  default: RETURN_INTERPRETATION_ERROR("Unsupported unary symbol in a term.");
225  }
226  return true;
227  }
228 
229  template <bool diagnose, class F, class Alloc>
230  CUDA bool interpret_binary(const F& f, term_seq<Alloc>& intermediate, IDiagnostics& diagnostics, term_ptr<Alloc>&& x, term_ptr<Alloc>&& y) const
231  {
232  using T = pc::Term<A, Alloc>;
233  switch(f.sig()) {
234  case ADD: intermediate.push_back(T::make_add(std::move(x), std::move(y))); break;
235  case SUB: intermediate.push_back(T::make_sub(std::move(x), std::move(y))); break;
236  case MUL: intermediate.push_back(T::make_mul(std::move(x), std::move(y))); break;
237  case TDIV: intermediate.push_back(T::make_tdiv(std::move(x), std::move(y))); break;
238  case FDIV: intermediate.push_back(T::make_fdiv(std::move(x), std::move(y))); break;
239  case CDIV: intermediate.push_back(T::make_cdiv(std::move(x), std::move(y))); break;
240  case EDIV: intermediate.push_back(T::make_ediv(std::move(x), std::move(y))); break;
241  case MIN: intermediate.push_back(T::make_min(std::move(x), std::move(y))); break;
242  case MAX: intermediate.push_back(T::make_max(std::move(x), std::move(y))); break;
243  default: RETURN_INTERPRETATION_ERROR("Unsupported binary symbol in a term.");
244  }
245  return true;
246  }
247 
248  template <bool diagnose, class F, class Alloc>
249  CUDA bool interpret_nary(const F& f, term_seq<Alloc>& intermediate, IDiagnostics& diagnostics, term_seq<Alloc>&& operands) const
250  {
251  using T = pc::Term<A, Alloc>;
252  switch(f.sig()) {
253  case ADD: intermediate.push_back(T::make_naryadd(std::move(operands))); break;
254  case MUL: intermediate.push_back(T::make_narymul(std::move(operands))); break;
255  default: RETURN_INTERPRETATION_ERROR("Unsupported nary symbol in a term.");
256  }
257  return true;
258  }
259 
  /** Interpret the operands of the sequence formula `f` as terms, then dispatch to the
   * unary/binary/n-ary term builder according to the number of operands.
   * Operands that fail to interpret as terms are retried as formulas and wrapped into
   * reified terms (`T::make_formula`). */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_sequence(const F& f, Env& env, term_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const
  {
    using T = pc::Term<A, Alloc>;
    Alloc alloc = intermediate.get_allocator();
    term_seq<Alloc> subterms = term_seq<Alloc>(alloc);
    formula_seq<Alloc> subformulas = formula_seq<Alloc>(alloc);
    for(int i = 0; i < f.seq().size(); ++i) {
      // We first try to interpret the formula `f.seq(i)` as a term, if that fails, try as a formula and wrap it in a term.
      if(!interpret_term<kind, diagnose>(f.seq(i), env, subterms, diagnostics)) {
        if(!interpret_formula<kind, diagnose>(f.seq(i), env, subformulas, diagnostics, true)) {
          return false;
        }
        else {
          // On success `subformulas` grew by one; wrap that last formula into a term.
          subterms.push_back(T::make_formula(
            battery::allocate_unique<pc::Formula<A, Alloc>>(alloc, std::move(subformulas.back()))));
        }
      }
    }
    // Dispatch on arity: 1 -> unary builder, 2 -> binary builder, otherwise n-ary builder.
    if(subterms.size() == 1) {
      return interpret_unary<diagnose>(f,
        intermediate,
        diagnostics,
        battery::allocate_unique<T>(alloc, std::move(subterms[0])));
    }
    else if(subterms.size() == 2) {
      return interpret_binary<diagnose>(f,
        intermediate,
        diagnostics,
        battery::allocate_unique<T>(alloc, std::move(subterms[0])),
        battery::allocate_unique<T>(alloc, std::move(subterms[1])));
    }
    else {
      return interpret_nary<diagnose>(f,
        intermediate,
        diagnostics,
        std::move(subterms));
    }
  }
299 
300  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
301  CUDA bool interpret_term(const F& f, Env& env, term_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
302  using T = pc::Term<A, Alloc>;
303  using F2 = TFormula<Alloc>;
304  if(f.is_variable()) {
305  AVar avar;
306  if(env.template interpret<diagnose>(f, avar, diagnostics)) {
307  intermediate.push_back(T::make_var(avar));
308  return true;
309  }
310  else {
311  return false;
312  }
313  }
314  else if(f.is_constant()) {
315  auto constant = F2::make_binary(F2::make_avar(AVar{}), EQ, f, UNTYPED, intermediate.get_allocator());
317  if(local_universe_type::template interpret<kind, diagnose>(constant, env, k, diagnostics)) {
318  intermediate.push_back(T::make_constant(std::move(k)));
319  return true;
320  }
321  else {
322  RETURN_INTERPRETATION_ERROR("Constant in a term could not be interpreted in the underlying abstract universe.");
323  }
324  }
325  else if(f.is(F::Seq)) {
326  return interpret_sequence<kind, diagnose>(f, env, intermediate, diagnostics);
327  }
328  else {
329  RETURN_INTERPRETATION_ERROR("The shape of the formula is not supported in PC, and could not be interpreted as a term.");
330  }
331  }
332 
  /** Interpret the logical negation of `f` by first computing the negation normal form
   * `nf = not f` syntactically, then interpreting `nf` as a regular formula.
   * Fails if the negation cannot be computed. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_negation(const F& f, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics, bool neg_context) const {
    auto nf = negate(f);
    if(nf.has_value()) {
      return interpret_formula<kind, diagnose>(*nf, env, intermediate, diagnostics, neg_context);
    }
    else {
      RETURN_INTERPRETATION_ERROR("We must query this formula for disentailement, but we could not compute its negation.");
    }
  }
343 
  /** Interpret both sides `f` and `g` as formulas and combine them with the factory `create`
   * (conjunction, disjunction, ...). On success, `intermediate` grows by exactly one. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc, class Create>
  CUDA bool interpret_binary_logical_connector(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics, bool neg_context, Create&& create) const {
    using PF = pc::Formula<A, Alloc>;
    Alloc alloc = intermediate.get_allocator();
    formula_seq<Alloc> operands = formula_seq<Alloc>(alloc);
    if( interpret_formula<kind, diagnose>(f, env, operands, diagnostics, neg_context)
     && interpret_formula<kind, diagnose>(g, env, operands, diagnostics, neg_context))
    {
      // Each successful interpretation pushed exactly one formula, so operands[0]/[1] are f/g.
      intermediate.push_back(create(
        battery::allocate_unique<PF>(alloc, std::move(operands[0])),
        battery::allocate_unique<PF>(alloc, std::move(operands[1]))));
      return true;
    }
    else {
      return false;
    }
  }
361 
  /** Interpret `f /\ g`; the negation context is propagated to both operands. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_conjunction(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics, bool neg_context) const {
    using PF = pc::Formula<A, Alloc>;
    return interpret_binary_logical_connector<kind, diagnose>(f, g, env, intermediate, diagnostics, neg_context,
      [](auto&& l, auto&& k) { return PF::make_conj(std::move(l), std::move(k)); });
  }
368 
  /** Interpret `f \/ g`; disjunction always requires the negation of its operands (ask queries), hence `neg_context = true`. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_disjunction(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
    using PF = pc::Formula<A, Alloc>;
    return interpret_binary_logical_connector<kind, diagnose>(f, g, env, intermediate, diagnostics, true,
      [](auto&& l, auto&& k) { return PF::make_disj(std::move(l), std::move(k)); });
  }
375 
  /** Interpret `f <=> g`; both operands are interpreted in a negation context. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_biconditional(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
    using PF = pc::Formula<A, Alloc>;
    return interpret_binary_logical_connector<kind, diagnose>(f, g, env, intermediate, diagnostics, true,
      [](auto&& l, auto&& k) { return PF::make_bicond(std::move(l), std::move(k)); });
  }
382 
  /** Interpret `f => g`; both operands are interpreted in a negation context. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_implication(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
    using PF = pc::Formula<A, Alloc>;
    return interpret_binary_logical_connector<kind, diagnose>(f, g, env, intermediate, diagnostics, true,
      [](auto&& l, auto&& k) { return PF::make_imply(std::move(l), std::move(k)); });
  }
389 
  /** Interpret `f xor g`; both operands are interpreted in a negation context. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_xor(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
    using PF = pc::Formula<A, Alloc>;
    return interpret_binary_logical_connector<kind, diagnose>(f, g, env, intermediate, diagnostics, true,
      [](auto&& l, auto&& k) { return PF::make_xor(std::move(l), std::move(k)); });
  }
396 
397  template <IKind kind, bool diagnose, class F, class Env, class Alloc, class Create>
398  CUDA bool interpret_binary_predicate(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics, Create&& create) const
399  {
400  using PF = pc::Formula<A, Alloc>;
401  using T = pc::Term<A, Alloc>;
402  Alloc alloc = intermediate.get_allocator();
403  term_seq<Alloc> operands = term_seq<Alloc>(alloc);
404  if( interpret_term<kind, diagnose>(f, env, operands, diagnostics)
405  && interpret_term<kind, diagnose>(g, env, operands, diagnostics))
406  {
407  intermediate.push_back(create(std::move(operands[0]), std::move(operands[1])));
408  return true;
409  }
410  else {
411  return false;
412  }
413  }
414 
  /** Interpret the equality predicate `f = g` over two terms. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_equality(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
    using PF = pc::Formula<A, Alloc>;
    return interpret_binary_predicate<kind, diagnose>(f, g, env, intermediate, diagnostics,
      [](auto&& l, auto&& k) { return PF::make_eq(std::move(l), std::move(k)); });
  }
421 
  /** Interpret the disequality predicate `f != g` over two terms. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_disequality(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
    using PF = pc::Formula<A, Alloc>;
    return interpret_binary_predicate<kind, diagnose>(f, g, env, intermediate, diagnostics,
      [](auto&& l, auto&& k) { return PF::make_neq(std::move(l), std::move(k)); });
  }
428 
  /** Interpret the predicate `f <= g` over two terms. `GEQ` is handled by the caller swapping the operands. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_inequalityLEQ(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
    using PF = pc::Formula<A, Alloc>;
    return interpret_binary_predicate<kind, diagnose>(f, g, env, intermediate, diagnostics,
      [](auto&& l, auto&& k) { return PF::make_leq(std::move(l), std::move(k)); });
  }
435 
  /** Interpret the predicate `f > g` over two terms. `LT` is handled by the caller swapping the operands. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_inequalityGT(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
    using PF = pc::Formula<A, Alloc>;
    return interpret_binary_predicate<kind, diagnose>(f, g, env, intermediate, diagnostics,
      [](auto&& l, auto&& k) { return PF::make_gt(std::move(l), std::move(k)); });
  }
442 
443  template <bool neg, bool diagnose, class F, class Env, class Alloc>
444  CUDA bool interpret_literal(const F& f, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
445  using PF = pc::Formula<A, Alloc>;
446  AVar avar{};
447  if(env.template interpret<diagnose>(f, avar, diagnostics)) {
448  if constexpr(neg) {
449  intermediate.push_back(PF::make_nvarlit(avar));
450  }
451  else {
452  intermediate.push_back(PF::make_pvarlit(avar));
453  }
454  return true;
455  }
456  return false;
457  }
458 
  /** Interpret the membership constraint `x in S` by first decomposing it into
   * elementary constraints (`decompose_in_constraint`) and interpreting the result. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_in_decomposition(const F& f, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics, bool neg_context = false) const {
    return interpret_formula<kind, diagnose>(
      decompose_in_constraint(f),
      env, intermediate, diagnostics, neg_context);
  }
465 
466  template <class F>
467  CUDA F binarize(const F& f, int i) const {
468  assert(f.is(F::Seq) && f.seq().size() >= 2);
469  if(i + 2 == f.seq().size()) {
470  return F::make_binary(f.seq(i), f.sig(), f.seq(i+1), f.type(), f.seq().get_allocator(), false);
471  }
472  else {
473  return F::make_binary(f.seq(i), f.sig(), binarize(f, i+1), f.type(), f.seq().get_allocator(), false);
474  }
475  }
476 
  /** Interpret `f` directly in the sub-domain as an "abstract element" formula.
   * Used when `f` is typed for the sub-domain but occurs in a negation context: we need
   * both the tell and ask interpretations of `f` AND of its negation to be able to
   * query (dis)entailment later. All four interpretations must succeed. */
  template <bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_abstract_element(const F& f, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
    auto nf = negate(f);
    if(!nf.has_value()) {
      RETURN_INTERPRETATION_ERROR("We must query this formula for disentailement, but we could not compute its negation.");
    }
    else {
      typename A::template tell_type<Alloc> tellv(intermediate.get_allocator());
      typename A::template tell_type<Alloc> not_tellv(intermediate.get_allocator());
      typename A::template tell_type<Alloc> askv(intermediate.get_allocator());
      typename A::template tell_type<Alloc> not_askv(intermediate.get_allocator());
      if( sub->template interpret<IKind::TELL, diagnose>(f, env, tellv, diagnostics)
       && sub->template interpret<IKind::TELL, diagnose>(nf.value(), env, not_tellv, diagnostics)
       && sub->template interpret<IKind::ASK, diagnose>(f, env, askv, diagnostics)
       && sub->template interpret<IKind::ASK, diagnose>(nf.value(), env, not_askv, diagnostics))
      {
        using PF = pc::Formula<A, Alloc>;
        intermediate.push_back(PF::make_abstract_element(std::move(tellv), std::move(not_tellv), std::move(askv), std::move(not_askv)));
        return true;
      }
      return false;
    }
  }
500 
  /** We interpret the formula `f` in the value `intermediate`, note that we only grow `intermediate` by 0 (if interpretation failed) or 1 (if it succeeds).
   * It is convenient to use a vector because it carries the allocator, and it is the type of the `props` component of the tell/ask type.
   * Dispatch order matters: literals are matched before generic negation/variables, and
   * n-ary connectors are binarized before the binary dispatch below. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_formula(const F& f, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics, bool neg_context = false) const {
    using PF = pc::Formula<A, Alloc>;
    using F2 = TFormula<Alloc>;
    Alloc alloc = intermediate.get_allocator();
    // Formula typed for another abstract domain: only acceptable inside a negation
    // context, where we delegate to the sub-domain as an abstract element.
    if(f.type() != aty() && !f.is_untyped() && !f.is_variable()) {
      if(!neg_context) {
        RETURN_INTERPRETATION_ERROR("The type of the formula does not match the type of this abstract domain.");
      }
      return interpret_abstract_element<diagnose>(f, env, intermediate, diagnostics);
    }
    if(f.is_false()) {
      intermediate.push_back(PF::make_false());
      return true;
    }
    else if(f.is_true()) {
      intermediate.push_back(PF::make_true());
      return true;
    }
    // Negative literal
    else if(f.is(F::Seq) && f.seq().size() == 1 && f.sig() == NOT &&
        f.seq(0).is_variable())
    {
      return interpret_literal<true, diagnose>(f.seq(0), env, intermediate, diagnostics);
    }
    // Positive literal
    else if(f.is_variable()) {
      return interpret_literal<false, diagnose>(f, env, intermediate, diagnostics);
    }
    // Logical negation
    else if(f.is(F::Seq) && f.seq().size() == 1 && f.sig() == NOT) {
      return interpret_negation<kind, diagnose>(f.seq(0), env, intermediate, diagnostics, neg_context);
    }
    // In constraint
    else if(f.is(F::Seq) && f.sig() == IN) {
      return interpret_in_decomposition<kind, diagnose>(f, env, intermediate, diagnostics, neg_context);
    }
    // Binarize logical connectors
    else if(f.is(F::Seq) && f.seq().size() > 2 && (f.sig() == AND || f.sig() == OR || f.sig() == EQUIV)) {
      return interpret_formula<kind, diagnose>(binarize(f,0), env, intermediate, diagnostics, neg_context);
    }
    else if(f.is_binary()) {
      Sig sig = f.sig();
      switch(sig) {
        case AND: return interpret_conjunction<kind, diagnose>(f.seq(0), f.seq(1), env, intermediate, diagnostics, neg_context);
        case OR:  return interpret_disjunction<kind, diagnose>(f.seq(0), f.seq(1), env, intermediate, diagnostics);
        case EQUIV: return interpret_biconditional<kind, diagnose>(f.seq(0), f.seq(1), env, intermediate, diagnostics);
        case IMPLY: return interpret_implication<kind, diagnose>(f.seq(0), f.seq(1), env, intermediate, diagnostics);
        case XOR: return interpret_xor<kind, diagnose>(f.seq(0), f.seq(1), env, intermediate, diagnostics);
        case EQ: {
          // Whenever an operand of `=` is a formula with logical connectors or predicate, we interpret `=` as `<=>`.
          if(f.seq(0).is_logical() || f.seq(1).is_logical() || f.seq(0).is_predicate() || f.seq(1).is_predicate()) {
            return interpret_biconditional<kind, diagnose>(f.seq(0), f.seq(1), env, intermediate, diagnostics);
          }
          else {
            return interpret_equality<kind, diagnose>(f.seq(0), f.seq(1), env, intermediate, diagnostics);
          }
        }
        case NEQ: return interpret_disequality<kind, diagnose>(f.seq(0), f.seq(1), env, intermediate, diagnostics);
        case LEQ: return interpret_inequalityLEQ<kind, diagnose>(f.seq(0), f.seq(1), env, intermediate, diagnostics);
        case GT: return interpret_inequalityGT<kind, diagnose>(f.seq(0), f.seq(1), env, intermediate, diagnostics);
        // GEQ and LT are rewritten by swapping the operands of LEQ and GT.
        case GEQ: return interpret_inequalityLEQ<kind, diagnose>(f.seq(1), f.seq(0), env, intermediate, diagnostics);
        case LT: return interpret_inequalityGT<kind, diagnose>(f.seq(1), f.seq(0), env, intermediate, diagnostics);
        default:
          RETURN_INTERPRETATION_ERROR("Unsupported binary symbol in a formula.");
      }
    }
    RETURN_INTERPRETATION_ERROR("The shape of this formula is not supported.");
  }
573 
574 public:
  /** Interpret `f` first in the sub-domain, and only in PC when the sub-domain fails
   * (or interprets imprecisely, e.g. `x in S` in a domain that does not preserve join).
   * On success, the result is accumulated in `intermediate.sub_value` or `intermediate.props`. */
  template <IKind kind, bool diagnose = false, class F, class Env, class I>
  CUDA NI bool interpret(const F& f, Env& env, I& intermediate, IDiagnostics& diagnostics) const {
    size_t error_context = 0;
    if constexpr(diagnose) {
      diagnostics.add_suberror(IDiagnostics(false, name, "Uninterpretable formula in both PC and its sub-domain.", f));
      error_context = diagnostics.num_suberrors();
    }
    bool res = false;
    AType current = f.type();
    // The formula is temporarily retyped for the sub-domain; the const_cast is confined
    // to this retyping and the original type is restored below.
    const_cast<F&>(f).type_as(sub->aty()); // We restore the type after the call to sub->interpret.
    if(sub->template interpret<kind, diagnose>(f, env, intermediate.sub_value, diagnostics)) {
      // A successful interpretation in the sub-domain does not mean it is interpreted exactly.
      // Sometimes, we can improve the precision by interpreting it in PC.
      // This is the case of `x in S` predicate for sub-domain that do not preserve meet.
      if(!(f.is_binary() && f.sig() == IN && f.seq(0).is_variable() && f.seq(1).is(F::S) && f.seq(1).s().size() > 1)) {
        res = true; // it is not a formula `x in S`.
      }
      else {
        res = universe_type::preserve_join; // it is `x in S` but it preserves join.
      }
    }
    const_cast<F&>(f).type_as(current);
    // Fall back on interpreting `f` as a propagator in PC itself.
    if(!res) {
      res = interpret_formula<kind, diagnose>(f, env, intermediate.props, diagnostics);
    }
    if constexpr(diagnose) {
      // Discard the sub-errors accumulated above if the interpretation eventually succeeded.
      diagnostics.merge(res, error_context);
    }
    return res;
  }
605 
  /** PC expects a non-conjunctive formula \f$ c \f$ which can either be interpreted in the sub-domain `A` or in the current domain.
   */
  template <bool diagnose = false, class F, class Env, class Alloc2>
  CUDA NI bool interpret_tell(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    return interpret<IKind::TELL, diagnose>(f, env, tell, diagnostics);
  }
612 
  /** Ask-interpretation of `f`. The `const_cast` removes the constness of `env` only to
   * reuse `interpret`, which temporarily retypes `f` but restores it before returning. */
  template <bool diagnose = false, class F, class Env, class Alloc2>
  CUDA NI bool interpret_ask(const F& f, const Env& env, ask_type<Alloc2>& ask, IDiagnostics& diagnostics) const {
    return interpret<IKind::ASK, diagnose>(f, const_cast<Env&>(env), ask, diagnostics);
  }
617 
  /** Note that we cannot add propagators in parallel (but modifying the underlying domain is ok).
    This is a current limitation that we should fix later on.
    Notes for later:
    * To implement "telling of propagators", we would need to check if a propagator has already been added or not (for idempotency).
    * 1. Walk through the existing propagators to check which ones are already in.
    * 2. If a propagator has the same shape but different constant `U`, meet them in place. */
  template <class Alloc2>
  CUDA local::B deduce(const tell_type<Alloc2>& t) {
    local::B has_changed = sub->deduce(t.sub_value);
    if(t.props.size() > 0) {
      auto& props2 = *props;
      int n = props2.size();
      props2.reserve(n + t.props.size());
      // Copy the new propagators into our own allocator.
      for(int i = 0; i < t.props.size(); ++i) {
        props2.push_back(formula_type(t.props[i], get_allocator()));
      }
      // Precompute the lengths so the comparator below does not recompute them.
      battery::vector<int> lengths(props2.size());
      for(int i = 0; i < props2.size(); ++i) {
        lengths[i] = props2[i].length();
      }
      // Sort by kind, then by length.
      // NOTE(review): the comparator indexes `lengths` with the same indices as `props2`;
      // this relies on `battery::sorti` sorting via an index permutation — confirm its contract.
      battery::sorti(props2,
        [&](int i, int j) {
          return props2[i].kind() < props2[j].kind() || (props2[i].kind() == props2[j].kind() && lengths[i] < lengths[j]);
        });
      return true; // Adding propagators always counts as a change.
    }
    return has_changed;
  }
646 
  /** Join the abstract universe `dom` into the domain of the variable `x`, delegated to the sub-domain. */
  CUDA bool embed(AVar x, const universe_type& dom) {
    return sub->embed(x, dom);
  }
650 
651  template <class Alloc2>
652  CUDA local::B ask(const ask_type<Alloc2>& t) const {
653  for(int i = 0; i < t.props.size(); ++i) {
654  if(!t.props[i].ask(*sub)) {
655  return false;
656  }
657  }
658  return sub->ask(t.sub_value);
659  }
660 
  /** `true` iff the `i`-th propagator is entailed in the current sub-domain. */
  CUDA local::B ask(int i) const {
    return (*props)[i].ask(*sub);
  }
664 
  /** Total number of deduction functions: those of the sub-domain followed by our propagators. */
  CUDA int num_deductions() const {
    return sub->num_deductions() + props->size();
  }
668 
669  CUDA local::B deduce(int i) {
670  assert(i < num_deductions());
671  if(is_top()) { return false; }
672  else if(i < sub->num_deductions()) {
673  return sub->deduce(i);
674  }
675  else {
676  return (*props)[i - sub->num_deductions()].deduce(*sub);
677  }
678  }
679 
680  // Functions forwarded to the sub-domain `A`.
681 
  /** `true` if the underlying abstract element is bot, `false` otherwise. */
  CUDA local::B is_bot() const {
    return sub->is_bot();
  }
686 
  /** `true` if the underlying abstract element is top and there is no deduction function, `false` otherwise. */
  CUDA local::B is_top() const {
    return sub->is_top() && props->size() == 0;
  }
691 
  /** Projection of the `x`-th variable's domain, forwarded to the sub-domain. */
  CUDA auto /* universe_type or const universe_type& */ operator[](int x) const {
    return (*sub)[x];
  }
695 
  /** Projection of the abstract variable `x`'s domain, forwarded to the sub-domain. */
  CUDA auto /* universe_type or const universe_type& */ project(AVar x) const {
    return sub->project(x);
  }
699 
  /** Project the abstract variable `x`'s domain into `u`, forwarded to the sub-domain. */
  template <class Univ>
  CUDA void project(AVar x, Univ& u) const {
    sub->project(x, u);
  }
704 
  /** Number of variables in the sub-domain (PC itself adds no variables). */
  CUDA int vars() const {
    return sub->vars();
  }
708 
  /** Snapshot the current state: the number of propagators plus the sub-domain's snapshot.
   * Restorable with `restore` as long as propagators are only appended afterwards. */
  template <class Alloc2 = allocator_type>
  CUDA snapshot_type<Alloc2> snapshot(const Alloc2& alloc = Alloc2()) const {
    return snapshot_type<Alloc2>(props->size(), sub->snapshot(alloc));
  }
713 
714  template <class Alloc2>
715  CUDA void restore(const snapshot_type<Alloc2>& snap) {
716  int n = props->size();
717  for(int i = snap.num_props; i < n; ++i) {
718  props->pop_back();
719  }
720  sub->restore(snap.sub_snap);
721  }
722 
  /** An abstract element is extractable when it is not equal to bot, if all propagators are entailed and if the underlying abstract element is extractable.
   * NOTE(review): the delegation to `sub->is_extractable(strategy)` is currently commented
   * out, so `strategy` is unused and the sub-domain's own extractability is not checked — confirm intended. */
  template <class ExtractionStrategy = NonAtomicExtraction>
  CUDA bool is_extractable(const ExtractionStrategy& strategy = ExtractionStrategy()) const {
    if(is_bot()) {
      return false;
    }
    for(int i = 0; i < props->size(); ++i) {
      if(!ask(i)) {
        return false;
      }
    }
    return true;
    // return sub->is_extractable(strategy);
  }
737 
  /** Extract the current element into `ua`.
   * \pre `is_extractable()` must be `true`.
   * For efficiency reason, if `B` is a propagator completion, the propagators are not copied in `ua`.
   * (It is OK, since they are entailed, they don't bring information anymore.) */
  template <class B>
  CUDA void extract(B& ua) const {
    if constexpr(impl::is_pc_like<B>::value) {
      // `ua` is a PC: extract sub-domain to sub-domain, skipping the (entailed) propagators.
      sub->extract(*ua.sub);
    }
    else {
      sub->extract(ua);
    }
  }
751 
  /** Deinterpret this element into a logical formula: the sub-domain's formula conjoined
   * with one formula per propagator. Returns the sub-domain's formula alone when there is no propagator. */
  template<class Env, class Allocator2 = typename Env::allocator_type>
  CUDA NI TFormula<Allocator2> deinterpret(const Env& env, Allocator2 allocator = Allocator2()) const {
    using F = TFormula<Allocator2>;
    if(props->size() == 0) {
      return sub->deinterpret(env, allocator);
    }
    typename F::Sequence seq{allocator};
    seq.push_back(sub->deinterpret(env, allocator));
    for(int i = 0; i < props->size(); ++i) {
      seq.push_back((*props)[i].deinterpret(*sub, env, aty(), allocator));
    }
    return F::make_nary(AND, std::move(seq), aty());
  }
765 
  /** Deinterpret an interpreted (tell/ask) value `intermediate` into a logical formula:
   * the sub-domain part conjoined with each interpreted propagator. */
  template<class I, class Env, class Allocator2 = typename Env::allocator_type>
  CUDA NI TFormula<Allocator2> deinterpret(const I& intermediate, const Env& env, Allocator2 allocator = Allocator2()) const {
    using F = TFormula<Allocator2>;
    typename F::Sequence seq{allocator};
    seq.push_back(sub->deinterpret(intermediate.sub_value, env, allocator));
    for(int i = 0; i < intermediate.props.size(); ++i) {
      seq.push_back(intermediate.props[i].deinterpret(*sub, env, aty(), allocator));
    }
    return F::make_nary(AND, std::move(seq), aty());
  }
776 };
777 
778 }
779 
780 #endif
Definition: pc.hpp:39
CUDA AType aty() const
Definition: pc.hpp:175
CUDA int vars() const
Definition: pc.hpp:705
A sub_type
Definition: pc.hpp:41
constexpr static const bool preserve_join
Definition: pc.hpp:80
CUDA allocator_type get_allocator() const
Definition: pc.hpp:171
CUDA PC(const PC< A2, Alloc2 > &other, AbstractDeps< Allocators... > &deps)
Definition: pc.hpp:165
static CUDA this_type bot(AType atype=UNTYPED, AType atype_sub=UNTYPED, const allocator_type &alloc=allocator_type(), const sub_allocator_type &sub_alloc=sub_allocator_type())
Definition: pc.hpp:179
CUDA snapshot_type< Alloc2 > snapshot(const Alloc2 &alloc=Alloc2()) const
Definition: pc.hpp:710
CUDA PC(PC &&other)
Definition: pc.hpp:142
constexpr static const bool is_totally_ordered
Definition: pc.hpp:76
typename A::allocator_type sub_allocator_type
Definition: pc.hpp:45
CUDA local::B deduce(const tell_type< Alloc2 > &t)
Definition: pc.hpp:625
CUDA NI bool interpret(const F &f, Env &env, I &intermediate, IDiagnostics &diagnostics) const
Definition: pc.hpp:576
CUDA void project(AVar x, Univ &u) const
Definition: pc.hpp:701
static CUDA this_type top(Env &env, const allocator_type &alloc=allocator_type(), const sub_allocator_type &sub_alloc=sub_allocator_type())
Definition: pc.hpp:207
battery::vector< pc::Formula< A, Alloc >, Alloc > formula_seq
Definition: pc.hpp:101
typename A::universe_type universe_type
Definition: pc.hpp:42
CUDA int num_deductions() const
Definition: pc.hpp:665
CUDA bool is_extractable(const ExtractionStrategy &strategy=ExtractionStrategy()) const
Definition: pc.hpp:725
CUDA void extract(B &ua) const
Definition: pc.hpp:743
static CUDA this_type top(AType atype=UNTYPED, AType atype_sub=UNTYPED, const allocator_type &alloc=allocator_type(), const sub_allocator_type &sub_alloc=sub_allocator_type())
Definition: pc.hpp:188
CUDA NI bool interpret_tell(const F &f, Env &env, tell_type< Alloc2 > &tell, IDiagnostics &diagnostics) const
Definition: pc.hpp:609
constexpr static const bool preserve_top
Definition: pc.hpp:79
constexpr static const bool injective_concretization
Definition: pc.hpp:82
typename universe_type::local_type local_universe_type
Definition: pc.hpp:43
constexpr static const bool preserve_bot
Definition: pc.hpp:77
CUDA auto operator[](int x) const
Definition: pc.hpp:692
CUDA local::B is_bot() const
Definition: pc.hpp:683
CUDA local::B is_top() const
Definition: pc.hpp:688
Allocator allocator_type
Definition: pc.hpp:44
constexpr static const bool is_abstract_universe
Definition: pc.hpp:74
CUDA PC(AType atype, sub_ptr sub, const allocator_type &alloc=allocator_type{})
Definition: pc.hpp:138
constexpr static const bool sequential
Definition: pc.hpp:75
CUDA auto project(AVar x) const
Definition: pc.hpp:696
constexpr static const bool preserve_meet
Definition: pc.hpp:81
CUDA local::B ask(int i) const
Definition: pc.hpp:661
battery::vector< pc::Term< A, Alloc >, Alloc > term_seq
Definition: pc.hpp:104
CUDA bool embed(AVar x, const universe_type &dom)
Definition: pc.hpp:647
CUDA NI TFormula< Allocator2 > deinterpret(const I &intermediate, const Env &env, Allocator2 allocator=Allocator2()) const
Definition: pc.hpp:767
constexpr static const char * name
Definition: pc.hpp:84
CUDA void restore(const snapshot_type< Alloc2 > &snap)
Definition: pc.hpp:715
CUDA NI TFormula< Allocator2 > deinterpret(const Env &env, Allocator2 allocator=Allocator2()) const
Definition: pc.hpp:753
CUDA local::B deduce(int i)
Definition: pc.hpp:669
CUDA local::B ask(const ask_type< Alloc2 > &t) const
Definition: pc.hpp:652
abstract_ptr< sub_type > sub_ptr
Definition: pc.hpp:72
static CUDA this_type bot(Env &env, const allocator_type &alloc=allocator_type(), const sub_allocator_type &sub_alloc=sub_allocator_type())
Definition: pc.hpp:197
constexpr static const bool preserve_concrete_covers
Definition: pc.hpp:83
CUDA NI bool interpret_ask(const F &f, const Env &env, ask_type< Alloc2 > &ask, IDiagnostics &diagnostics) const
Definition: pc.hpp:614
Definition: formula.hpp:864
Definition: formula.hpp:8
Definition: pc.hpp:107
interpreted_type(interpreted_type &&)=default
SubType sub_value
Definition: pc.hpp:108
interpreted_type(const interpreted_type &)=default
interpreted_type & operator=(interpreted_type &&)=default
CUDA interpreted_type(const Alloc &alloc=Alloc{})
Definition: pc.hpp:119
CUDA interpreted_type(const InterpretedType &other, const Alloc &alloc=Alloc{})
Definition: pc.hpp:123
formula_seq< Alloc > props
Definition: pc.hpp:109
CUDA interpreted_type(const SubType &sub_value, const Alloc &alloc=Alloc{})
Definition: pc.hpp:115
Definition: pc.hpp:50
A::template snapshot_type< Alloc > sub_snap_type
Definition: pc.hpp:51
snapshot_type< Alloc > & operator=(const snapshot_type< Alloc > &)=default
CUDA snapshot_type(int num_props, sub_snap_type &&sub_snap)
Definition: pc.hpp:55
int num_props
Definition: pc.hpp:52
sub_snap_type sub_snap
Definition: pc.hpp:53
snapshot_type(snapshot_type< Alloc > &&)=default
snapshot_type< Alloc > & operator=(snapshot_type< Alloc > &&)=default
CUDA snapshot_type(const SnapshotType &other, const Alloc &alloc=Alloc{})
Definition: pc.hpp:66
snapshot_type(const snapshot_type< Alloc > &)=default