Lattice land propagators completion library
pir.hpp
1 // Copyright 2024 Pierre Talbot
2 
3 #ifndef LALA_PC_PIR_HPP
4 #define LALA_PC_PIR_HPP
5 
6 #include "battery/vector.hpp"
7 #include "battery/unique_ptr.hpp"
8 #include "battery/shared_ptr.hpp"
9 #include "battery/root_ptr.hpp"
10 #include "battery/allocator.hpp"
11 #include "battery/algorithm.hpp"
12 #include "battery/bitset.hpp"
13 
14 #include "lala/logic/logic.hpp"
15 #include "lala/logic/ternarize.hpp"
16 #include "lala/universes/arith_bound.hpp"
17 #include "lala/abstract_deps.hpp"
18 #include "lala/vstore.hpp"
19 
20 #include "terms.hpp"
21 
22 namespace lala {
23 
24 template <class A, class Alloc> class PIR;
25 namespace impl {
26  template <class>
27  struct is_pir_like {
28  static constexpr bool value = false;
29  };
30  template<class A, class Alloc>
31  struct is_pir_like<PIR<A, Alloc>> {
32  static constexpr bool value = true;
33  };
34 }
35 
36  // This represents a constraint of the form `X = Y [op] Z`.
37 struct bytecode_type {
38  Sig op;
39  AVar x;
40  AVar y;
41  AVar z;
42  constexpr bytecode_type() = default;
43  constexpr bytecode_type(const bytecode_type&) = default;
44  CUDA INLINE const AVar& operator[](int i) const {
45  return i == 0 ? x : (i == 1 ? y : z);
46  }
47 };
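// Illustrative sketch: assuming `vx`, `vy` and `vz` are the AVars obtained by interpreting
// the variables x, y and z, the constraint `x = y + z` is stored as:
//   bytecode_type bc;
//   bc.op = ADD; bc.x = vx; bc.y = vy; bc.z = vz;
//   // bc[0], bc[1] and bc[2] then access vx, vy and vz respectively.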
48 
49 /** PIR is an abstract transformer built on top of an abstract domain `A`.
50  It is expected that `A` has a projection function `u = project(x)`.
51  We also expect a `tell(x, u, has_changed)` function to join the abstract universe `u` into the domain of the variable `x`.
52  An example of an abstract domain satisfying these requirements is `VStore<Interval<ZInc>>`. */
53 template <class A, class Allocator = typename A::allocator_type>
54 class PIR {
55 public:
56  using sub_type = A;
57  using universe_type = typename A::universe_type;
58  using local_universe_type = typename universe_type::local_type;
59  using allocator_type = Allocator;
60  using sub_allocator_type = typename A::allocator_type;
61  using this_type = PIR<A, Allocator>;
62 
63  template <class Alloc>
64  struct snapshot_type
65  {
66  using sub_snap_type = A::template snapshot_type<Alloc>;
67  int num_bytecodes;
68  sub_snap_type sub_snap;
69 
70  CUDA snapshot_type(int num_bytecodes, sub_snap_type&& sub_snap)
71  : num_bytecodes(num_bytecodes)
72  , sub_snap(std::move(sub_snap))
73  {}
74 
75  snapshot_type(const snapshot_type<Alloc>&) = default;
76  snapshot_type(snapshot_type<Alloc>&&) = default;
77  snapshot_type<Alloc>& operator=(snapshot_type<Alloc>&&) = default;
78  snapshot_type<Alloc>& operator=(const snapshot_type<Alloc>&) = default;
79 
80  template <class SnapshotType>
81  CUDA snapshot_type(const SnapshotType& other, const Alloc& alloc = Alloc{})
82  : num_bytecodes(other.num_bytecodes)
83  , sub_snap(other.sub_snap, alloc)
84  {}
85  };
86 
87  using sub_ptr = abstract_ptr<sub_type>;
88 
89  constexpr static const bool is_abstract_universe = false;
90  constexpr static const bool sequential = sub_type::sequential;
91  constexpr static const bool is_totally_ordered = false;
92  constexpr static const bool preserve_bot = true;
93  // The next properties should be checked more carefully; relying on the sub-domain might be unnecessarily restrictive.
94  constexpr static const bool preserve_top = sub_type::preserve_top;
95  constexpr static const bool preserve_join = sub_type::preserve_join;
96  constexpr static const bool preserve_meet = sub_type::preserve_meet;
97  constexpr static const bool injective_concretization = sub_type::injective_concretization;
98  constexpr static const bool preserve_concrete_covers = sub_type::preserve_concrete_covers;
99  constexpr static const char* name = "PIR";
100 
101  template <class A2, class Alloc2>
102  friend class PIR;
103 
104 private:
105  AType atype;
106  sub_ptr sub;
107 
108  const local_universe_type ZERO;
109  const local_universe_type ONE;
110 
111  static_assert(sizeof(int) == sizeof(AVar), "The size of AVar must be equal to the size of an int.");
112  static_assert(sizeof(int) == sizeof(Sig), "The size of Sig must be equal to the size of an int.");
113 
114  using bytecodes_type = battery::vector<bytecode_type, allocator_type>;
115  using bytecodes_ptr = battery::root_ptr<battery::vector<bytecode_type, allocator_type>, allocator_type>;
116 
117  /** We represent the constraints X = Y [op] Z. */
118  bytecodes_ptr bytecodes;
119 
120  using LB = typename local_universe_type::LB;
121  using UB = typename local_universe_type::UB;
122 
123 public:
124  template <class Alloc, class SubType>
125  struct interpreted_type {
126  SubType sub_value;
127  battery::vector<bytecode_type, Alloc> bytecodes;
128 
129  interpreted_type(interpreted_type&&) = default;
130  interpreted_type(const interpreted_type&) = default;
131  interpreted_type& operator=(interpreted_type&&) = default;
132 
133  CUDA interpreted_type(const SubType& sub_value, const Alloc& alloc = Alloc{})
134  : sub_value(sub_value, alloc)
135  , bytecodes(alloc)
136  {}
137 
138  CUDA interpreted_type(const Alloc& alloc = Alloc{})
139  : interpreted_type(SubType(alloc), alloc)
140  {}
141 
142  template <class InterpretedType>
143  CUDA interpreted_type(const InterpretedType& other, const Alloc& alloc = Alloc{})
144  : sub_value(other.sub_value, alloc)
145  , bytecodes(other.bytecodes, alloc)
146  {}
147 
148  template <class Alloc2, class SubType2>
149  friend struct interpreted_type;
150  };
151 
152  template <class Alloc>
153  using tell_type = interpreted_type<Alloc, typename sub_type::template tell_type<Alloc>>;
154 
155  template <class Alloc>
156  using ask_type = interpreted_type<Alloc, typename sub_type::template ask_type<Alloc>>;
157 
158  CUDA PIR(AType atype, sub_ptr sub, const allocator_type& alloc = allocator_type{})
159  : atype(atype), sub(std::move(sub))
160  , ZERO(local_universe_type::eq_zero())
161  , ONE(local_universe_type::eq_one())
162  , bytecodes(battery::allocate_root<bytecodes_type, allocator_type>(alloc, alloc))
163  {}
164 
165  CUDA PIR(PIR&& other)
166  : atype(other.atype)
167  , sub(std::move(other.sub))
168  , ZERO(std::move(other.ZERO))
169  , ONE(std::move(other.ONE))
170  , bytecodes(std::move(other.bytecodes))
171  {}
172 
173 private:
174  // When activated (`deps.is_shared_copy()`), we avoid copying the propagators and share them with those of the root `other`.
175  // This saves memory and avoids contention on the L2 cache among blocks.
176  template<class A2, class Alloc2, class... Allocators>
177  CUDA static bytecodes_ptr init_bytecodes(const PIR<A2, Alloc2>& other, AbstractDeps<Allocators...>& deps) {
178  auto alloc = deps.template get_allocator<allocator_type>();
179  if constexpr(std::is_same_v<allocator_type, Alloc2>) {
180  if(deps.is_shared_copy()) {
181  return other.bytecodes;
182  }
183  }
184  bytecodes_ptr r = battery::allocate_root<bytecodes_type, allocator_type>(alloc, *(other.bytecodes), alloc);
185  return std::move(r);
186  }
187 
188 public:
189  template<class A2, class Alloc2, class... Allocators>
190  CUDA PIR(const PIR<A2, Alloc2>& other, AbstractDeps<Allocators...>& deps)
191  : atype(other.atype)
192  , sub(deps.template clone<A>(other.sub))
193  , ZERO(other.ZERO)
194  , ONE(other.ONE)
195  , bytecodes(init_bytecodes(other, deps))
196  {}
197 
198  CUDA allocator_type get_allocator() const {
199  return bytecodes.get_allocator();
200  }
201 
202  CUDA AType aty() const {
203  return atype;
204  }
205 
206  CUDA static this_type bot(AType atype = UNTYPED,
207  AType atype_sub = UNTYPED,
208  const allocator_type& alloc = allocator_type(),
209  const sub_allocator_type& sub_alloc = sub_allocator_type())
210  {
211  return PIR{atype, battery::allocate_shared<sub_type>(sub_alloc, sub_type::bot(atype_sub, sub_alloc)), alloc};
212  }
213 
214  /** A special symbolic element representing top. */
215  CUDA static this_type top(AType atype = UNTYPED,
216  AType atype_sub = UNTYPED,
217  const allocator_type& alloc = allocator_type(),
218  const sub_allocator_type& sub_alloc = sub_allocator_type())
219  {
220  return PIR{atype, battery::allocate_shared<sub_type>(sub_alloc, sub_type::top(atype_sub, sub_alloc)), alloc};
221  }
222 
223  template <class Env>
224  CUDA static this_type bot(Env& env,
225  const allocator_type& alloc = allocator_type(),
226  const sub_allocator_type& sub_alloc = sub_allocator_type())
227  {
228  AType atype_sub = env.extends_abstract_dom();
229  AType atype = env.extends_abstract_dom();
230  return bot(atype, atype_sub, alloc, sub_alloc);
231  }
232 
233  template <class Env>
234  CUDA static this_type top(Env& env,
235  const allocator_type& alloc = allocator_type(),
236  const sub_allocator_type& sub_alloc = sub_allocator_type())
237  {
238  AType atype_sub = env.extends_abstract_dom();
239  AType atype = env.extends_abstract_dom();
240  return top(atype, atype_sub, alloc, sub_alloc);
241  }
242 
243 private:
244  /** We interpret the formula `f` into the value `intermediate`; note that at most one constraint is added to `intermediate`, and only if the interpretation succeeds. */
245  template <IKind kind, bool diagnose, class F, class Env, class Intermediate>
246  CUDA bool interpret_formula(const F& f, Env& env, Intermediate& intermediate, IDiagnostics& diagnostics) const {
247  if(f.type() != aty() && !f.is_untyped()) {
248  RETURN_INTERPRETATION_ERROR("The type of the formula does not match the type of this abstract domain.");
249  }
250  if(f.is_binary()) {
251  Sig sig = f.sig();
252  // Expect a constraint of the form X = Y <OP> Z or Y <OP> Z = X.
253  int left = f.seq(0).is_binary();
254  int right = f.seq(1).is_binary();
255  if(sig == EQ && (left || right)) {
256  auto& X = f.seq(left);
257  auto& Y = f.seq(right).seq(0);
258  auto& Z = f.seq(right).seq(1);
259  bytecode_type bytecode;
260  bytecode.op = f.seq(right).sig();
261  if(X.is_variable() && Y.is_variable() && Z.is_variable() &&
262  (bytecode.op == ADD || bytecode.op == MUL || bytecode.op == SUB || bytecode.op == EDIV || bytecode.op == EMOD
263  || bytecode.op == MIN || bytecode.op == MAX
264  || bytecode.op == EQ || bytecode.op == LEQ))
265  {
266  if( env.template interpret<diagnose>(X, bytecode.x, diagnostics)
267  && env.template interpret<diagnose>(Y, bytecode.y, diagnostics)
268  && env.template interpret<diagnose>(Z, bytecode.z, diagnostics))
269  {
270  intermediate.bytecodes.push_back(bytecode);
271  return true;
272  }
273  return false;
274  }
275  }
276  }
277  RETURN_INTERPRETATION_ERROR("The shape of this formula is not supported.");
278  }
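// For instance (sketch): `x = y + z` yields the bytecode {ADD, x, y, z} and `b = (y <= z)`
// yields {LEQ, b, y, z}, provided x, y, z and b are all variables; any other shape
// (e.g. `x + y + z = 0`, or a constant in place of a variable) falls through to the
// error above and must be ternarized first.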
279 
280 public:
281  template <IKind kind, bool diagnose = false, class F, class Env, class I>
282  CUDA NI bool interpret(const F& f, Env& env, I& intermediate, IDiagnostics& diagnostics) const {
283  size_t error_context = 0;
284  if constexpr(diagnose) {
285  diagnostics.add_suberror(IDiagnostics(false, name, "Uninterpretable formula in both PIR and its sub-domain.", f));
286  error_context = diagnostics.num_suberrors();
287  }
288  bool res = false;
289  AType current = f.type();
290  const_cast<F&>(f).type_as(sub->aty()); // We will restore the type after the call to sub->interpret.
291  if(sub->template interpret<kind, diagnose>(f, env, intermediate.sub_value, diagnostics)) {
292  // A successful interpretation in the sub-domain does not mean the formula is interpreted exactly.
293  // Sometimes, we can improve the precision by also interpreting it in PIR.
294  // This is the case of the `x in S` predicate for sub-domains that do not preserve meet.
295  if(!(f.is_binary() && f.sig() == IN && f.seq(0).is_variable() && f.seq(1).is(F::S) && f.seq(1).s().size() > 1)) {
296  res = true; // it is not a formula `x in S`.
297  }
298  else {
299  res = universe_type::preserve_join; // it is `x in S` but it preserves join.
300  }
301  }
302  const_cast<F&>(f).type_as(current);
303  if(!res) {
304  if(f.is_binary() && f.sig() == IN && f.seq(1).is(F::S)) {
305  F g = normalize(ternarize(decompose_in_constraint(f), env));
306  res = ginterpret_in<kind, diagnose>(*this, g, env, intermediate, diagnostics);
307  }
308  /** If `t1 <op> t2 in S` we decompose `t2 in S`. */
309  else if(f.is_binary() && f.seq(1).is_binary() && f.seq(1).sig() == IN) {
310  F g = normalize(ternarize(F::make_binary(f.seq(0), f.sig(), decompose_in_constraint(f.seq(1))), env, true));
311  res = ginterpret_in<kind, diagnose>(*this, g, env, intermediate, diagnostics);
312  }
313  /** It is a unary constraint that could not be interpreted by the underlying store. */
314  else if(num_vars(f) == 1) {
315  F g = normalize(ternarize(f, env, true));
316  if(g != f) {
317  res = ginterpret_in<kind, diagnose>(*this, g, env, intermediate, diagnostics);
318  }
319  else {
320  res = interpret_formula<kind, diagnose>(f, env, intermediate, diagnostics);
321  }
322  }
323  else {
324  res = interpret_formula<kind, diagnose>(f, env, intermediate, diagnostics);
325  }
326  }
327  if constexpr(diagnose) {
328  diagnostics.merge(res, error_context);
329  }
330  return res;
331  }
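// Example (sketch): a constraint such as `x in S` with more than one element that the
// sub-store cannot represent exactly, or a unary constraint the store rejects, is first
// rewritten by `decompose_in_constraint` and `ternarize` into constraints of the shape
// X = Y <op> Z, which are then interpreted through `interpret_formula`.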
332 
333  /** PIR expects a non-conjunctive formula \f$ c \f$, which can be interpreted either in the sub-domain `A` or in the current domain.
334  */
335  template <bool diagnose = false, class F, class Env, class Alloc2>
336  CUDA NI bool interpret_tell(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
337  return interpret<IKind::TELL, diagnose>(f, env, tell, diagnostics);
338  }
339 
340  template <bool diagnose = false, class F, class Env, class Alloc2>
341  CUDA NI bool interpret_ask(const F& f, const Env& env, ask_type<Alloc2>& ask, IDiagnostics& diagnostics) const {
342  return interpret<IKind::ASK, diagnose>(f, const_cast<Env&>(env), ask, diagnostics);
343  }
344 
345  /** Same limitations as `PC::deduce`. */
346  template <class Alloc2>
347  CUDA local::B deduce(const tell_type<Alloc2>& t) {
348  local::B has_changed = sub->deduce(t.sub_value);
349  if(t.bytecodes.size() > 0) {
350  bytecodes->reserve(bytecodes->size() + t.bytecodes.size());
351  for(int i = 0; i < t.bytecodes.size(); ++i) {
352  bytecodes->push_back(t.bytecodes[i]);
353  }
354  /** This sorts the constraints `X = Y <op> Z` according to their operator <OP>. */
355  battery::sorti(*bytecodes,
356  [&](int i, int j) { return (*bytecodes)[i].op < (*bytecodes)[j].op; });
357  has_changed = true;
358  }
359  return has_changed;
360  }
361 
362  CUDA bool embed(AVar x, const universe_type& dom) {
363  return sub->embed(x, dom);
364  }
365 
366 private:
367  CUDA local::B ask(bytecode_type bytecode) const {
368  // We load the variables.
369  local_universe_type r1((*sub)[bytecode.x]);
370  local_universe_type r2((*sub)[bytecode.y]);
371  local_universe_type r3((*sub)[bytecode.z]);
372 
373  // Reified constraint: X = (Y = Z) and X = (Y <= Z).
374  if(bytecode.op == EQ || bytecode.op == LEQ) {
375  // Y [op] Z
376  if(r1 >= ONE) {
377  return bytecode.op == EQ
378  ? (r2 == r3 && r2.lb().value() == r2.ub().value())
379  : r2.ub().value() <= r3.lb().value();
380  }
381  // not (Y [op] Z)
382  else if(r1 >= ZERO) {
383  return bytecode.op == EQ
384  ? fmeet(r2, r3).is_bot().value()
385  : r2.lb().value() > r3.ub().value();
386  }
387  // We need `r1` to be a singleton to decide whether the constraint is entailed.
388  return false;
389  }
390  // Arithmetic constraint: X = Y + Z, X = Y - Z, ...
391  else {
392  local_universe_type right;
393  right.project(bytecode.op, r2, r3);
394  return right == r1 && r1.lb().value() == r1.ub().value();
395  }
396  }
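// Entailment examples (sketch): a reified bytecode `b = (y <= z)` is entailed once b is
// fixed and the corresponding relation on the bounds holds (e.g. ub(y) <= lb(z) when b is 1,
// lb(y) > ub(z) when b is 0); an arithmetic bytecode such as `x = y + z` is entailed once
// x is a singleton equal to the projection of `y + z`.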
397 
398 public:
399  CUDA INLINE bytecode_type load_deduce(int i) const {
400  #ifdef __CUDA_ARCH__
401  // Vectorize load (int4).
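 // The static_asserts above make a bytecode four int-sized fields (16 bytes with 4-byte int),
 // so a single int4 transaction fetches the whole bytecode.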
402  int4 b4 = reinterpret_cast<int4*>(bytecodes->data())[i];
403  return *reinterpret_cast<bytecode_type*>(&b4);
404  #else
405  return (*bytecodes)[i];
406  #endif
407  }
408 
409  CUDA local::B ask(int i) const {
410  return ask(load_deduce(i));
411  }
412 
413  template <class Alloc2>
414  CUDA local::B ask(const ask_type<Alloc2>& t) const {
415  for(int i = 0; i < t.bytecodes.size(); ++i) {
416  if(!ask(t.bytecodes[i])) {
417  return false;
418  }
419  }
420  return sub->ask(t.sub_value);
421  }
422 
423  CUDA int num_deductions() const {
424  return bytecodes->size();
425  }
426 
427 public:
428  CUDA local::B deduce(int i) {
429  assert(i < num_deductions());
430  return deduce(load_deduce(i));
431  }
432 
433  CUDA local::B deduce(bytecode_type bytecode) {
434  local::B has_changed = false;
435  // We load the variables.
436  local_universe_type r1;
437  local_universe_type r2((*sub)[bytecode.y]);
438  local_universe_type r3((*sub)[bytecode.z]);
439  // Reified constraint: X = (Y = Z) and X = (Y <= Z).
440  if(bytecode.op == EQ || bytecode.op == LEQ) {
441  r1 = (*sub)[bytecode.x];
442  // Y [op] Z
443  if(r1 <= ONE) {
444  if(bytecode.op == LEQ) {
445  r3.join_lb(LB::top());
446  r2.join_ub(UB::top());
447  }
448  has_changed |= sub->embed(bytecode.y, r3);
449  has_changed |= sub->embed(bytecode.z, r2);
450  }
451  // not (Y [op] Z)
452  else if(r1 <= ZERO) {
453  if(bytecode.op == EQ) {
454  if(r2.lb().value() == r2.ub().value()) {
455  r1 = r3;
456  r1.meet_lb(LB::prev(r2.lb()));
457  r3.meet_ub(UB::prev(r2.ub()));
458  has_changed |= sub->embed(bytecode.z, fjoin(r1,r3));
459  }
460  else if(r3.lb().value() == r3.ub().value()) {
461  r1 = r2;
462  r1.meet_lb(LB::prev(r3.lb()));
463  r2.meet_ub(UB::prev(r3.ub()));
464  has_changed |= sub->embed(bytecode.y, fjoin(r1,r2));
465  }
466  }
467  else {
468  assert(bytecode.op == LEQ);
469  r2.meet_lb(LB::prev(r3.lb()));
470  r3.meet_ub(UB::prev(r2.ub()));
471  has_changed |= sub->embed(bytecode.y, r2);
472  has_changed |= sub->embed(bytecode.z, r3);
473  }
474  }
475  // X <- 1
476  else if(r2.ub().value() <= r3.lb().value() && (bytecode.op == LEQ || r2.lb().value() == r3.ub().value())) {
477  has_changed |= sub->embed(bytecode.x, ONE);
478  }
479  // X <- 0
480  else if(r2.lb().value() > r3.ub().value() || (bytecode.op == EQ && r2.ub().value() < r3.lb().value())) {
481  has_changed |= sub->embed(bytecode.x, ZERO);
482  }
483  }
484  // Arithmetic constraint: X = Y + Z, X = Y - Z, ...
485  else {
486  // X <- Y [op] Z
487  r1.project(bytecode.op, r2, r3);
488  has_changed |= sub->embed(bytecode.x, r1);
489 
490  // Y <- X <left residual> Z
491  r1 = (*sub)[bytecode.x];
492  switch(bytecode.op) {
493  case ADD: pc::GroupAdd<local_universe_type>::left_residual(r1, r3, r2); break;
494  case SUB: pc::GroupSub<local_universe_type>::left_residual(r1, r3, r2); break;
495  case MUL: pc::GroupMul<local_universe_type, EDIV>::left_residual(r1, r3, r2); break;
496  case EDIV: pc::GroupDiv<local_universe_type, EDIV>::left_residual(r1, r3, r2); break;
497  case MIN: pc::GroupMinMax<local_universe_type, MIN>::left_residual(r1, r3, r2); break;
498  case MAX: pc::GroupMinMax<local_universe_type, MAX>::left_residual(r1, r3, r2); break;
499  default: assert(false);
500  }
501  has_changed |= sub->embed(bytecode.y, r2);
502 
503  // Z <- X <right residual> Y
504  r2 = (*sub)[bytecode.y];
505  r3.join_top();
506  switch(bytecode.op) {
507  case ADD: pc::GroupAdd<local_universe_type>::right_residual(r1, r2, r3); break;
508  case SUB: pc::GroupSub<local_universe_type>::right_residual(r1, r2, r3); break;
509  case MUL: pc::GroupMul<local_universe_type, EDIV>::right_residual(r1, r2, r3); break;
510  case EDIV: pc::GroupDiv<local_universe_type, EDIV>::right_residual(r1, r2, r3); break;
511  case MIN: pc::GroupMinMax<local_universe_type, MIN>::right_residual(r1, r2, r3); break;
512  case MAX: pc::GroupMinMax<local_universe_type, MAX>::right_residual(r1, r2, r3); break;
513  default: assert(false);
514  }
515  has_changed |= sub->embed(bytecode.z, r3);
516  }
517  return has_changed;
518  }
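// Worked example (sketch): for the bytecode `x = y + z` with y in [1,2] and z in [3,5],
// the projection gives x <- [4,7]; the left residual then refines y with `x - z` and the
// right residual refines z with `x - y`, each change being pushed through `sub->embed`.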
519 
520  // using event_type = battery::bitset<3, battery::local_memory, unsigned int>;
521  // CUDA event_type deduce(bytecode_type& bytecode) {
522  // event_type has_changed;
523 
524  // // We load the variables.
525  // local_universe_type r1;
526  // local_universe_type r2((*sub)[bytecode.y]);
527  // local_universe_type r3((*sub)[bytecode.z]);
528  // // Reified constraint: X = (Y = Z) and X = (Y <= Z).
529  // if(bytecode.op == EQ || bytecode.op == LEQ) {
530  // r1 = (*sub)[bytecode.x];
531  // // Y [op] Z
532  // if(r1 <= ONE) {
533  // if(bytecode.op == LEQ) {
534  // r3.join_lb(LB::top());
535  // r2.join_ub(UB::top());
536  // }
537  // has_changed.set(1, sub->embed(bytecode.y, r3));
538  // has_changed.set(2, sub->embed(bytecode.z, r2));
539  // }
540  // // not (Y [op] Z)
541  // else if(r1 <= ZERO) {
542  // if(bytecode.op == EQ) {
543  // if(r2.lb().value() == r2.ub().value()) {
544  // r1 = r3;
545  // r1.meet_lb(LB::prev(r2.lb()));
546  // r3.meet_ub(UB::prev(r2.ub()));
547  // has_changed.set(2, sub->embed(bytecode.z, fjoin(r1,r3)));
548  // }
549  // else if(r3.lb().value() == r3.ub().value()) {
550  // r1 = r2;
551  // r1.meet_lb(LB::prev(r3.lb()));
552  // r2.meet_ub(UB::prev(r3.ub()));
553  // has_changed.set(1, sub->embed(bytecode.y, fjoin(r1,r2)));
554  // }
555  // }
556  // else {
557  // assert(bytecode.op == LEQ);
558  // r2.meet_lb(LB::prev(r3.lb()));
559  // r3.meet_ub(UB::prev(r2.ub()));
560  // has_changed.set(1, sub->embed(bytecode.y, r2));
561  // has_changed.set(2, sub->embed(bytecode.z, r3));
562  // }
563  // }
564  // // X <- 1
565  // else if(r2.ub().value() <= r3.lb().value() && (bytecode.op == LEQ || r2.lb().value() == r3.ub().value())) {
566  // has_changed.set(0, sub->embed(bytecode.x, ONE));
567  // }
568  // // X <- 0
569  // else if(r2.lb().value() > r3.ub().value() || (bytecode.op == EQ && r2.ub().value() < r3.lb().value())) {
570  // has_changed.set(0, sub->embed(bytecode.x, ZERO));
571  // }
572  // }
573  // // Arithmetic constraint: X = Y + Z, X = Y - Z, ...
574  // else {
575  // // X <- Y [op] Z
576  // r1.project(bytecode.op, r2, r3);
577  // has_changed.set(0, sub->embed(bytecode.x, r1));
578 
579  // // Y <- X <left residual> Z
580  // r1 = (*sub)[bytecode.x];
581  // switch(bytecode.op) {
582  // case ADD: pc::GroupAdd<local_universe_type>::left_residual(r1, r3, r2); break;
583  // case SUB: pc::GroupSub<local_universe_type>::left_residual(r1, r3, r2); break;
584  // case MUL: pc::GroupMul<local_universe_type, EDIV>::left_residual(r1, r3, r2); break;
585  // case EDIV: pc::GroupDiv<local_universe_type, EDIV>::left_residual(r1, r3, r2); break;
586  // case MIN: pc::GroupMinMax<local_universe_type, MIN>::left_residual(r1, r3, r2); break;
587  // case MAX: pc::GroupMinMax<local_universe_type, MAX>::left_residual(r1, r3, r2); break;
588  // default: assert(false);
589  // }
590  // has_changed.set(1, sub->embed(bytecode.y, r2));
591 
592  // // Z <- X <right residual> Y
593  // r2 = (*sub)[bytecode.y];
594  // r3.join_top();
595  // switch(bytecode.op) {
596  // case ADD: pc::GroupAdd<local_universe_type>::right_residual(r1, r2, r3); break;
597  // case SUB: pc::GroupSub<local_universe_type>::right_residual(r1, r2, r3); break;
598  // case MUL: pc::GroupMul<local_universe_type, EDIV>::right_residual(r1, r2, r3); break;
599  // case EDIV: pc::GroupDiv<local_universe_type, EDIV>::right_residual(r1, r2, r3); break;
600  // case MIN: pc::GroupMinMax<local_universe_type, MIN>::right_residual(r1, r2, r3); break;
601  // case MAX: pc::GroupMinMax<local_universe_type, MAX>::right_residual(r1, r2, r3); break;
602  // default: assert(false);
603  // }
604  // has_changed.set(2, sub->embed(bytecode.z, r3));
605  // }
606  // return has_changed;
607  // }
608 
609  // Functions forwarded to the sub-domain `A`.
610 
611  /** `true` if the underlying abstract element is bot, `false` otherwise. */
612  CUDA local::B is_bot() const {
613  return sub->is_bot();
614  }
615 
616  /** `true` if the underlying abstract element is top and there is no deduction function, `false` otherwise. */
617  CUDA local::B is_top() const {
618  return sub->is_top() && bytecodes->size() == 0;
619  }
620 
621  CUDA auto /* universe_type or const universe_type& */ operator[](int x) const {
622  return (*sub)[x];
623  }
624 
625  CUDA auto /* universe_type or const universe_type& */ project(AVar x) const {
626  return sub->project(x);
627  }
628 
629  template <class Univ>
630  CUDA void project(AVar x, Univ& u) const {
631  sub->project(x, u);
632  }
633 
634  CUDA int vars() const {
635  return sub->vars();
636  }
637 
638  template <class Alloc2 = allocator_type>
639  CUDA snapshot_type<Alloc2> snapshot(const Alloc2& alloc = Alloc2()) const {
640  return snapshot_type<Alloc2>(bytecodes->size(), sub->snapshot(alloc));
641  }
642 
643  template <class Alloc2>
644  CUDA void restore(const snapshot_type<Alloc2>& snap) {
645  int n = bytecodes->size();
646  for(int i = snap.num_bytecodes; i < n; ++i) {
647  bytecodes->pop_back();
648  }
649  sub->restore(snap.sub_snap);
650  }
651 
652  /** An abstract element is extractable when it is not equal to bot, all propagators are entailed, and the underlying abstract element is extractable. */
653  template <class ExtractionStrategy = NonAtomicExtraction>
654  CUDA bool is_extractable(const ExtractionStrategy& strategy = ExtractionStrategy()) const {
655  if(is_bot()) {
656  return false;
657  }
658  for(int i = 0; i < bytecodes->size(); ++i) {
659  if(!ask(i)) {
660  return false;
661  }
662  }
663  return sub->is_extractable(strategy);
664  }
665 
666  /** Extract the current element into `ua`.
667  * \pre `is_extractable()` must be `true`.
668  * For efficiency reasons, if `B` is a propagator completion, the propagators are not copied into `ua`.
669  * (This is sound: since they are entailed, they no longer bring any information.) */
670  template <class B>
671  CUDA void extract(B& ua) const {
672  if constexpr(impl::is_pir_like<B>::value) {
673  sub->extract(*ua.sub);
674  }
675  else {
676  sub->extract(ua);
677  }
678  }
679 
680 private:
681  template<class Env, class Allocator2>
682  CUDA NI TFormula<Allocator2> deinterpret(bytecode_type bytecode, const Env& env, Allocator2 allocator) const {
683  using F = TFormula<Allocator2>;
684  auto X = F::make_lvar(bytecode.x.aty(), LVar<Allocator2>(env.name_of(bytecode.x), allocator));
685  auto Y = F::make_lvar(bytecode.y.aty(), LVar<Allocator2>(env.name_of(bytecode.y), allocator));
686  auto Z = F::make_lvar(bytecode.z.aty(), LVar<Allocator2>(env.name_of(bytecode.z), allocator));
687  return F::make_binary(X, EQ, F::make_binary(Y, bytecode.op, Z, aty(), allocator), aty(), allocator);
688  }
689 public:
690 
691  template<class Env, class Allocator2 = typename Env::allocator_type>
692  CUDA NI TFormula<Allocator2> deinterpret(const Env& env, Allocator2 allocator = Allocator2()) const {
693  using F = TFormula<Allocator2>;
694  typename F::Sequence seq{allocator};
695  seq.push_back(sub->deinterpret(env, allocator));
696  for(int i = 0; i < bytecodes->size(); ++i) {
697  seq.push_back(deinterpret((*bytecodes)[i], env, allocator));
698  }
699  return F::make_nary(AND, std::move(seq), aty());
700  }
701 
702  template<class I, class Env, class Allocator2 = typename Env::allocator_type>
703  CUDA NI TFormula<Allocator2> deinterpret(const I& intermediate, const Env& env, Allocator2 allocator = Allocator2()) const {
704  using F = TFormula<Allocator2>;
705  typename F::Sequence seq{allocator};
706  seq.push_back(sub->deinterpret(intermediate.sub_value, env, allocator));
707  for(int i = 0; i < intermediate.bytecodes.size(); ++i) {
708  seq.push_back(deinterpret(intermediate.bytecodes[i], env, allocator));
709  }
710  return F::make_nary(AND, std::move(seq), aty());
711  }
712 };
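// Minimal usage sketch, assuming the usual lala types from the other headers
// (VStore, Interval, ZInc, VarEnv, IDiagnostics) and a formula `f` whose variable
// declarations have already been interpreted into the sub-store:
//
//   using IStore = VStore<Interval<ZInc>, battery::standard_allocator>;
//   using IPIR = PIR<IStore>;
//   VarEnv<battery::standard_allocator> env;
//   auto pir = IPIR::bot(env);
//   IPIR::tell_type<battery::standard_allocator> tell;
//   IDiagnostics diag;
//   if(pir.interpret_tell(f, env, tell, diag)) {
//     pir.deduce(tell);
//     bool changed = true;
//     while(changed) {   // naive fixpoint loop over all propagators
//       changed = false;
//       for(int i = 0; i < pir.num_deductions(); ++i) {
//         if(pir.deduce(i)) { changed = true; }
//       }
//     }
//   }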
713 
714 }
715 
716 #endif