Lattice land propagators completion library
pir.hpp
Go to the documentation of this file.
1// Copyright 2024 Pierre Talbot
2
3#ifndef LALA_PC_PIR_HPP
4#define LALA_PC_PIR_HPP
5
6#include "battery/vector.hpp"
7#include "battery/unique_ptr.hpp"
8#include "battery/shared_ptr.hpp"
9#include "battery/root_ptr.hpp"
10#include "battery/allocator.hpp"
11#include "battery/algorithm.hpp"
12#include "battery/bitset.hpp"
13
14#include "lala/logic/logic.hpp"
15#include "lala/logic/ternarize.hpp"
16#include "lala/universes/arith_bound.hpp"
17#include "lala/abstract_deps.hpp"
18#include "lala/vstore.hpp"
19
20#include "terms.hpp"
21
22namespace lala {
23
24template <class A, class Alloc> class PIR;
25namespace impl {
26 template <class>
27 struct is_pir_like {
28 static constexpr bool value = false;
29 };
30 template<class A, class Alloc>
31 struct is_pir_like<PIR<A, Alloc>> {
32 static constexpr bool value = true;
33 };
34}
35
36// This represents a constraint of the form `X = Y [op] Z`.
37struct bytecode_type {
38 Sig op;
39 AVar x;
40 AVar y;
41 AVar z;
42 constexpr bytecode_type() = default;
43 constexpr bytecode_type(const bytecode_type&) = default;
44 CUDA INLINE const AVar& operator[](int i) const {
45 return i == 0 ? x : (i == 1 ? y : z);
46 }
47};
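// Example: after ternarization, the constraint `x = y + z` becomes a single
// bytecode with `op == ADD` and the three abstract variables stored in the
// fixed order x, y, z, so that `bc[0]`, `bc[1]` and `bc[2]` access x, y and z.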
48
49/** PIR is an abstract transformer built on top of an abstract domain `A`.
50 It is expected that `A` provides a projection function `u = project(x)`.
51 We also expect an `embed(x, u)` function refining the domain of the variable `x` with the abstract universe `u`.
52 An example of abstract domain satisfying these requirements is `VStore<Interval<ZInc>>`. */
53template <class A, class Allocator = typename A::allocator_type>
54class PIR {
55public:
56 using sub_type = A;
57 using universe_type = typename A::universe_type;
58 using local_universe_type = typename universe_type::local_type;
59 using allocator_type = Allocator;
60 using sub_allocator_type = typename A::allocator_type;
61 using this_type = PIR<A, allocator_type>;
62
63 template <class Alloc>
64 struct snapshot_type
65 {
66 using sub_snap_type = A::template snapshot_type<Alloc>;
67 int num_bytecodes;
68 sub_snap_type sub_snap;
69
70 CUDA snapshot_type(int num_bytecodes, sub_snap_type&& sub_snap)
71 : num_bytecodes(num_bytecodes)
72 , sub_snap(std::move(sub_snap))
73 {}
74
75 snapshot_type(const snapshot_type<Alloc>&) = default;
76 snapshot_type(snapshot_type<Alloc>&&) = default;
77 snapshot_type<Alloc>& operator=(const snapshot_type<Alloc>&) = default;
78 snapshot_type<Alloc>& operator=(snapshot_type<Alloc>&&) = default;
79
80 template <class SnapshotType>
81 CUDA snapshot_type(const SnapshotType& other, const Alloc& alloc = Alloc{})
82 : num_bytecodes(other.num_bytecodes)
83 , sub_snap(other.sub_snap, alloc)
84 {}
85 };
86
87 using sub_ptr = abstract_ptr<sub_type>;
88
89 constexpr static const bool is_abstract_universe = false;
90 constexpr static const bool sequential = sub_type::sequential;
91 constexpr static const bool is_totally_ordered = false;
92 constexpr static const bool preserve_bot = true;
93 // The next properties should be checked more seriously; relying on the sub-domain might be unnecessarily restrictive.
94 constexpr static const bool preserve_top = sub_type::preserve_top;
95 constexpr static const bool preserve_join = sub_type::preserve_join;
96 constexpr static const bool preserve_meet = sub_type::preserve_meet;
97 constexpr static const bool injective_concretization = sub_type::injective_concretization;
98 constexpr static const bool preserve_concrete_covers = sub_type::preserve_concrete_covers;
99 constexpr static const char* name = "PIR";
100
101 template <class A2, class Alloc2>
102 friend class PIR;
103
104 using bytecodes_type = battery::vector<bytecode_type, allocator_type>;
105private:
106 AType atype;
107 sub_ptr sub;
108
109 const local_universe_type ZERO;
110 const local_universe_type ONE;
111
112 static_assert(sizeof(int) == sizeof(AVar), "The size of AVar must be equal to the size of an int.");
113 static_assert(sizeof(int) == sizeof(Sig), "The size of Sig must be equal to the size of an int.");
114
115 using bytecodes_ptr = battery::root_ptr<battery::vector<bytecode_type, allocator_type>, allocator_type>;
116
117 /** We represent the constraints X = Y [op] Z. */
118 bytecodes_ptr bytecodes;
119
120 using LB = typename local_universe_type::LB;
121 using UB = typename local_universe_type::UB;
122
123public:
124 template <class Alloc, class SubType>
125 struct interpreted_type {
126 SubType sub_value;
127 battery::vector<bytecode_type, Alloc> bytecodes;
128
129 interpreted_type(const interpreted_type&) = default;
130 interpreted_type(interpreted_type&&) = default;
131 interpreted_type& operator=(interpreted_type&&) = default;
132
133 CUDA interpreted_type(const SubType& sub_value, const Alloc& alloc = Alloc{})
134 : sub_value(sub_value)
135 , bytecodes(alloc)
136 {}
137
138 CUDA interpreted_type(const Alloc& alloc = Alloc{})
139 : interpreted_type(SubType(alloc), alloc)
140 {}
141
142 template <class InterpretedType>
143 CUDA interpreted_type(const InterpretedType& other, const Alloc& alloc = Alloc{})
144 : sub_value(other.sub_value, alloc)
145 , bytecodes(other.bytecodes, alloc)
146 {}
147
148 template <class Alloc2, class SubType2>
149 friend struct interpreted_type;
150 };
151
152 template <class Alloc>
153 using tell_type = interpreted_type<Alloc, typename sub_type::template tell_type<Alloc>>;
154
155 template <class Alloc>
156 using ask_type = interpreted_type<Alloc, typename sub_type::template ask_type<Alloc>>;
157
158 CUDA PIR(AType atype, sub_ptr sub, const allocator_type& alloc = allocator_type{})
159 : atype(atype), sub(std::move(sub))
160 , ZERO(local_universe_type::eq_zero())
161 , ONE(local_universe_type::eq_one())
162 , bytecodes(battery::allocate_root<bytecodes_type, allocator_type>(alloc, alloc))
163 {}
164
165 template <class PIR2>
166 CUDA PIR(const PIR2& other, sub_ptr sub, const allocator_type& alloc = allocator_type{})
167 : atype(other.atype), sub(std::move(sub))
168 , ZERO(local_universe_type::eq_zero())
169 , ONE(local_universe_type::eq_one())
170 , bytecodes(battery::allocate_root<bytecodes_type, allocator_type>(alloc, *(other.bytecodes), alloc))
171 {}
172
173 CUDA PIR(PIR&& other)
174 : atype(other.atype)
175 , sub(std::move(other.sub))
176 , ZERO(std::move(other.ZERO))
177 , ONE(std::move(other.ONE))
178 , bytecodes(std::move(other.bytecodes))
179 {}
180
181private:
182 // When activated (`deps.is_shared_copy()`), we avoid copying the propagators and instead share them with those of the root `other`.
183 // This saves memory and avoids contention on the L2 cache among blocks.
184 template<class A2, class Alloc2, class... Allocators>
185 CUDA static bytecodes_ptr init_bytecodes(const PIR<A2, Alloc2>& other, AbstractDeps<Allocators...>& deps) {
186 auto alloc = deps.template get_allocator<allocator_type>();
187 if constexpr(std::is_same_v<allocator_type, Alloc2>) {
188 if(deps.is_shared_copy()) {
189 assert(static_cast<bool>(other.bytecodes));
190 return other.bytecodes;
191 }
192 }
193 bytecodes_ptr r = battery::allocate_root<bytecodes_type, allocator_type>(alloc, *(other.bytecodes), alloc);
194 return std::move(r);
195 }
196
197public:
198 template<class A2, class Alloc2, class... Allocators>
199 CUDA PIR(const PIR<A2, Alloc2>& other, AbstractDeps<Allocators...>& deps)
200 : atype(other.atype)
201 , sub(deps.template clone<A>(other.sub))
202 , ZERO(other.ZERO)
203 , ONE(other.ONE)
204 , bytecodes(init_bytecodes(other, deps))
205 {}
206
207 CUDA allocator_type get_allocator() const {
208 return bytecodes.get_allocator();
209 }
210
211 CUDA AType aty() const {
212 return atype;
213 }
214
215 CUDA static this_type bot(AType atype = UNTYPED,
216 AType atype_sub = UNTYPED,
217 const allocator_type& alloc = allocator_type(),
218 const sub_allocator_type& sub_alloc = sub_allocator_type())
219 {
220 return PIR{atype, battery::allocate_shared<sub_type>(sub_alloc, sub_type::bot(atype_sub, sub_alloc)), alloc};
221 }
222
223 /** A special symbolic element representing top. */
224 CUDA static this_type top(AType atype = UNTYPED,
225 AType atype_sub = UNTYPED,
226 const allocator_type& alloc = allocator_type(),
227 const sub_allocator_type& sub_alloc = sub_allocator_type())
228 {
229 return PIR{atype, battery::allocate_shared<sub_type>(sub_alloc, sub_type::top(atype_sub, sub_alloc)), alloc};
230 }
231
232 template <class Env>
233 CUDA static this_type bot(Env& env,
234 const allocator_type& alloc = allocator_type(),
235 const sub_allocator_type& sub_alloc = sub_allocator_type())
236 {
237 AType atype_sub = env.extends_abstract_dom();
238 AType atype = env.extends_abstract_dom();
239 return bot(atype, atype_sub, alloc, sub_alloc);
240 }
241
242 template <class Env>
243 CUDA static this_type top(Env& env,
244 const allocator_type& alloc = allocator_type(),
245 const sub_allocator_type& sub_alloc = sub_allocator_type())
246 {
247 AType atype_sub = env.extends_abstract_dom();
248 AType atype = env.extends_abstract_dom();
249 return top(atype, atype_sub, alloc, sub_alloc);
250 }
251
252private:
253 /** We interpret the formula `f` into `intermediate`; note that we add at most one constraint to `intermediate`, and only if the interpretation succeeds. */
254 template <IKind kind, bool diagnose, class F, class Env, class Intermediate>
255 CUDA bool interpret_formula(const F& f, Env& env, Intermediate& intermediate, IDiagnostics& diagnostics) const {
256 if(f.type() != aty() && !f.is_untyped()) {
257 RETURN_INTERPRETATION_ERROR("The type of the formula does not match the type of this abstract domain.");
258 }
259 if(f.is_binary()) {
260 Sig sig = f.sig();
261 // Expect a constraint of the form X = Y <OP> Z or Y <OP> Z = X.
262 int left = f.seq(0).is_binary() ? 1 : 0;
263 int right = f.seq(1).is_binary() ? 1 : 0;
264 if(sig == EQ && (left + right == 1)) {
265 auto& X = f.seq(left);
266 auto& Y = f.seq(right).seq(0);
267 auto& Z = f.seq(right).seq(1);
268 bytecode_type bytecode;
269 bytecode.op = f.seq(right).sig();
270 if(X.is_variable() && Y.is_variable() && Z.is_variable() &&
271 (bytecode.op == ADD || bytecode.op == MUL || bytecode.op == EDIV || bytecode.op == EMOD
272 || bytecode.op == MIN || bytecode.op == MAX
273 || bytecode.op == EQ || bytecode.op == LEQ))
274 {
275 if( env.template interpret<diagnose>(X, bytecode.x, diagnostics)
276 && env.template interpret<diagnose>(Y, bytecode.y, diagnostics)
277 && env.template interpret<diagnose>(Z, bytecode.z, diagnostics))
278 {
279 intermediate.bytecodes.push_back(bytecode);
280 return true;
281 }
282 RETURN_INTERPRETATION_ERROR("Could not interpret the variables in the environment.");
283 }
284 }
285 }
286 RETURN_INTERPRETATION_ERROR("The shape of this formula is not supported.");
287 }
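// Example: with x, y, z all variables, the formulas `x = (y + z)` and `(y * z) = x`
// are accepted and each produce one bytecode (op == ADD, resp. op == MUL).
// Formulas such as `x = y + 2` or `x + y = z + w` are rejected here: they must be
// ternarized beforehand (see lala/logic/ternarize.hpp) or interpreted by the sub-domain.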
288
289public:
290 template <IKind kind, bool diagnose = false, class F, class Env, class I>
291 CUDA NI bool interpret(const F& f, Env& env, I& intermediate, IDiagnostics& diagnostics) const {
292 size_t error_context = 0;
293 if constexpr(diagnose) {
294 diagnostics.add_suberror(IDiagnostics(false, name, "Uninterpretable formula in both PIR and its sub-domain.", f));
295 error_context = diagnostics.num_suberrors();
296 }
297 bool res = false;
298 AType current = f.type();
299 const_cast<F&>(f).type_as(sub->aty()); // We will restore the type after the call to sub->interpret.
300 if(sub->template interpret<kind, diagnose>(f, env, intermediate.sub_value, diagnostics)) {
301 res = true;
302 }
303 const_cast<F&>(f).type_as(current);
304 if(!res) {
305 res = interpret_formula<kind, diagnose>(f, env, intermediate, diagnostics);
306 }
307 if constexpr(diagnose) {
308 diagnostics.merge(res, error_context);
309 }
310 return res;
311 }
312
313 /** PIR expects a non-conjunctive formula \f$ c \f$ which can either be interpreted in the sub-domain `A` or in the current domain.
314 */
315 template <bool diagnose = false, class F, class Env, class Alloc2>
316 CUDA NI bool interpret_tell(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
317 return interpret<IKind::TELL, diagnose>(f, env, tell, diagnostics);
318 }
319
320 template <bool diagnose = false, class F, class Env, class Alloc2>
321 CUDA NI bool interpret_ask(const F& f, const Env& env, ask_type<Alloc2>& ask, IDiagnostics& diagnostics) const {
322 return interpret<IKind::ASK, diagnose>(f, const_cast<Env&>(env), ask, diagnostics);
323 }
324
325 /** Same limitations as `PC::deduce`. */
326 template <class Alloc2>
327 CUDA local::B deduce(const tell_type<Alloc2>& t) {
328 local::B has_changed = sub->deduce(t.sub_value);
329 if(t.bytecodes.size() > 0) {
330 bytecodes->reserve(bytecodes->size() + t.bytecodes.size());
331 for(int i = 0; i < t.bytecodes.size(); ++i) {
332 bytecodes->push_back(t.bytecodes[i]);
333 }
334 /** This sorts the constraints `X = Y <op> Z` by `<op>`.
335 * Note that `battery::sorti` is much slower than `std::sort`, hence the #ifdef. */
336 #ifdef __CUDA_ARCH__
337 battery::sorti(*bytecodes,
338 [&](int i, int j) { return (*bytecodes)[i].op < (*bytecodes)[j].op; });
339 #else
340 std::stable_sort(bytecodes->data(), bytecodes->data() + bytecodes->size(),
341 [](const bytecode_type& a, const bytecode_type& b) {
342 // return a.op < b.op;
343 return a.op == b.op ? (a.y.vid() == b.y.vid() ? (a.x.vid() == b.x.vid() ? a.z.vid() < b.z.vid() : a.x.vid() < b.x.vid()) : a.y.vid() < b.y.vid()) : a.op < b.op;
344 });
345 #endif
346 has_changed = true;
347 }
348 return has_changed;
349 }
350
351 CUDA bool embed(AVar x, const universe_type& dom) {
352 return sub->embed(x, dom);
353 }
354
355private:
356 CUDA local::B ask(bytecode_type bytecode) const {
357 // We load the variables.
358 local_universe_type r1((*sub)[bytecode.x]);
359 local_universe_type r2((*sub)[bytecode.y]);
360 local_universe_type r3((*sub)[bytecode.z]);
361
362 // Reified constraint: X = (Y = Z) and X = (Y <= Z).
363 if(bytecode.op == EQ || bytecode.op == LEQ) {
364 // Y [op] Z
365 if(r1 <= ONE) {
366 return bytecode.op == EQ
367 ? (r2 == r3 && r2.lb().value() == r2.ub().value())
368 : r2.ub().value() <= r3.lb().value();
369 }
370 // not (Y [op] Z)
371 else if(r1 <= ZERO) {
372 return bytecode.op == EQ
373 ? fmeet(r2, r3).is_bot().value()
374 : r2.lb().value() > r3.ub().value();
375 }
376 // We need `r1` to be a singleton to decide whether the constraint is entailed.
377 return false;
378 }
379 // Arithmetic constraint: X = Y + Z, X = Y - Z, ...
380 else {
381 local_universe_type right;
382 right.project(bytecode.op, r2, r3);
383 return right == r1 && r1.lb().value() == r1.ub().value();
384 }
385 }
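// Example: for the reified constraint x = (y <= z) with x fixed to 1, y in [2,4]
// and z in [4,6], the constraint is entailed since ub(y) = 4 <= lb(z) = 4.
// With x still in [0,1], the function returns false: x must be fixed before
// entailment can be decided.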
386
387public:
388 CUDA INLINE bytecode_type load_deduce(int i) const {
389 #ifdef __CUDA_ARCH__
390 // Vectorize load (int4).
391 int4 b4 = reinterpret_cast<int4*>(bytecodes->data())[i];
392 return *reinterpret_cast<bytecode_type*>(&b4);
393 #else
394 return (*bytecodes)[i];
395 #endif
396 }
397
398 CUDA local::B ask(int i) const {
399 return ask(load_deduce(i));
400 }
401
402 template <class Alloc2>
403 CUDA local::B ask(const ask_type<Alloc2>& t) const {
404 for(int i = 0; i < t.bytecodes.size(); ++i) {
405 if(!ask(t.bytecodes[i])) {
406 return false;
407 }
408 }
409 return sub->ask(t.sub_value);
410 }
411
412 CUDA int num_deductions() const {
413 return bytecodes->size();
414 }
415
416public:
417 CUDA local::B deduce(int i) {
418 assert(i < num_deductions());
419 return deduce(load_deduce(i));
420 }
421
422 CUDA local::B deduce(bytecode_type bytecode) {
423 local::B has_changed = false;
424 // We load the variables.
425 local_universe_type r1((*sub)[bytecode.x]);
426 local_universe_type r2((*sub)[bytecode.y]);
427 local_universe_type r3((*sub)[bytecode.z]);
428 // Reified constraint: X = (Y = Z) and X = (Y <= Z).
429 if(bytecode.op == EQ || bytecode.op == LEQ) {
430 // Y [op] Z
431 if(r1 <= ONE) {
432 if(bytecode.op == LEQ) {
433 r3.join_lb(LB::top());
434 r2.join_ub(UB::top());
435 }
436 has_changed |= sub->embed(bytecode.y, r3);
437 has_changed |= sub->embed(bytecode.z, r2);
438 }
439 // not (Y [op] Z)
440 else if(r1 <= ZERO) {
441 if(bytecode.op == EQ) {
442 if(r2.lb().value() == r2.ub().value()) {
443 r1 = r3;
444 r1.meet_lb(LB::prev(r2.lb()));
445 r3.meet_ub(UB::prev(r2.ub()));
446 has_changed |= sub->embed(bytecode.z, fjoin(r1,r3));
447 }
448 else if(r3.lb().value() == r3.ub().value()) {
449 r1 = r2;
450 r1.meet_lb(LB::prev(r3.lb()));
451 r2.meet_ub(UB::prev(r3.ub()));
452 has_changed |= sub->embed(bytecode.y, fjoin(r1,r2));
453 }
454 }
455 else {
456 assert(bytecode.op == LEQ);
457 r2.meet_lb(LB::prev(r3.lb()));
458 r3.meet_ub(UB::prev(r2.ub()));
459 has_changed |= sub->embed(bytecode.y, r2);
460 has_changed |= sub->embed(bytecode.z, r3);
461 }
462 }
463 // X <- 1
464 else if(r2.ub().value() <= r3.lb().value() && (bytecode.op == LEQ || r2.lb().value() == r3.ub().value())) {
465 has_changed |= sub->embed(bytecode.x, ONE);
466 }
467 // X <- 0
468 else if(r2.lb().value() > r3.ub().value() || (bytecode.op == EQ && r2.ub().value() < r3.lb().value())) {
469 has_changed |= sub->embed(bytecode.x, ZERO);
470 }
471 }
472 // Arithmetic constraint: X = Y + Z, X = Y - Z, ...
473 else {
474 // X <- Y [op] Z
475 r1.project(bytecode.op, r2, r3);
476 has_changed |= sub->embed(bytecode.x, r1);
477
478 // Y <- X <left residual> Z
479 switch(bytecode.op) {
480 case ADD: pc::GroupAdd<local_universe_type>::left_residual(r1, r3, r2); break;
481 case MUL: pc::GroupMul<local_universe_type, EDIV>::left_residual(r1, r3, r2); break;
482 case EDIV: pc::GroupDiv<local_universe_type, EDIV>::left_residual(r1, r3, r2); break;
483 case EMOD: {
484 if(r1.is_bot() || r3.is_bot()) {
485 r2.meet_bot();
486 }
487 break;
488 }
489 case MIN: pc::GroupMinMax<local_universe_type, MIN>::left_residual(r1, r3, r2); break;
490 case MAX: pc::GroupMinMax<local_universe_type, MAX>::left_residual(r1, r3, r2); break;
491 default: assert(false);
492 }
493 has_changed |= sub->embed(bytecode.y, r2);
494
495 // Z <- X <right residual> Y
496 switch(bytecode.op) {
497 case ADD: pc::GroupAdd<local_universe_type>::right_residual(r1, r2, r3); break;
498 case MUL: pc::GroupMul<local_universe_type, EDIV>::right_residual(r1, r2, r3); break;
499 case EDIV: pc::GroupDiv<local_universe_type, EDIV>::right_residual(r1, r2, r3); break;
500 case EMOD: {
501 if(r1.is_bot() || r2.is_bot()) {
502 r3.meet_bot();
503 }
504 break;
505 }
506 case MIN: pc::GroupMinMax<local_universe_type, MIN>::right_residual(r1, r2, r3); break;
507 case MAX: pc::GroupMinMax<local_universe_type, MAX>::right_residual(r1, r2, r3); break;
508 default: assert(false);
509 }
510 has_changed |= sub->embed(bytecode.z, r3);
511 }
512 return has_changed;
513 }
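// Worked example: for x = y + z with x in [0,20], y in [3,5] and z in [2,4],
// the projection x <- y + z tightens x to [5,9], while the residuals
// y <- x - z and z <- x - y yield intervals no tighter than [3,5] and [2,4],
// so y and z are left unchanged.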
514
515 // using event_type = battery::bitset<3, battery::local_memory, unsigned int>;
516 // CUDA event_type deduce(bytecode_type& bytecode) {
517 // event_type has_changed;
518
519 // // We load the variables.
520 // local_universe_type r1;
521 // local_universe_type r2((*sub)[bytecode.y]);
522 // local_universe_type r3((*sub)[bytecode.z]);
523 // // Reified constraint: X = (Y = Z) and X = (Y <= Z).
524 // if(bytecode.op == EQ || bytecode.op == LEQ) {
525 // r1 = (*sub)[bytecode.x];
526 // // Y [op] Z
527 // if(r1 <= ONE) {
528 // if(bytecode.op == LEQ) {
529 // r3.join_lb(LB::top());
530 // r2.join_ub(UB::top());
531 // }
532 // has_changed.set(1, sub->embed(bytecode.y, r3));
533 // has_changed.set(2, sub->embed(bytecode.z, r2));
534 // }
535 // // not (Y [op] Z)
536 // else if(r1 <= ZERO) {
537 // if(bytecode.op == EQ) {
538 // if(r2.lb().value() == r2.ub().value()) {
539 // r1 = r3;
540 // r1.meet_lb(LB::prev(r2.lb()));
541 // r3.meet_ub(UB::prev(r2.ub()));
542 // has_changed.set(2, sub->embed(bytecode.z, fjoin(r1,r3)));
543 // }
544 // else if(r3.lb().value() == r3.ub().value()) {
545 // r1 = r2;
546 // r1.meet_lb(LB::prev(r3.lb()));
547 // r2.meet_ub(UB::prev(r3.ub()));
548 // has_changed.set(1, sub->embed(bytecode.y, fjoin(r1,r2)));
549 // }
550 // }
551 // else {
552 // assert(bytecode.op == LEQ);
553 // r2.meet_lb(LB::prev(r3.lb()));
554 // r3.meet_ub(UB::prev(r2.ub()));
555 // has_changed.set(1, sub->embed(bytecode.y, r2));
556 // has_changed.set(2, sub->embed(bytecode.z, r3));
557 // }
558 // }
559 // // X <- 1
560 // else if(r2.ub().value() <= r3.lb().value() && (bytecode.op == LEQ || r2.lb().value() == r3.ub().value())) {
561 // has_changed.set(0, sub->embed(bytecode.x, ONE));
562 // }
563 // // X <- 0
564 // else if(r2.lb().value() > r3.ub().value() || (bytecode.op == EQ && r2.ub().value() < r3.lb().value())) {
565 // has_changed.set(0, sub->embed(bytecode.x, ZERO));
566 // }
567 // }
568 // // Arithmetic constraint: X = Y + Z, X = Y - Z, ...
569 // else {
570 // // X <- Y [op] Z
571 // r1.project(bytecode.op, r2, r3);
572 // has_changed.set(0, sub->embed(bytecode.x, r1));
573
574 // // Y <- X <left residual> Z
575 // r1 = (*sub)[bytecode.x];
576 // switch(bytecode.op) {
577 // case ADD: pc::GroupAdd<local_universe_type>::left_residual(r1, r3, r2); break;
578 // case SUB: pc::GroupSub<local_universe_type>::left_residual(r1, r3, r2); break;
579 // case MUL: pc::GroupMul<local_universe_type, EDIV>::left_residual(r1, r3, r2); break;
580 // case EDIV: pc::GroupDiv<local_universe_type, EDIV>::left_residual(r1, r3, r2); break;
581 // case MIN: pc::GroupMinMax<local_universe_type, MIN>::left_residual(r1, r3, r2); break;
582 // case MAX: pc::GroupMinMax<local_universe_type, MAX>::left_residual(r1, r3, r2); break;
583 // default: assert(false);
584 // }
585 // has_changed.set(1, sub->embed(bytecode.y, r2));
586
587 // // Z <- X <right residual> Y
588 // r2 = (*sub)[bytecode.y];
589 // r3.join_top();
590 // switch(bytecode.op) {
591 // case ADD: pc::GroupAdd<local_universe_type>::right_residual(r1, r2, r3); break;
592 // case SUB: pc::GroupSub<local_universe_type>::right_residual(r1, r2, r3); break;
593 // case MUL: pc::GroupMul<local_universe_type, EDIV>::right_residual(r1, r2, r3); break;
594 // case EDIV: pc::GroupDiv<local_universe_type, EDIV>::right_residual(r1, r2, r3); break;
595 // case MIN: pc::GroupMinMax<local_universe_type, MIN>::right_residual(r1, r2, r3); break;
596 // case MAX: pc::GroupMinMax<local_universe_type, MAX>::right_residual(r1, r2, r3); break;
597 // default: assert(false);
598 // }
599 // has_changed.set(2, sub->embed(bytecode.z, r3));
600 // }
601 // return has_changed;
602 // }
603
604 // Functions forwarded to the sub-domain `A`.
605
606 /** `true` if the underlying abstract element is bot, `false` otherwise. */
607 CUDA local::B is_bot() const {
608 return sub->is_bot();
609 }
610
611 /** `true` if the underlying abstract element is top and there is no deduction function, `false` otherwise. */
612 CUDA local::B is_top() const {
613 return sub->is_top() && bytecodes->size() == 0;
614 }
615
616 CUDA auto /* universe_type or const universe_type& */ operator[](int x) const {
617 return (*sub)[x];
618 }
619
620 CUDA auto /* universe_type or const universe_type& */ project(AVar x) const {
621 return sub->project(x);
622 }
623
624 template <class Univ>
625 CUDA void project(AVar x, Univ& u) const {
626 sub->project(x, u);
627 }
628
629 CUDA int vars() const {
630 return sub->vars();
631 }
632
633 template <class Alloc2 = allocator_type>
634 CUDA snapshot_type<Alloc2> snapshot(const Alloc2& alloc = Alloc2()) const {
635 assert(static_cast<bool>(bytecodes));
636 return snapshot_type<Alloc2>(bytecodes->size(), sub->snapshot(alloc));
637 }
638
639 template <class Alloc2>
640 CUDA void restore(const snapshot_type<Alloc2>& snap) {
641 int n = bytecodes->size();
642 for(int i = snap.num_bytecodes; i < n; ++i) {
643 bytecodes->pop_back();
644 }
645 sub->restore(snap.sub_snap);
646 }
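// Example: a backtracking step only needs the two members above; with `a` an
// element of this domain:
//   auto snap = a.snapshot();  // records the number of bytecodes and a sub-domain snapshot
//   /* deduce new constraints, propagate, explore a branch... */
//   a.restore(snap);           // pops the bytecodes added since `snapshot()` and restores the sub-domain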
647
648 /** An abstract element is extractable when it is not bot, all propagators are entailed, and the underlying abstract element is extractable. */
649 template <class ExtractionStrategy = NonAtomicExtraction>
650 CUDA bool is_extractable(const ExtractionStrategy& strategy = ExtractionStrategy()) const {
651 if(is_bot()) {
652 return false;
653 }
654 for(int i = 0; i < bytecodes->size(); ++i) {
655 if(!ask(i)) {
656 return false;
657 }
658 }
659 return sub->is_extractable(strategy);
660 }
661
662 /** Extract the current element into `ua`.
663 * \pre `is_extractable()` must be `true`.
664 * For efficiency reasons, if `B` is a propagator completion, the propagators are not copied into `ua`.
665 * (This is sound: since they are entailed, they no longer bring any information.) */
666 template <class B>
667 CUDA void extract(B& ua) const {
668 if constexpr(impl::is_pir_like<B>::value) {
669 sub->extract(*ua.sub);
670 }
671 else {
672 sub->extract(ua);
673 }
674 }
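// Example: solution extraction at the end of search, with `b` another PIR element
// (or directly a sub-domain element) allocated to receive the result:
//   if(a.is_extractable()) { a.extract(b); }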
675
676private:
677 template<class Env, class Allocator2>
678 CUDA NI TFormula<Allocator2> deinterpret(bytecode_type bytecode, const Env& env, Allocator2 allocator) const {
679 using F = TFormula<Allocator2>;
680 auto X = F::make_lvar(bytecode.x.aty(), LVar<Allocator2>(env.name_of(bytecode.x), allocator));
681 auto Y = F::make_lvar(bytecode.y.aty(), LVar<Allocator2>(env.name_of(bytecode.y), allocator));
682 auto Z = F::make_lvar(bytecode.z.aty(), LVar<Allocator2>(env.name_of(bytecode.z), allocator));
683 return F::make_binary(X, EQ, F::make_binary(Y, bytecode.op, Z, aty(), allocator), aty(), allocator);
684 }
685
686public:
687 template<class Env, class Allocator2 = typename Env::allocator_type>
688 CUDA NI TFormula<Allocator2> deinterpret(const Env& env, bool remove_entailed, size_t& num_entailed, Allocator2 allocator = Allocator2()) const {
689 using F = TFormula<Allocator2>;
690 typename F::Sequence seq{allocator};
691 seq.push_back(sub->deinterpret(env, allocator));
692 for(int i = 0; i < bytecodes->size(); ++i) {
693 if(remove_entailed && ask(i)) {
694 num_entailed++;
695 continue;
696 }
697 seq.push_back(deinterpret((*bytecodes)[i], env, allocator));
698 }
699 return F::make_nary(AND, std::move(seq), aty());
700 }
701
702 template<class Env, class Allocator2 = typename Env::allocator_type>
703 CUDA NI TFormula<Allocator2> deinterpret(const Env& env, Allocator2 allocator = Allocator2()) const {
704 size_t num_entailed = 0;
705 return deinterpret(env, false, num_entailed, allocator);
706 }
707
708 template<class I, class Env, class Allocator2 = typename Env::allocator_type>
709 CUDA NI TFormula<Allocator2> deinterpret(const I& intermediate, const Env& env, Allocator2 allocator = Allocator2()) const {
710 using F = TFormula<Allocator2>;
711 typename F::Sequence seq{allocator};
712 seq.push_back(sub->deinterpret(intermediate.sub_value, env, allocator));
713 for(int i = 0; i < intermediate.bytecodes.size(); ++i) {
714 seq.push_back(deinterpret(intermediate.bytecodes[i], env, allocator));
715 }
716 return F::make_nary(AND, std::move(seq), aty());
717 }
718};
719
720}
721
722#endif
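A minimal usage sketch (editorial; `P` stands for any instantiation of `PIR`, e.g. over `VStore<Interval<...>>`, and is not a name defined in this file): assuming the constraints have already been added with `interpret_tell` and `deduce(tell_type)`, a sequential fixpoint loop over the ternary propagators can be written using only the members defined above.

  template <class P>
  bool propagate_to_fixpoint(P& pir) {
    bool changed = true;
    while(changed && !pir.is_bot()) {
      changed = false;
      for(int i = 0; i < pir.num_deductions(); ++i) {
        if(pir.deduce(i)) {   // applies the i-th bytecode X = Y [op] Z
          changed = true;
        }
      }
    }
    return !pir.is_bot();     // false if a contradiction was detected
  }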