Lattice land propagators completion library
pir.hpp
1// Copyright 2024 Pierre Talbot
2
3#ifndef LALA_PC_PIR_HPP
4#define LALA_PC_PIR_HPP
5
6#include "battery/vector.hpp"
7#include "battery/unique_ptr.hpp"
8#include "battery/shared_ptr.hpp"
9#include "battery/root_ptr.hpp"
10#include "battery/allocator.hpp"
11#include "battery/algorithm.hpp"
12#include "battery/bitset.hpp"
13
14#include "lala/logic/logic.hpp"
15#include "lala/logic/ternarize.hpp"
16#include "lala/universes/arith_bound.hpp"
17#include "lala/abstract_deps.hpp"
18#include "lala/vstore.hpp"
19
20#include "terms.hpp"
21
22namespace lala {
23
24template <class A, class Alloc> class PIR;
25namespace impl {
26 template <class>
27 struct is_pir_like {
28 static constexpr bool value = false;
29 };
30 template<class A, class Alloc>
31 struct is_pir_like<PIR<A, Alloc>> {
32 static constexpr bool value = true;
33 };
34}
35
36// This represents the constraint `X = Y [op] Z`.
37struct bytecode_type {
38 Sig op;
39 AVar x;
40 AVar y;
41 AVar z;
42 constexpr bytecode_type() = default;
43 constexpr bytecode_type(const bytecode_type&) = default;
44 CUDA INLINE const AVar& operator[](int i) const {
45 return i == 0 ? x : (i == 1 ? y : z);
46 }
47};
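// For illustration (an assumption consistent with the interpretation code below):
// after ternarization, a constraint such as `x = y + z` is stored as a bytecode with
// `op == ADD` and `x`, `y`, `z` holding the abstract variables (AVar) of the three
// operands, while a reified comparison `x = (y <= z)` uses `op == LEQ` and `x` is
// further restricted to the Boolean interval [0,1] when the propagator is added
// (see `deduce(tell_type)` below).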
48
49/** PIR is an abstract transformer built on top of an abstract domain `A`.
50 It is expected that `A` has a projection function `u = project(x)`.
51 We also expect an `embed(x, u)` function to join the abstract universe `u` into the domain of the variable `x`.
52 An example of an abstract domain satisfying these requirements is `VStore<Interval<ZInc>>`. */
53template <class A, class Allocator = typename A::allocator_type>
54class PIR {
55public:
56 using sub_type = A;
57 using universe_type = typename A::universe_type;
58 using local_universe_type = typename universe_type::local_type;
59 using allocator_type = Allocator;
60 using sub_allocator_type = typename A::allocator_type;
61 using this_type = PIR<A, allocator_type>;
62
63 template <class Alloc>
64 struct snapshot_type
65 {
66 using sub_snap_type = A::template snapshot_type<Alloc>;
67 int num_bytecodes;
68 sub_snap_type sub_snap;
69
70 CUDA snapshot_type(int num_bytecodes, sub_snap_type&& sub_snap)
71 : num_bytecodes(num_bytecodes)
72 , sub_snap(std::move(sub_snap))
73 {}
74
75 snapshot_type(const snapshot_type<Alloc>&) = default;
76 snapshot_type(snapshot_type<Alloc>&&) = default;
77 snapshot_type<Alloc>& operator=(snapshot_type<Alloc>&&) = default;
78 snapshot_type<Alloc>& operator=(const snapshot_type<Alloc>&) = default;
79
80 template <class SnapshotType>
81 CUDA snapshot_type(const SnapshotType& other, const Alloc& alloc = Alloc{})
82 : num_bytecodes(other.num_bytecodes)
83 , sub_snap(other.sub_snap, alloc)
84 {}
85 };
86
87 using sub_ptr = abstract_ptr<sub_type>;
88
89 constexpr static const bool is_abstract_universe = false;
90 constexpr static const bool sequential = sub_type::sequential;
91 constexpr static const bool is_totally_ordered = false;
92 constexpr static const bool preserve_bot = true;
93 // The following properties should be checked more carefully; relying on the sub-domain might be unnecessarily restrictive.
94 constexpr static const bool preserve_top = sub_type::preserve_top;
95 constexpr static const bool preserve_join = sub_type::preserve_join;
96 constexpr static const bool preserve_meet = sub_type::preserve_meet;
97 constexpr static const bool injective_concretization = sub_type::injective_concretization;
98 constexpr static const bool preserve_concrete_covers = sub_type::preserve_concrete_covers;
99 constexpr static const char* name = "PIR";
100
101 template <class A2, class Alloc2>
102 friend class PIR;
103
104 using bytecodes_type = battery::vector<bytecode_type, allocator_type>;
105private:
106 AType atype;
107 sub_ptr sub;
108
109 const local_universe_type ZERO;
110 const local_universe_type ONE;
111
112 static_assert(sizeof(int) == sizeof(AVar), "The size of AVar must be equal to the size of an int.");
113 static_assert(sizeof(int) == sizeof(Sig), "The size of Sig must be equal to the size of an int.");
114
115 using bytecodes_ptr = battery::root_ptr<battery::vector<bytecode_type, allocator_type>, allocator_type>;
116
117 /** We represent the constraints X = Y [op] Z. */
118 bytecodes_ptr bytecodes;
119
120 using LB = typename local_universe_type::LB;
121 using UB = typename local_universe_type::UB;
122
123public:
124 template <class Alloc, class SubType>
125 struct interpreted_type {
126 SubType sub_value;
127 battery::vector<bytecode_type, Alloc> bytecodes;
128
129 interpreted_type(interpreted_type&&) = default;
130 interpreted_type& operator=(interpreted_type&&) = default;
131 interpreted_type(const interpreted_type&) = default;
132
133 CUDA interpreted_type(const SubType& sub_value, const Alloc& alloc = Alloc{})
134 : sub_value(sub_value, alloc)
135 , bytecodes(alloc)
136 {}
137
138 CUDA interpreted_type(const Alloc& alloc = Alloc{})
139 : interpreted_type(SubType(alloc), alloc)
140 {}
141
142 template <class InterpretedType>
143 CUDA interpreted_type(const InterpretedType& other, const Alloc& alloc = Alloc{})
144 : sub_value(other.sub_value, alloc)
145 , bytecodes(other.bytecodes, alloc)
146 {}
147
148 template <class Alloc2, class SubType2>
149 friend struct interpreted_type;
150 };
151
152 template <class Alloc>
153 using tell_type = interpreted_type<Alloc, typename sub_type::template tell_type<Alloc>>;
154
155 template <class Alloc>
156 using ask_type = interpreted_type<Alloc, typename sub_type::template ask_type<Alloc>>;
157
158 CUDA PIR(AType atype, sub_ptr sub, const allocator_type& alloc = allocator_type{})
159 : atype(atype), sub(std::move(sub))
160 , ZERO(local_universe_type::eq_zero())
161 , ONE(local_universe_type::eq_one())
162 , bytecodes(battery::allocate_root<bytecodes_type, allocator_type>(alloc, alloc))
163 {}
164
165 template <class PIR2>
166 CUDA PIR(const PIR2& other, sub_ptr sub, const allocator_type& alloc = allocator_type{})
167 : atype(other.atype), sub(sub)
168 , ZERO(local_universe_type::eq_zero())
169 , ONE(local_universe_type::eq_one())
170 , bytecodes(battery::allocate_root<bytecodes_type, allocator_type>(alloc, *(other.bytecodes), alloc))
171 {}
172
173 CUDA PIR(PIR&& other)
174 : atype(other.atype)
175 , sub(std::move(other.sub))
176 , ZERO(std::move(other.ZERO))
177 , ONE(std::move(other.ONE))
178 , bytecodes(std::move(other.bytecodes))
179 {}
180
181private:
182 // When activated (`deps.is_shared_copy()`), we avoid copying the propagators and instead share them with those of the root `other`.
183 // This saves memory and avoids contention on the L2 cache among blocks.
184 template<class A2, class Alloc2, class... Allocators>
185 CUDA static bytecodes_ptr init_bytecodes(const PIR<A2, Alloc2>& other, AbstractDeps<Allocators...>& deps) {
186 auto alloc = deps.template get_allocator<allocator_type>();
187 if constexpr(std::is_same_v<allocator_type, Alloc2>) {
188 if(deps.is_shared_copy()) {
189 assert(static_cast<bool>(other.bytecodes));
190 return other.bytecodes;
191 }
192 }
193 bytecodes_ptr r = battery::allocate_root<bytecodes_type, allocator_type>(alloc, *(other.bytecodes), alloc);
194 return std::move(r);
195 }
196
197public:
198 template<class A2, class Alloc2, class... Allocators>
199 CUDA PIR(const PIR<A2, Alloc2>& other, AbstractDeps<Allocators...>& deps)
200 : atype(other.atype)
201 , sub(deps.template clone<A>(other.sub))
202 , ZERO(other.ZERO)
203 , ONE(other.ONE)
204 , bytecodes(init_bytecodes(other, deps))
205 {}
206
207 CUDA allocator_type get_allocator() const {
208 return bytecodes.get_allocator();
209 }
210
211 CUDA AType aty() const {
212 return atype;
213 }
214
215 CUDA static this_type bot(AType atype = UNTYPED,
216 AType atype_sub = UNTYPED,
217 const allocator_type& alloc = allocator_type(),
218 const sub_allocator_type& sub_alloc = sub_allocator_type())
219 {
220 return PIR{atype, battery::allocate_shared<sub_type>(sub_alloc, sub_type::bot(atype_sub, sub_alloc)), alloc};
221 }
222
223 /** A special symbolic element representing top. */
224 CUDA static this_type top(AType atype = UNTYPED,
225 AType atype_sub = UNTYPED,
226 const allocator_type& alloc = allocator_type(),
227 const sub_allocator_type& sub_alloc = sub_allocator_type())
228 {
229 return PIR{atype, battery::allocate_shared<sub_type>(sub_alloc, sub_type::top(atype_sub, sub_alloc)), alloc};
230 }
231
232 template <class Env>
233 CUDA static this_type bot(Env& env,
234 const allocator_type& alloc = allocator_type(),
235 const sub_allocator_type& sub_alloc = sub_allocator_type())
236 {
237 AType atype_sub = env.extends_abstract_dom();
238 AType atype = env.extends_abstract_dom();
239 return bot(atype, atype_sub, alloc, sub_alloc);
240 }
241
242 template <class Env>
243 CUDA static this_type top(Env& env,
244 const allocator_type& alloc = allocator_type(),
245 const sub_allocator_type& sub_alloc = sub_allocator_type())
246 {
247 AType atype_sub = env.extends_abstract_dom();
248 AType atype = env.extends_abstract_dom();
249 return top(atype, atype_sub, alloc, sub_alloc);
250 }
251
252private:
253 /** We interpret the formula `f` into the value `intermediate`; note that we add at most one constraint to `intermediate`, and only if the interpretation succeeds. */
254 template <IKind kind, bool diagnose, class F, class Env, class Intermediate>
255 CUDA bool interpret_formula(const F& f, Env& env, Intermediate& intermediate, IDiagnostics& diagnostics) const {
256 if(f.type() != aty() && !f.is_untyped()) {
257 RETURN_INTERPRETATION_ERROR("The type of the formula does not match the type of this abstract domain.");
258 }
259 if(f.is_binary()) {
260 Sig sig = f.sig();
261 // Expect constraint of the form X = Y <OP> Z, or Y <OP> Z = X.
262 int left = f.seq(0).is_binary() ? 1 : 0;
263 int right = f.seq(1).is_binary() ? 1 : 0;
264 if((sig == EQ || sig == EQUIV) && (left + right == 1)) {
265 auto& X = f.seq(left);
266 auto& Y = f.seq(right).seq(0);
267 auto& Z = f.seq(right).seq(1);
268 bytecode_type bytecode;
269 bytecode.op = f.seq(right).sig();
270 if(X.is_variable() && Y.is_variable() && Z.is_variable() &&
271 (bytecode.op == ADD || bytecode.op == MUL || ::lala::is_z_division(bytecode.op) || bytecode.op == EMOD
272 || bytecode.op == MIN || bytecode.op == MAX
273 || bytecode.op == EQ || bytecode.op == LEQ))
274 {
275 if( env.template interpret<diagnose>(X, bytecode.x, diagnostics)
276 && env.template interpret<diagnose>(Y, bytecode.y, diagnostics)
277 && env.template interpret<diagnose>(Z, bytecode.z, diagnostics))
278 {
279 intermediate.bytecodes.push_back(bytecode);
280 return true;
281 }
282 RETURN_INTERPRETATION_ERROR("Could not interpret the variables in the environment.");
283 }
284 }
285 }
286 RETURN_INTERPRETATION_ERROR("The shape of this formula is not supported.");
287 }
288
289public:
290 template <IKind kind, bool diagnose = false, class F, class Env, class I>
291 CUDA NI bool interpret(const F& f, Env& env, I& intermediate, IDiagnostics& diagnostics) const {
292 size_t error_context = 0;
293 if constexpr(diagnose) {
294 diagnostics.add_suberror(IDiagnostics(false, name, "Uninterpretable formula in both PIR and its sub-domain.", f));
295 error_context = diagnostics.num_suberrors();
296 }
297 bool res = false;
298 AType current = f.type();
299 const_cast<F&>(f).type_as(sub->aty()); // We will restore the type after the call to sub->interpret.
300 if(sub->template interpret<kind, diagnose>(f, env, intermediate.sub_value, diagnostics)) {
301 res = true;
302 }
303 const_cast<F&>(f).type_as(current);
304 if(!res) {
305 res = interpret_formula<kind, diagnose>(f, env, intermediate, diagnostics);
306 }
307 if constexpr(diagnose) {
308 diagnostics.merge(res, error_context);
309 }
310 return res;
311 }
312
313 /** PIR expects a non-conjunctive formula \f$ c \f$ which can either be interpreted in the sub-domain `A` or in the current domain.
314 */
315 template <bool diagnose = false, class F, class Env, class Alloc2>
316 CUDA NI bool interpret_tell(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
317 return interpret<IKind::TELL, diagnose>(f, env, tell, diagnostics);
318 }
319
320 template <bool diagnose = false, class F, class Env, class Alloc2>
321 CUDA NI bool interpret_ask(const F& f, const Env& env, ask_type<Alloc2>& ask, IDiagnostics& diagnostics) const {
322 return interpret<IKind::ASK, diagnose>(f, const_cast<Env&>(env), ask, diagnostics);
323 }
324
325 /** Similar limitations to `PC::deduce`. */
326 template <class Alloc2>
327 CUDA local::B deduce(const tell_type<Alloc2>& t) {
328 local::B has_changed = sub->deduce(t.sub_value);
329 if(t.bytecodes.size() > 0) {
330 bytecodes->reserve(bytecodes->size() + t.bytecodes.size());
331 for(int i = 0; i < t.bytecodes.size(); ++i) {
332 bytecodes->push_back(t.bytecodes[i]);
333 if(t.bytecodes[i].op == EQ || t.bytecodes[i].op == LEQ) {
334 sub->embed(t.bytecodes[i].x, local_universe_type(0,1));
335 }
336 }
337 /** Sort the constraints `X = Y <op> Z` according to `<op>`.
338 * Note that battery::sorti is much slower than std::sort, hence the #ifdef. */
339 #ifdef __CUDA_ARCH__
340 battery::sorti(*bytecodes,
341 [&](int i, int j) { return (*bytecodes)[i].op < (*bytecodes)[j].op; });
342 #else
343 std::stable_sort(bytecodes->data(), bytecodes->data() + bytecodes->size(),
344 [](const bytecode_type& a, const bytecode_type& b) {
345 // return a.op < b.op;
346 return a.op == b.op ? (a.y.vid() == b.y.vid() ? (a.x.vid() == b.x.vid() ? a.z.vid() < b.z.vid() : a.x.vid() < b.x.vid()) : a.y.vid() < b.y.vid()) : a.op < b.op;
347 });
348 #endif
349 has_changed = true;
350 }
351 return has_changed;
352 }
353
354 CUDA bool embed(AVar x, const universe_type& dom) {
355 return sub->embed(x, dom);
356 }
357
358public:
359 CUDA INLINE bytecode_type load_deduce(int i) const {
360 #ifdef __CUDA_ARCH__
361 // Vectorize load (int4).
362 int4 b4 = reinterpret_cast<int4*>(bytecodes->data())[i];
363 return *reinterpret_cast<bytecode_type*>(&b4);
364 #else
365 return (*bytecodes)[i];
366 #endif
367 }
368
369 CUDA local::B ask(int i) const {
370 return ask(load_deduce(i));
371 }
372
373 template <class Alloc2>
374 CUDA local::B ask(const ask_type<Alloc2>& t) const {
375 for(int i = 0; i < t.bytecodes.size(); ++i) {
376 if(!ask(t.bytecodes[i])) {
377 return false;
378 }
379 }
380 return sub->ask(t.sub_value);
381 }
382
383 CUDA int num_deductions() const {
384 return bytecodes->size();
385 }
386
387public:
388 CUDA local::B deduce(int i) {
389 assert(i < num_deductions());
390 return deduce(load_deduce(i));
391 }
392
393 using Itv = local_universe_type;
394 using value_t = typename Itv::LB::value_type;
395
396// Some defines to make the code more readable and closer to the paper, without introducing local variables.
397#define xl r1.lb().value()
398#define xu r1.ub().value()
399#define yl r2.lb().value()
400#define yu r2.ub().value()
401#define zl r3.lb().value()
402#define zu r3.ub().value()
403
404#define INF std::numeric_limits<value_t>::max()
405#define MINF std::numeric_limits<value_t>::min()
406
407private:
408 CUDA INLINE value_t div(value_t a, Sig op, value_t b) const {
409 switch(op) {
410 case TDIV: return battery::tdiv(a, b);
411 case CDIV: return battery::cdiv(a, b);
412 case FDIV: return battery::fdiv(a, b);
413 case EDIV: return battery::ediv(a, b);
414 default: assert(false); return a;
415 }
416 }
417
418 CUDA local::B ask(bytecode_type bytecode) const {
419 // We load the variables.
420 local_universe_type r1((*sub)[bytecode.x]);
421 local_universe_type r2((*sub)[bytecode.y]);
422 local_universe_type r3((*sub)[bytecode.z]);
423 switch(bytecode.op) {
424 case EQ: return (xl == 1 && yu == zl && yl == zu) || (xu == 0 && (yu < zl || yl > zu));
425 case LEQ: return (xl == 1 && yu <= zl) || (xu == 0 && yl > zu);
426 case ADD: return (xl == xu && yl == yu && zl == zu && xl == yl + zl);
427 case MUL: return xl == xu &&
428 ((yl == yu && zl == zu && xl == yl * zl)
429 || (xl == 0 && (r2 == 0 || r3 == 0)));
430 case TDIV:
431 case CDIV:
432 case FDIV:
433 case EDIV: return (xl == xu && yl == yu && zl == zu && zl != 0 && xl == div(yl, bytecode.op, zl))
434 || (xl == yu && xu == yl && xl == 0 && (zl > 0 || zu < 0)); // 0 = 0 / z (z != 0).
435 case EMOD: return (xl == xu && yl == yu && zl == zu && zl != 0 && xl == battery::emod(yl, zl));
436 case MIN: return (xl == yu && xu == yl && yu <= zl) || (xl == zu && xu == zl && zu <= yl);
437 case MAX: return (xl == yu && xu == yl && yl >= zu) || (xl == zu && xu == zl && zl >= yu);
438 default: assert(false); return false;
439 }
440 }
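// Example of this entailment test (a sketch): for a bytecode `x = (y <= z)` with
// x = [1,1], y = [2,4] and z = [5,9], the LEQ case returns true since xl == 1 and
// yu <= zl; with x = [0,1] neither branch applies and `ask` returns false, even if
// the constraint may become entailed later.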
441
442 CUDA INLINE value_t min(value_t a, value_t b) {
443 return battery::min(a, b);
444 }
445
446 CUDA INLINE value_t max(value_t a, value_t b) {
447 return battery::max(a, b);
448 }
449
450 // r1 = r2 / r3
451 CUDA INLINE void itv_div(Sig op, Itv& r1, Itv& r2, Itv& r3) {
452 if(zl < 0 && zu > 0) {
453 r1.lb() = max(xl, min(yl, yu == MINF ? INF : -yu));
454 r1.ub() = min(xu, max(yl == INF ? MINF : -yl, yu));
455 }
456 else {
457 if(zl == 0) { r3.lb() = 1; }
458 if(zu == 0) { r3.ub() = -1; }
459 if(yl == MINF || yu == INF || zl == MINF || zu == INF) { return; }
460 // Computing with bottom values usually does not hurt, but here we must prevent the divisor from being 0 (the conditions above assume r3 is not bot).
461 if(r3.is_bot()) { return; }
462 auto t1 = div(yl, op, zl);
463 auto t2 = div(yl, op, zu);
464 auto t3 = div(yu, op, zl);
465 auto t4 = div(yu, op, zu);
466 r1.lb() = max(xl, min(min(t1, t2), min(t3, t4)));
467 r1.ub() = min(xu, max(max(t1, t2), max(t3, t4)));
468 }
469 }
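// Worked example of the general branch of `itv_div` (a sketch): with y = [2,7],
// z = [2,3] and op == FDIV, the four candidates are fdiv(2,2) = 1, fdiv(2,3) = 0,
// fdiv(7,2) = 3 and fdiv(7,3) = 2, so r1 is tightened to [max(xl,0), min(xu,3)].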
470
471 CUDA INLINE void mul_inv(const Itv& r1, Itv& r2, Itv& r3) {
472 if(xl > 0 || xu < 0) {
473 if(zl == 0) { r3.lb() = 1; }
474 if(zu == 0) { r3.ub() = -1; }
475 }
476 if((xl > 0 || xu < 0) && zl < 0 && zu > 0) {
477 r2.lb() = max(yl, min(xl, xu == MINF ? INF : -xu));
478 r2.ub() = min(yu, max(xl == INF ? MINF : -xl, xu));
479 }
480 else if(xl > 0 || xu < 0 || zl > 0 || zu < 0) {
481 if(xl == MINF || xu == INF || zl == MINF || zu == INF) { return; }
482 // Computing with bottom values usually does not hurt, but here we must prevent the divisor from being 0 (the conditions above assume r3 is not bot).
483 if(r3.is_bot()) { return; }
484 r2.lb() = max(yl, min(min(battery::cdiv(xl, zl), battery::cdiv(xl, zu)), min(battery::cdiv(xu, zl), battery::cdiv(xu, zu))));
485 r2.ub() = min(yu, max(max(battery::fdiv(xl, zl), battery::fdiv(xl, zu)), max(battery::fdiv(xu, zl), battery::fdiv(xu, zu))));
486 }
487 }
488
489public:
490 CUDA local::B deduce(bytecode_type bytecode) {
491 local::B has_changed = false;
492 // We load the variables.
493 Itv r1((*sub)[bytecode.x]);
494 Itv r2((*sub)[bytecode.y]);
495 Itv r3((*sub)[bytecode.z]);
496 value_t t1, t2, t3, t4; // Temporary variables for multiplication.
497
498 switch(bytecode.op) {
499 case EQ: {
500 if(r1 == ONE) {
501 has_changed |= sub->embed(bytecode.y, r3);
502 has_changed |= sub->embed(bytecode.z, r2);
503 }
504 else if(r1 == ZERO && (yl == yu || zl == zu)) {
505 has_changed |= sub->embed(zl == zu ? bytecode.y : bytecode.z, // If z is a singleton, we update y, and vice-versa.
506 Itv(
507 yl == zl ? yl + 1 : LB::top().value(),
508 yu == zu ? yu - 1 : UB::top().value()));
509 }
510 else if(yu == zl && yl == zu) { has_changed |= sub->embed(bytecode.x, ONE); }
511 else if(yl > zu || yu < zl) { has_changed |= sub->embed(bytecode.x, ZERO); }
512 return has_changed;
513 }
514 case LEQ: {
515 if(r1 == ONE) {
516 has_changed |= sub->embed(bytecode.y, Itv(yl, zu));
517 has_changed |= sub->embed(bytecode.z, Itv(yl, zu));
518 }
519 else if(r1 == ZERO) {
520 has_changed |= sub->embed(bytecode.y, Itv(zl + 1, yu));
521 has_changed |= sub->embed(bytecode.z, Itv(zl, yu - 1));
522 }
523 else if(yu <= zl) { has_changed |= sub->embed(bytecode.x, ONE); }
524 else if(yl > zu) { has_changed |= sub->embed(bytecode.x, ZERO); }
525 return has_changed;
526 }
527 case ADD: {
528 r1.lb() = (yl == MINF || zl == MINF) ? xl : max(xl, yl + zl);
529 r1.ub() = (yu == INF || zu == INF) ? xu : min(xu, yu + zu);
530 r2.lb() = (xl == MINF || zu == INF) ? yl : max(yl, xl - zu);
531 r2.ub() = (xu == INF || zl == MINF) ? yu : min(yu, xu - zl);
532 r3.lb() = (xl == MINF || yu == INF) ? zl : max(zl, xl - yu);
533 r3.ub() = (xu == INF || yl == MINF) ? zu : min(zu, xu - yl);
534 break;
535 }
536 case MUL: {
537 if(yl != MINF && yu != INF && zl != MINF && zu != INF) {
538 t1 = yl * zl;
539 t2 = yl * zu;
540 t3 = yu * zl;
541 t4 = yu * zu;
542 r1.lb() = max(xl, min(min(t1, t2), min(t3, t4)));
543 r1.ub() = min(xu, max(max(t1, t2), max(t3, t4)));
544 }
545 mul_inv(r1, r2, r3);
546 mul_inv(r1, r3, r2);
547 break;
548 }
549 case TDIV:
550 case CDIV:
551 case FDIV:
552 case EDIV: {
553 itv_div(bytecode.op, r1, r2, r3);
554 // The remaining inverse propagation below is currently incorrect; it is left commented out until it is fixed.
555 // if(xl != MINF && xu != INF && zl != MINF && zu != INF) {
556 // t1 = xl * zl;
557 // t2 = xl * zu;
558 // t3 = xu * zl;
559 // t4 = xu * zu;
560 // r2.lb() = max(yl, min(min(t1, t2), min(t3, t4)));
561 // r2.ub() = min(yu, max(zu - 1, max(max(t1, t2), max(t3, t4))));
562 // }
563 // if((yl > 0 || yu < 0) && (xl > 0 || xu < 0)) { itv_div(bytecode.op, r3, r2, r1); }
564 break;
565 }
566 case EMOD: {
567 if(zl == 0) { r3.lb() = 1; }
568 if(zu == 0) { r3.ub() = -1; }
569 if(yl == yu && zl == zu) {
570 r1.lb() = battery::emod(yl, zl);
571 r1.ub() = xl; // `xl` re-reads the lower bound assigned just above, so r1 becomes the singleton emod(yl, zl).
572 }
573 break;
574 }
575 case MIN: {
576 r1.lb() = max(xl, min(yl, zl));
577 r1.ub() = min(xu, min(yu, zu));
578 r2.lb() = max(yl, xl);
579 if(xu < zl) { r2.ub() = min(yu, xu); }
580 r3.lb() = max(zl, xl);
581 if(xu < yl) { r3.ub() = min(zu, xu); }
582 break;
583 }
584 case MAX: {
585 r1.lb() = max(xl, max(yl, zl));
586 r1.ub() = min(xu, max(yu, zu));
587 r2.ub() = min(yu, xu);
588 if(xl > zu) { r2.lb() = max(yl, xl); }
589 r3.ub() = min(zu, xu);
590 if(xl > yu) { r3.lb() = max(zl, xl); }
591 break;
592 }
593 default: assert(false);
594 }
595 has_changed |= sub->embed(bytecode.x, r1);
596 has_changed |= sub->embed(bytecode.y, r2);
597 has_changed |= sub->embed(bytecode.z, r3);
598 return has_changed;
599 }
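// Worked example of the ADD case above (a sketch): with x = [0,10], y = [2,5] and
// z = [4,6], x is first tightened to [max(0,2+4), min(10,5+6)] = [6,10]; the inverse
// rules then read the already-tightened bounds of x through the macros and leave
// y = [2,5] and z = [4,6] unchanged, since max(2,6-6) = 2, min(5,10-4) = 5,
// max(4,6-5) = 4 and min(6,10-2) = 6.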
600
601#undef xl
602#undef xu
603#undef yl
604#undef yu
605#undef zl
606#undef zu
607#undef INF
608#undef MINF
609
610 // Functions forwarded to the sub-domain `A`.
611
612 /** `true` if the underlying abstract element is bot, `false` otherwise. */
613 CUDA local::B is_bot() const {
614 return sub->is_bot();
615 }
616
617 /** `true` if the underlying abstract element is top and there is no deduction function, `false` otherwise. */
618 CUDA local::B is_top() const {
619 return sub->is_top() && bytecodes->size() == 0;
620 }
621
622 CUDA auto /* universe_type or const universe_type& */ operator[](int x) const {
623 return (*sub)[x];
624 }
625
626 CUDA auto /* universe_type or const universe_type& */ project(AVar x) const {
627 return sub->project(x);
628 }
629
630 template <class Univ>
631 CUDA void project(AVar x, Univ& u) const {
632 sub->project(x, u);
633 }
634
635 CUDA int vars() const {
636 return sub->vars();
637 }
638
639 template <class Alloc2 = allocator_type>
640 CUDA snapshot_type<Alloc2> snapshot(const Alloc2& alloc = Alloc2()) const {
641 assert(static_cast<bool>(bytecodes));
642 return snapshot_type<Alloc2>(bytecodes->size(), sub->snapshot(alloc));
643 }
644
645 template <class Alloc2>
646 CUDA void restore(const snapshot_type<Alloc2>& snap) {
647 int n = bytecodes->size();
648 for(int i = snap.num_bytecodes; i < n; ++i) {
649 bytecodes->pop_back();
650 }
651 sub->restore(snap.sub_snap);
652 }
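// A minimal sketch of speculative deduction using `snapshot` and `restore`, assuming
// `pir` is a PIR object and `t` an already interpreted tell (both hypothetical names):
//
//   auto snap = pir.snapshot();  // records the current number of bytecodes and the sub-state
//   pir.deduce(t);               // adds propagators and refines the sub-domain
//   if(pir.is_bot()) {
//     pir.restore(snap);         // drops the added propagators and rolls back the sub-domain
//   }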
653
654 /** An abstract element is extractable when it is not equal to bot, all propagators are entailed, and the underlying abstract element is extractable. */
655 template <class ExtractionStrategy = NonAtomicExtraction>
656 CUDA bool is_extractable(const ExtractionStrategy& strategy = ExtractionStrategy()) const {
657 if(is_bot()) {
658 return false;
659 }
660 for(int i = 0; i < bytecodes->size(); ++i) {
661 if(!ask(i)) {
662 return false;
663 }
664 }
665 return sub->is_extractable(strategy);
666 }
667
668 /** Extract the current element into `ua`.
669 * \pre `is_extractable()` must be `true`.
670 * For efficiency reasons, if `B` is a propagator completion, the propagators are not copied into `ua`.
671 * (This is sound: since they are entailed, they no longer bring any information.) */
672 template <class B>
673 CUDA void extract(B& ua) const {
674 if constexpr(impl::is_pir_like<B>::value) {
675 sub->extract(*ua.sub);
676 }
677 else {
678 sub->extract(ua);
679 }
680 }
681
682private:
683 template<class Env, class Allocator2>
684 CUDA NI TFormula<Allocator2> deinterpret(bytecode_type bytecode, const Env& env, Allocator2 allocator) const {
685 using F = TFormula<Allocator2>;
686 auto X = F::make_lvar(bytecode.x.aty(), LVar<Allocator2>(env.name_of(bytecode.x), allocator));
687 auto Y = F::make_lvar(bytecode.y.aty(), LVar<Allocator2>(env.name_of(bytecode.y), allocator));
688 auto Z = F::make_lvar(bytecode.z.aty(), LVar<Allocator2>(env.name_of(bytecode.z), allocator));
689 return F::make_binary(X, EQ, F::make_binary(Y, bytecode.op, Z, aty(), allocator), aty(), allocator);
690 }
691
692public:
693 template<class Env, class Allocator2 = typename Env::allocator_type>
694 CUDA NI TFormula<Allocator2> deinterpret(const Env& env, bool remove_entailed, size_t& num_entailed, Allocator2 allocator = Allocator2()) const {
695 using F = TFormula<Allocator2>;
696 typename F::Sequence seq{allocator};
697 seq.push_back(sub->deinterpret(env, allocator));
698 for(int i = 0; i < bytecodes->size(); ++i) {
699 if(remove_entailed && ask(i)) {
700 num_entailed++;
701 continue;
702 }
703 seq.push_back(deinterpret((*bytecodes)[i], env, allocator));
704 }
705 return F::make_nary(AND, std::move(seq), aty());
706 }
707
708 template<class Env, class Allocator2 = typename Env::allocator_type>
709 CUDA NI TFormula<Allocator2> deinterpret(const Env& env, Allocator2 allocator = Allocator2()) const {
710 size_t num_entailed = 0;
711 return deinterpret(env, false, num_entailed, allocator);
712 }
713
714 template<class I, class Env, class Allocator2 = typename Env::allocator_type>
715 CUDA NI TFormula<Allocator2> deinterpret(const I& intermediate, const Env& env, Allocator2 allocator = Allocator2()) const {
716 using F = TFormula<Allocator2>;
717 typename F::Sequence seq{allocator};
718 seq.push_back(sub->deinterpret(intermediate.sub_value, env, allocator));
719 for(int i = 0; i < intermediate.bytecodes.size(); ++i) {
720 seq.push_back(deinterpret(intermediate.bytecodes[i], env, allocator));
721 }
722 return F::make_nary(AND, std::move(seq), aty());
723 }
724};
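// A minimal propagation-loop sketch over the public API of this class, assuming
// `pir` already holds interpreted constraints (hypothetical variable name):
//
//   bool changed = true;
//   while(changed) {
//     changed = false;
//     for(int i = 0; i < pir.num_deductions(); ++i) {
//       if(pir.deduce(i)) { changed = true; }  // run the i-th ternary propagator
//     }
//   }
//   // After the fixpoint, `pir.is_extractable()` holds when the element is not bot,
//   // every propagator is entailed (`pir.ask(i)`), and the sub-domain is extractable.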
725
726}
727
728#endif