Lattice land propagators completion library
pir.hpp
Go to the documentation of this file.
1// Copyright 2024 Pierre Talbot
2
3#ifndef LALA_PC_PIR_HPP
4#define LALA_PC_PIR_HPP
5
6#include "battery/vector.hpp"
7#include "battery/unique_ptr.hpp"
8#include "battery/shared_ptr.hpp"
9#include "battery/root_ptr.hpp"
10#include "battery/allocator.hpp"
11#include "battery/algorithm.hpp"
12
13#include "lala/logic/logic.hpp"
14#include "lala/logic/ternarize.hpp"
15#include "lala/universes/arith_bound.hpp"
16#include "lala/abstract_deps.hpp"
17#include "lala/vstore.hpp"
18
19#include "terms.hpp"
20
21namespace lala {
22
23template <class A, class Alloc> class PIR;
24namespace impl {
25 template <class>
26 struct is_pir_like {
27 static constexpr bool value = false;
28 };
29 template<class A, class Alloc>
30 struct is_pir_like<PIR<A, Alloc>> {
31 static constexpr bool value = true;
32 };
33}
34
35 // union bytecode_type
36 // {
37 // This represents the constraints `X = Y [op] Z`.
38 struct bytecode_type {
39 Sig op;
40 AVar x;
41 AVar y;
42 AVar z;
43 };
44 // int4 code;
45 // };
46
47/** PIR is an abstract transformer built on top of an abstract domain `A`.
48 It is expected that `A` has a projection function `u = project(x)`.
49 We also expect a `tell(x, u, has_changed)` function to join the abstract universe `u` into the domain of the variable `x`.
50 An example of an abstract domain satisfying these requirements is `VStore<Interval<ZInc>>`. */
51template <class A, class Allocator = typename A::allocator_type>
52class PIR {
53public:
54 using sub_type = A;
55 using universe_type = typename A::universe_type;
56 using local_universe_type = typename universe_type::local_type;
57 using allocator_type = Allocator;
58 using sub_allocator_type = typename A::allocator_type;
59 using this_type = PIR<A, Allocator>;
60
61 template <class Alloc>
62 struct snapshot_type
63 {
64 using sub_snap_type = A::template snapshot_type<Alloc>;
65 size_t num_bytecodes;
66 sub_snap_type sub_snap;
67
68 CUDA snapshot_type(size_t num_bytecodes, sub_snap_type&& sub_snap)
69 : num_bytecodes(num_bytecodes)
70 , sub_snap(std::move(sub_snap))
71 {}
72
73 snapshot_type(const snapshot_type<Alloc>&) = default;
74 snapshot_type(snapshot_type<Alloc>&&) = default;
75 snapshot_type<Alloc>& operator=(const snapshot_type<Alloc>&) = default;
76 snapshot_type<Alloc>& operator=(snapshot_type<Alloc>&&) = default;
77
78 template <class SnapshotType>
79 CUDA snapshot_type(const SnapshotType& other, const Alloc& alloc = Alloc{})
80 : num_bytecodes(other.num_bytecodes)
81 , sub_snap(other.sub_snap, alloc)
82 {}
83 };
84
85 using sub_ptr = abstract_ptr<sub_type>;
86
87 constexpr static const bool is_abstract_universe = false;
88 constexpr static const bool sequential = sub_type::sequential;
89 constexpr static const bool is_totally_ordered = false;
90 constexpr static const bool preserve_bot = true;
91 // The next properties should be checked more carefully; relying on the sub-domain might be unnecessarily restrictive.
92 constexpr static const bool preserve_top = sub_type::preserve_top;
93 constexpr static const bool preserve_join = sub_type::preserve_join;
94 constexpr static const bool preserve_meet = sub_type::preserve_meet;
95 constexpr static const bool injective_concretization = sub_type::injective_concretization;
96 constexpr static const bool preserve_concrete_covers = sub_type::preserve_concrete_covers;
97 constexpr static const char* name = "PIR";
98
99 template <class A2, class Alloc2>
100 friend class PIR;
101
102private:
103 AType atype;
104 sub_ptr sub;
105
106 const local_universe_type ZERO;
107 const local_universe_type ONE;
108
109 static_assert(sizeof(int) == sizeof(AVar), "The size of AVar must be equal to the size of an int.");
110 static_assert(sizeof(int) == sizeof(Sig), "The size of Sig must be equal to the size of an int.");
111
112 using bytecodes_type = battery::vector<bytecode_type, allocator_type>;
113 using bytecodes_ptr = battery::root_ptr<battery::vector<bytecode_type, allocator_type>, allocator_type>;
114
115 /** We represent the constraints X = Y [op] Z. */
116 bytecodes_ptr bytecodes;
117
118 using LB = typename local_universe_type::LB;
119 using UB = typename local_universe_type::UB;
120
121public:
122 template <class Alloc, class SubType>
123 struct interpreted_type {
124 SubType sub_value;
125 battery::vector<bytecode_type, Alloc> bytecodes;
126
127 interpreted_type(const interpreted_type&) = default;
128 interpreted_type(interpreted_type&&) = default;
129 interpreted_type& operator=(interpreted_type&&) = default;
130
131 CUDA interpreted_type(const SubType& sub_value, const Alloc& alloc = Alloc{})
132 : sub_value(sub_value)
133 , bytecodes(alloc)
134 {}
135
136 CUDA interpreted_type(const Alloc& alloc = Alloc{})
137 : interpreted_type(SubType(alloc), alloc)
138 {}
139
140 template <class InterpretedType>
141 CUDA interpreted_type(const InterpretedType& other, const Alloc& alloc = Alloc{})
142 : sub_value(other.sub_value, alloc)
143 , bytecodes(other.bytecodes, alloc)
144 {}
145
146 template <class Alloc2, class SubType2>
147 friend struct interpreted_type;
148 };
149
150 template <class Alloc>
151 using tell_type = interpreted_type<Alloc, typename sub_type::template tell_type<Alloc>>;
152
153 template <class Alloc>
154 using ask_type = interpreted_type<Alloc, typename sub_type::template ask_type<Alloc>>;
155
156 CUDA PIR(AType atype, sub_ptr sub, const allocator_type& alloc = allocator_type{})
157 : atype(atype), sub(std::move(sub))
158 , ZERO(local_universe_type::eq_zero())
159 , ONE(local_universe_type::eq_one())
160 , bytecodes(battery::allocate_root<bytecodes_type, allocator_type>(alloc, alloc))
161 {}
162
163 CUDA PIR(PIR&& other)
164 : atype(other.atype)
165 , sub(std::move(other.sub))
166 , ZERO(std::move(other.ZERO))
167 , ONE(std::move(other.ONE))
168 , bytecodes(std::move(other.bytecodes))
169 {}
170
171private:
172 // When activated (`deps.is_shared_copy()`), we avoid copying the propagators and share them with those of the root `other`.
173 // This saves memory and avoids contention on the L2 cache among blocks.
174 template<class A2, class Alloc2, class... Allocators>
175 CUDA static bytecodes_ptr init_bytecodes(const PIR<A2, Alloc2>& other, AbstractDeps<Allocators...>& deps) {
176 auto alloc = deps.template get_allocator<allocator_type>();
177 if constexpr(std::is_same_v<allocator_type, Alloc2>) {
178 if(deps.is_shared_copy()) {
179 return other.bytecodes;
180 }
181 }
182 bytecodes_ptr r = battery::allocate_root<bytecodes_type, allocator_type>(alloc, *(other.bytecodes), alloc);
183 return std::move(r);
184 }
185
186public:
187 template<class A2, class Alloc2, class... Allocators>
188 CUDA PIR(const PIR<A2, Alloc2>& other, AbstractDeps<Allocators...>& deps)
189 : atype(other.atype)
190 , sub(deps.template clone<A>(other.sub))
191 , ZERO(other.ZERO)
192 , ONE(other.ONE)
193 , bytecodes(init_bytecodes(other, deps))
194 {}
195
196 CUDA allocator_type get_allocator() const {
197 return bytecodes.get_allocator();
198 }
199
200 CUDA AType aty() const {
201 return atype;
202 }
203
204 CUDA static this_type bot(AType atype = UNTYPED,
205 AType atype_sub = UNTYPED,
206 const allocator_type& alloc = allocator_type(),
207 const sub_allocator_type& sub_alloc = sub_allocator_type())
208 {
209 return PIR{atype, battery::allocate_shared<sub_type>(sub_alloc, sub_type::bot(atype_sub, sub_alloc)), alloc};
210 }
211
212 /** A special symbolic element representing top. */
213 CUDA static this_type top(AType atype = UNTYPED,
214 AType atype_sub = UNTYPED,
215 const allocator_type& alloc = allocator_type(),
216 const sub_allocator_type& sub_alloc = sub_allocator_type())
217 {
218 return PIR{atype, battery::allocate_shared<sub_type>(sub_alloc, sub_type::top(atype_sub, sub_alloc)), alloc};
219 }
220
221 template <class Env>
222 CUDA static this_type bot(Env& env,
223 const allocator_type& alloc = allocator_type(),
224 const sub_allocator_type& sub_alloc = sub_allocator_type())
225 {
226 AType atype_sub = env.extends_abstract_dom();
227 AType atype = env.extends_abstract_dom();
228 return bot(atype, atype_sub, alloc, sub_alloc);
229 }
230
231 template <class Env>
232 CUDA static this_type top(Env& env,
233 const allocator_type& alloc = allocator_type(),
234 const sub_allocator_type& sub_alloc = sub_allocator_type())
235 {
236 AType atype_sub = env.extends_abstract_dom();
237 AType atype = env.extends_abstract_dom();
238 return top(atype, atype_sub, alloc, sub_alloc);
239 }
240
241private:
242 /** We interpret the formula `f` into the value `intermediate`; note that we add (at most) one constraint to `intermediate`, and only if the interpretation succeeds. */
243 template <IKind kind, bool diagnose, class F, class Env, class Intermediate>
244 CUDA bool interpret_formula(const F& f, Env& env, Intermediate& intermediate, IDiagnostics& diagnostics) const {
245 if(f.type() != aty() && !f.is_untyped()) {
246 RETURN_INTERPRETATION_ERROR("The type of the formula does not match the type of this abstract domain.");
247 }
248 if(f.is_binary()) {
249 Sig sig = f.sig();
250 // Expect constraint of the form X = Y <OP> Z, or Y <OP> Z = X.
251 int left = f.seq(0).is_binary();
252 int right = f.seq(1).is_binary();
253 if(sig == EQ && (left || right)) {
254 auto& X = f.seq(left);
255 auto& Y = f.seq(right).seq(0);
256 auto& Z = f.seq(right).seq(1);
257 bytecode_type bytecode;
258 bytecode.op = f.seq(right).sig();
259 if(X.is_variable() && Y.is_variable() && Z.is_variable() &&
260 (bytecode.op == ADD || bytecode.op == MUL || bytecode.op == SUB || bytecode.op == EDIV || bytecode.op == EMOD
261 || bytecode.op == MIN || bytecode.op == MAX
262 || bytecode.op == EQ || bytecode.op == LEQ))
263 {
264 if( env.template interpret<diagnose>(X, bytecode.x, diagnostics)
265 && env.template interpret<diagnose>(Y, bytecode.y, diagnostics)
266 && env.template interpret<diagnose>(Z, bytecode.z, diagnostics))
267 {
268 intermediate.bytecodes.push_back(bytecode);
269 return true;
270 }
271 return false;
272 }
273 }
274 }
275 RETURN_INTERPRETATION_ERROR("The shape of this formula is not supported.");
276 }
277
278public:
279 template <IKind kind, bool diagnose = false, class F, class Env, class I>
280 CUDA NI bool interpret(const F& f, Env& env, I& intermediate, IDiagnostics& diagnostics) const {
281 size_t error_context = 0;
282 if constexpr(diagnose) {
283 diagnostics.add_suberror(IDiagnostics(false, name, "Uninterpretable formula in both PIR and its sub-domain.", f));
284 error_context = diagnostics.num_suberrors();
285 }
286 bool res = false;
287 AType current = f.type();
288 const_cast<F&>(f).type_as(sub->aty()); // We will restore the type after the call to sub->interpret.
289 if(sub->template interpret<kind, diagnose>(f, env, intermediate.sub_value, diagnostics)) {
290 // A successful interpretation in the sub-domain does not mean it is interpreted exactly.
291 // Sometimes, we can improve the precision by interpreting it in PIR.
292 // This is the case of the `x in S` predicate for sub-domains that do not preserve meet.
293 if(!(f.is_binary() && f.sig() == IN && f.seq(0).is_variable() && f.seq(1).is(F::S) && f.seq(1).s().size() > 1)) {
294 res = true; // it is not a formula `x in S`.
295 }
296 else {
297 res = universe_type::preserve_join; // it is `x in S` but it preserves join.
298 }
299 }
300 const_cast<F&>(f).type_as(current);
301 if(!res) {
302 if(f.is_binary() && f.sig() == IN && f.seq(1).is(F::S)) {
303 F g = normalize(ternarize(decompose_in_constraint(f), env));
304 res = ginterpret_in<kind, diagnose>(*this, g, env, intermediate, diagnostics);
305 }
306 /** If the formula is `t1 <op> (t2 in S)`, we decompose the `t2 in S` part. */
307 else if(f.is_binary() && f.seq(1).is_binary() && f.seq(1).sig() == IN) {
308 F g = normalize(ternarize(F::make_binary(f.seq(0), f.sig(), decompose_in_constraint(f.seq(1))), env, true));
309 res = ginterpret_in<kind, diagnose>(*this, g, env, intermediate, diagnostics);
310 }
311 /** It is a unary constraint that could not be interpreted by the underlying store. */
312 else if(num_vars(f) == 1) {
313 F g = normalize(ternarize(f, env, true));
314 if(g != f) {
315 res = ginterpret_in<kind, diagnose>(*this, g, env, intermediate, diagnostics);
316 }
317 else {
318 res = interpret_formula<kind, diagnose>(f, env, intermediate, diagnostics);
319 }
320 }
321 else {
322 res = interpret_formula<kind, diagnose>(f, env, intermediate, diagnostics);
323 }
324 }
325 if constexpr(diagnose) {
326 diagnostics.merge(res, error_context);
327 }
328 return res;
329 }
330
331 /** PIR expects a non-conjunctive formula \f$ c \f$ that can be interpreted either in the sub-domain `A` or in the current domain.
332 */
333 template <bool diagnose = false, class F, class Env, class Alloc2>
334 CUDA NI bool interpret_tell(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
335 return interpret<IKind::TELL, diagnose>(f, env, tell, diagnostics);
336 }
337
338 template <bool diagnose = false, class F, class Env, class Alloc2>
339 CUDA NI bool interpret_ask(const F& f, const Env& env, ask_type<Alloc2>& ask, IDiagnostics& diagnostics) const {
340 return interpret<IKind::ASK, diagnose>(f, const_cast<Env&>(env), ask, diagnostics);
341 }
342
343 /** Similar limitations to `PC::deduce`. */
344 template <class Alloc2>
345 CUDA local::B deduce(const tell_type<Alloc2>& t) {
346 local::B has_changed = sub->deduce(t.sub_value);
347 if(t.bytecodes.size() > 0) {
348 bytecodes->reserve(bytecodes->size() + t.bytecodes.size());
349 for(int i = 0; i < t.bytecodes.size(); ++i) {
350 bytecodes->push_back(t.bytecodes[i]);
351 }
352 /** This sorts the constraints `X = Y <op> Z` by their operator <OP>. */
353 battery::sorti(*bytecodes,
354 [&](int i, int j) { return (*bytecodes)[i].op < (*bytecodes)[j].op; });
355 has_changed = true;
356 }
357 return has_changed;
358 }
359
360 CUDA bool embed(AVar x, const universe_type& dom) {
361 return sub->embed(x, dom);
362 }
363
364private:
365 CUDA local::B ask(bytecode_type bytecode) const {
366 // We load the variables.
367 local_universe_type r1((*sub)[bytecode.x]);
368 local_universe_type r2((*sub)[bytecode.y]);
369 local_universe_type r3((*sub)[bytecode.z]);
370
371 // Reified constraint: X = (Y = Z) and X = (Y <= Z).
372 if(bytecode.op == EQ || bytecode.op == LEQ) {
373 // Y [op] Z
374 if(r1 >= ONE) {
375 return bytecode.op == EQ
376 ? (r2 == r3 && r2.lb().value() == r2.ub().value())
377 : r2.ub().value() <= r3.lb().value();
378 }
379 // not (Y [op] Z)
380 else if(r1 >= ZERO) {
381 return bytecode.op == EQ
382 ? fmeet(r2, r3).is_bot().value()
383 : r2.lb().value() > r3.ub().value();
384 }
385 // We need `r1` to be a singleton to decide whether the constraint is entailed.
386 return false;
387 }
388 // Arithmetic constraint: X = Y + Z, X = Y - Z, ...
389 else {
390 local_universe_type right{};
391 right.project(bytecode.op, r2, r3);
392 return right == r1 && r1.lb().value() == r1.ub().value();
393 }
394 }
395
396public:
397 CUDA local::B ask(size_t i) const {
398 return ask((*bytecodes)[i]);
399 }
400
401 template <class Alloc2>
402 CUDA local::B ask(const ask_type<Alloc2>& t) const {
403 for(int i = 0; i < t.bytecodes.size(); ++i) {
404 if(!ask(t.bytecodes[i])) {
405 return false;
406 }
407 }
408 return sub->ask(t.sub_value);
409 }
410
411 CUDA size_t num_deductions() const {
412 return bytecodes->size();
413 }
414
415public:
416 CUDA local::B deduce(size_t i) {
417 assert(i < num_deductions());
418
419 // Vectorize load (int4).
420 bytecode_type bytecode = (*bytecodes)[i];
421
422 local::B has_changed = false;
423
424 // We load the variables.
425 local_universe_type r1{};
426 local_universe_type r2((*sub)[bytecode.y]);
427 local_universe_type r3((*sub)[bytecode.z]);
428 // Reified constraint: X = (Y = Z) and X = (Y <= Z).
429 if(bytecode.op == EQ || bytecode.op == LEQ) {
430 r1 = (*sub)[bytecode.x];
431 // Y [op] Z
432 if(r1 <= ONE) {
433 if(bytecode.op == EQ) {
434 has_changed |= sub->embed(bytecode.y, r3);
435 has_changed |= sub->embed(bytecode.z, r2);
436 }
437 else {
438 assert(bytecode.op == LEQ);
439 r3.join_lb(LB::top());
440 r2.join_ub(UB::top());
441 has_changed |= sub->embed(bytecode.y, r3);
442 has_changed |= sub->embed(bytecode.z, r2);
443 }
444 }
445 // not (Y [op] Z)
446 else if(r1 <= ZERO) {
447 if(bytecode.op == EQ) {
448 if(r2.lb().value() == r2.ub().value()) {
449 r1 = r3;
450 r1.meet_lb(LB::prev(r2.lb()));
451 r3.meet_ub(UB::prev(r2.ub()));
452 has_changed |= sub->embed(bytecode.z, fjoin(r1,r3));
453 }
454 else if(r3.lb().value() == r3.ub().value()) {
455 r1 = r2;
456 r1.meet_lb(LB::prev(r3.lb()));
457 r2.meet_ub(UB::prev(r3.ub()));
458 has_changed |= sub->embed(bytecode.y, fjoin(r1,r2));
459 }
460 }
461 else {
462 assert(bytecode.op == LEQ);
463 r2.meet_lb(LB::prev(r3.lb()));
464 r3.meet_ub(UB::prev(r2.ub()));
465 has_changed |= sub->embed(bytecode.y, r2);
466 has_changed |= sub->embed(bytecode.z, r3);
467 }
468 }
469 // X <- 1
470 else if(r2.ub().value() <= r3.lb().value() && (bytecode.op == LEQ || r2.lb().value() == r3.ub().value())) {
471 has_changed |= sub->embed(bytecode.x, ONE);
472 }
473 // X <- 0
474 else if(r2.lb().value() > r3.ub().value() || (bytecode.op == EQ && r2.ub().value() < r3.lb().value())) {
475 has_changed |= sub->embed(bytecode.x, ZERO);
476 }
477 }
478 // Arithmetic constraint: X = Y + Z, X = Y - Z, ...
479 else {
480 // X <- Y [op] Z
481 r1.project(bytecode.op, r2, r3);
482 has_changed |= sub->embed(bytecode.x, r1);
483
484 // Y <- X <left residual> Z
485 r1 = (*sub)[bytecode.x];
486 switch(bytecode.op) {
487 case ADD: pc::GroupAdd<local_universe_type>::left_residual(r1, r3, r2); break;
488 case SUB: pc::GroupSub<local_universe_type>::left_residual(r1, r3, r2); break;
489 case MUL: pc::GroupMul<local_universe_type, EDIV>::left_residual(r1, r3, r2); break;
490 case EDIV: pc::GroupDiv<local_universe_type, EDIV>::left_residual(r1, r3, r2); break;
491 case MIN: pc::GroupMinMax<local_universe_type, MIN>::left_residual(r1, r3, r2); break;
492 case MAX: pc::GroupMinMax<local_universe_type, MAX>::left_residual(r1, r3, r2); break;
493 default: assert(false);
494 }
495 has_changed |= sub->embed(bytecode.y, r2);
496
497 // Z <- X <right residual> Y
498 r2 = (*sub)[bytecode.y];
499 r3.join_top();
500 switch(bytecode.op) {
501 case ADD: pc::GroupAdd<local_universe_type>::right_residual(r1, r2, r3); break;
502 case SUB: pc::GroupSub<local_universe_type>::right_residual(r1, r2, r3); break;
503 case MUL: pc::GroupMul<local_universe_type, EDIV>::right_residual(r1, r2, r3); break;
504 case EDIV: pc::GroupDiv<local_universe_type, EDIV>::right_residual(r1, r2, r3); break;
505 case MIN: pc::GroupMinMax<local_universe_type, MIN>::right_residual(r1, r2, r3); break;
506 case MAX: pc::GroupMinMax<local_universe_type, MAX>::right_residual(r1, r2, r3); break;
507 default: assert(false);
508 }
509 has_changed |= sub->embed(bytecode.z, r3);
510 }
511 return has_changed;
512 }
513
514 // Functions forwarded to the sub-domain `A`.
515
516 /** `true` if the underlying abstract element is bot, `false` otherwise. */
517 CUDA local::B is_bot() const {
518 return sub->is_bot();
519 }
520
521 /** `true` if the underlying abstract element is top and there is no deduction function, `false` otherwise. */
522 CUDA local::B is_top() const {
523 return sub->is_top() && bytecodes->size() == 0;
524 }
525
526 CUDA auto /* universe_type or const universe_type& */ operator[](int x) const {
527 return (*sub)[x];
528 }
529
530 CUDA auto /* universe_type or const universe_type& */ project(AVar x) const {
531 return sub->project(x);
532 }
533
534 template <class Univ>
535 CUDA void project(AVar x, Univ& u) const {
536 sub->project(x, u);
537 }
538
539 CUDA size_t vars() const {
540 return sub->vars();
541 }
542
543 template <class Alloc2 = allocator_type>
544 CUDA snapshot_type<Alloc2> snapshot(const Alloc2& alloc = Alloc2()) const {
545 return snapshot_type<Alloc2>(bytecodes->size(), sub->snapshot(alloc));
546 }
547
548 template <class Alloc2>
549 CUDA void restore(const snapshot_type<Alloc2>& snap) {
550 size_t n = bytecodes->size();
551 for(size_t i = snap.num_bytecodes; i < n; ++i) {
552 bytecodes->pop_back();
553 }
554 sub->restore(snap.sub_snap);
555 }
556
557 /** An abstract element is extractable when it is not equal to bot, all propagators are entailed, and the underlying abstract element is extractable. */
558 template <class ExtractionStrategy = NonAtomicExtraction>
559 CUDA bool is_extractable(const ExtractionStrategy& strategy = ExtractionStrategy()) const {
560 if(is_bot()) {
561 return false;
562 }
563 for(int i = 0; i < bytecodes->size(); ++i) {
564 if(!ask(i)) {
565 return false;
566 }
567 }
568 return sub->is_extractable(strategy);
569 }
570
571 /** Extract the current element into `ua`.
572 * \pre `is_extractable()` must be `true`.
573 * For efficiency reasons, if `B` is a propagator completion, the propagators are not copied into `ua`.
574 * (This is sound: since they are entailed, they no longer bring any information.) */
575 template <class B>
576 CUDA void extract(B& ua) const {
577 if constexpr(impl::is_pir_like<B>::value) {
578 sub->extract(*ua.sub);
579 }
580 else {
581 sub->extract(ua);
582 }
583 }
584
585private:
586 template<class Env, class Allocator2>
587 CUDA NI TFormula<Allocator2> deinterpret(bytecode_type bytecode, const Env& env, Allocator2 allocator) const {
588 using F = TFormula<Allocator2>;
589 auto X = F::make_lvar(bytecode.x.aty(), LVar<Allocator2>(env.name_of(bytecode.x), allocator));
590 auto Y = F::make_lvar(bytecode.y.aty(), LVar<Allocator2>(env.name_of(bytecode.y), allocator));
591 auto Z = F::make_lvar(bytecode.z.aty(), LVar<Allocator2>(env.name_of(bytecode.z), allocator));
592 return F::make_binary(X, EQ, F::make_binary(Y, bytecode.op, Z, aty(), allocator), aty(), allocator);
593 }
594public:
595
596 template<class Env, class Allocator2 = typename Env::allocator_type>
597 CUDA NI TFormula<Allocator2> deinterpret(const Env& env, Allocator2 allocator = Allocator2()) const {
598 using F = TFormula<Allocator2>;
599 typename F::Sequence seq{allocator};
600 seq.push_back(sub->deinterpret(env, allocator));
601 for(int i = 0; i < bytecodes->size(); ++i) {
602 seq.push_back(deinterpret((*bytecodes)[i], env, allocator));
603 }
604 return F::make_nary(AND, std::move(seq), aty());
605 }
606
607 template<class I, class Env, class Allocator2 = typename Env::allocator_type>
608 CUDA NI TFormula<Allocator2> deinterpret(const I& intermediate, const Env& env, Allocator2 allocator = Allocator2()) const {
609 using F = TFormula<Allocator2>;
610 typename F::Sequence seq{allocator};
611 seq.push_back(sub->deinterpret(intermediate.sub_value, env, allocator));
612 for(int i = 0; i < intermediate.bytecodes.size(); ++i) {
613 seq.push_back(deinterpret(intermediate.bytecodes[i], env, allocator));
614 }
615 return F::make_nary(AND, std::move(seq), aty());
616 }
617};
618
619}
620
621#endif
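
As a complement to the listing, here is a minimal usage sketch, not part of pir.hpp: it shows how a host-side refinement loop could drive the ternary propagators of a PIR element once its constraints have been added through `interpret_tell` and `deduce(tell_type)`. Only `is_bot()`, `num_deductions()` and `deduce(size_t)` come from the class above; the function name `propagate_to_fixpoint` and the plain `bool` flag are illustrative assumptions.

#include "pir.hpp"

// Sketch: run every propagator `X = Y <op> Z` until no domain changes anymore,
// or until a variable domain becomes empty (the element reaches bot).
template <class A, class Alloc>
bool propagate_to_fixpoint(lala::PIR<A, Alloc>& pir) {
  bool changed = true;
  while(changed) {
    changed = false;
    if(pir.is_bot()) {
      return false;                      // inconsistent: some domain is empty
    }
    for(size_t i = 0; i < pir.num_deductions(); ++i) {
      if(pir.deduce(i)) {                // refine the domains of X, Y and Z
        changed = true;
      }
    }
  }
  return true;                           // local fixpoint reached
}

In a CUDA setting, each `deduce(i)` can presumably be mapped to a different thread, which is why deductions are exposed one index at a time rather than only through `deduce(tell_type)`; the shared `bytecodes` pointer used in the copy constructor (to avoid L2 cache contention among blocks) hints at that usage.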