#ifndef LALA_PC_PIR_HPP
#define LALA_PC_PIR_HPP

#include "battery/vector.hpp"
#include "battery/unique_ptr.hpp"
#include "battery/shared_ptr.hpp"
#include "battery/root_ptr.hpp"
#include "battery/allocator.hpp"
#include "battery/algorithm.hpp"
#include "battery/bitset.hpp"

#include "lala/logic/logic.hpp"
#include "lala/logic/ternarize.hpp"
#include "lala/universes/arith_bound.hpp"
#include "lala/abstract_deps.hpp"
#include "lala/vstore.hpp"
namespace lala {

template <class A, class Alloc>
class PIR;

namespace impl {
  // Type trait detecting PIR instantiations (used by `extract` below).
  template <class>
  struct is_pir_like {
    static constexpr bool value = false;
  };

  template <class A, class Alloc>
  struct is_pir_like<PIR<A, Alloc>> {
    static constexpr bool value = true;
  };
}
/** A bytecode encodes a ternary constraint of the form `x = y <op> z` over
 * abstract variables. */
struct bytecode_type {
  Sig op;
  AVar x;
  AVar y;
  AVar z;

  constexpr bytecode_type() = default;
  constexpr bytecode_type(const bytecode_type&) = default;

  CUDA INLINE const AVar& operator[](int i) const {
    return i == 0 ? x : (i == 1 ? y : z);
  }
};
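// For instance (illustrative), the constraint `x = y + z` is stored as a single
// bytecode `{op = ADD, x, y, z}`, while `x = (y <= z)` reifies the truth value
// of `y <= z` into the 0/1 variable `x` (see `deduce(bytecode_type)` below).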
/** PIR refines a sub-abstract domain `A` (typically a store of intervals) with
 * a set of ternary propagators represented as bytecodes. */
template <class A, class Allocator = typename A::allocator_type>
class PIR {
public:
  using sub_type = A;
  using universe_type = typename A::universe_type;
  using local_universe_type = typename universe_type::local_type;
  using allocator_type = Allocator;
  using sub_allocator_type = typename A::allocator_type;
  using this_type = PIR<sub_type, allocator_type>;
  template <class Alloc>
  struct snapshot_type {
    using sub_snap_type = typename A::template snapshot_type<Alloc>;
    int num_bytecodes;
    sub_snap_type sub_snap;

    CUDA snapshot_type(int num_bytecodes, sub_snap_type&& sub_snap)
      : num_bytecodes(num_bytecodes)
      , sub_snap(std::move(sub_snap))
    {}

    snapshot_type(const snapshot_type<Alloc>&) = default;
    snapshot_type(snapshot_type<Alloc>&&) = default;
    snapshot_type<Alloc>& operator=(snapshot_type<Alloc>&&) = default;
    snapshot_type<Alloc>& operator=(const snapshot_type<Alloc>&) = default;

    template <class SnapshotType>
    CUDA snapshot_type(const SnapshotType& other, const Alloc& alloc = Alloc{})
      : num_bytecodes(other.num_bytecodes)
      , sub_snap(other.sub_snap, alloc)
    {}
  };
  using sub_ptr = abstract_ptr<sub_type>;

  // Lattice-property flags; the delegated values mirror the underlying universe.
  constexpr static const bool is_abstract_universe = false;
  constexpr static const bool sequential = sub_type::sequential;
  constexpr static const bool is_totally_ordered = false;
  constexpr static const bool preserve_bot = true;
  constexpr static const bool preserve_top = true;
  constexpr static const bool preserve_join = universe_type::preserve_join;
  constexpr static const bool preserve_meet = universe_type::preserve_meet;
  constexpr static const bool injective_concretization = universe_type::injective_concretization;
  constexpr static const bool preserve_concrete_covers = universe_type::preserve_concrete_covers;
  constexpr static const char* name = "PIR";
  template <class A2, class Alloc2>
  friend class PIR;

private:
  AType atype;
  sub_ptr sub;
  local_universe_type ZERO;
  local_universe_type ONE;

  // These layout guarantees allow `load_deduce` below to read a whole bytecode
  // (op, x, y, z) as a single vectorized `int4` load.
  static_assert(sizeof(int) == sizeof(AVar), "The size of AVar must be equal to the size of an int.");
  static_assert(sizeof(int) == sizeof(Sig), "The size of Sig must be equal to the size of an int.");

  using bytecodes_type = battery::vector<bytecode_type, allocator_type>;
  using bytecodes_ptr = battery::root_ptr<battery::vector<bytecode_type, allocator_type>, allocator_type>;

  bytecodes_ptr bytecodes;

  using LB = typename local_universe_type::LB;
  using UB = typename local_universe_type::UB;
public:
  template <class Alloc, class SubType>
  struct interpreted_type {
    SubType sub_value;
    battery::vector<bytecode_type, Alloc> bytecodes;

    interpreted_type(interpreted_type&&) = default;
    interpreted_type& operator=(interpreted_type&&) = default;
    interpreted_type(const interpreted_type&) = default;

    CUDA interpreted_type(const SubType& sub_value, const Alloc& alloc = Alloc{})
      : sub_value(sub_value), bytecodes(alloc)
    {}

    CUDA interpreted_type(const Alloc& alloc = Alloc{})
      : sub_value(alloc), bytecodes(alloc)
    {}

    template <class InterpretedType>
    CUDA interpreted_type(const InterpretedType& other, const Alloc& alloc = Alloc{})
      : sub_value(other.sub_value, alloc)
      , bytecodes(other.bytecodes, alloc)
    {}

    template <class Alloc2, class SubType2>
    friend struct interpreted_type;
  };

  template <class Alloc>
  using tell_type = interpreted_type<Alloc, typename sub_type::template tell_type<Alloc>>;

  template <class Alloc>
  using ask_type = interpreted_type<Alloc, typename sub_type::template ask_type<Alloc>>;
  CUDA PIR(AType atype, sub_ptr sub, const allocator_type& alloc = allocator_type{})
    : atype(atype), sub(std::move(sub))
    , ZERO(local_universe_type::eq_zero())
    , ONE(local_universe_type::eq_one())
    , bytecodes(battery::allocate_root<bytecodes_type, allocator_type>(alloc, alloc))
  {}
  CUDA PIR(PIR&& other)
    : atype(other.atype)
    , sub(std::move(other.sub))
    , ZERO(std::move(other.ZERO))
    , ONE(std::move(other.ONE))
    , bytecodes(std::move(other.bytecodes))
  {}
private:
  template <class A2, class Alloc2, class... Allocators>
  CUDA static bytecodes_ptr init_bytecodes(const PIR<A2, Alloc2>& other, AbstractDeps<Allocators...>& deps) {
    auto alloc = deps.template get_allocator<allocator_type>();
    // A shared copy with the same allocator can reuse the bytecodes in place.
    if constexpr(std::is_same_v<allocator_type, Alloc2>) {
      if(deps.is_shared_copy()) {
        return other.bytecodes;
      }
    }
    // Otherwise, deep-copy the bytecodes with this domain's allocator.
    bytecodes_ptr r = battery::allocate_root<bytecodes_type, allocator_type>(alloc, *(other.bytecodes), alloc);
    return r;
  }
public:
  template <class A2, class Alloc2, class... Allocators>
  CUDA PIR(const PIR<A2, Alloc2>& other, AbstractDeps<Allocators...>& deps)
    : atype(other.atype)
    , sub(deps.template clone<A>(other.sub))
    , ZERO(other.ZERO)
    , ONE(other.ONE)
    , bytecodes(init_bytecodes(other, deps))
  {}
  CUDA allocator_type get_allocator() const {
    return bytecodes.get_allocator();
  }

  CUDA AType aty() const {
    return atype;
  }
  static CUDA this_type bot(AType atype = UNTYPED,
    AType atype_sub = UNTYPED,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    return PIR{atype, battery::allocate_shared<sub_type>(sub_alloc, sub_type::bot(atype_sub, sub_alloc)), alloc};
  }

  static CUDA this_type top(AType atype = UNTYPED,
    AType atype_sub = UNTYPED,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    return PIR{atype, battery::allocate_shared<sub_type>(sub_alloc, sub_type::top(atype_sub, sub_alloc)), alloc};
  }
  template <class Env>
  static CUDA this_type bot(Env& env,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    AType atype_sub = env.extends_abstract_dom();
    AType atype = env.extends_abstract_dom();
    return bot(atype, atype_sub, alloc, sub_alloc);
  }
  template <class Env>
  static CUDA this_type top(Env& env,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    AType atype_sub = env.extends_abstract_dom();
    AType atype = env.extends_abstract_dom();
    return top(atype, atype_sub, alloc, sub_alloc);
  }
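  /** Illustrative usage sketch. `IStore` stands for an interval store
   * (e.g. a `VStore` of intervals) and is an assumption of this example,
   * as is the exact environment/parsing workflow:
   * \code
   * VarEnv<battery::standard_allocator> env;
   * auto a = PIR<IStore>::bot(env);   // PIR over a fresh bottom interval store
   * // ... interpret formulas with interpret_tell, then run deductions
   * // (typically until a fixpoint):
   * // for(int i = 0; i < a.num_deductions(); ++i) { a.deduce(i); }
   * \endcode
   */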
private:
  template <IKind kind, bool diagnose, class F, class Env, class Intermediate>
  CUDA bool interpret_formula(const F& f, Env& env, Intermediate& intermediate, IDiagnostics& diagnostics) const {
    if(f.type() != aty() && !f.is_untyped()) {
      RETURN_INTERPRETATION_ERROR("The type of the formula does not match the type of this abstract domain.");
    }
    if(f.is_binary()) {
      Sig sig = f.sig();
      // Exploit the symmetry of equality: if `f.seq(0)` is the binary term then
      // `left == 1` and `right == 0`, hence `f.seq(left)` always selects the
      // lone variable side and `f.seq(right)` the binary term.
      int left = f.seq(0).is_binary();
      int right = f.seq(1).is_binary();
      if(sig == EQ && (left || right)) {
        auto& X = f.seq(left);
        auto& Y = f.seq(right).seq(0);
        auto& Z = f.seq(right).seq(1);
        bytecode_type bytecode;
        bytecode.op = f.seq(right).sig();
        if(X.is_variable() && Y.is_variable() && Z.is_variable() &&
          (bytecode.op == ADD || bytecode.op == MUL || bytecode.op == SUB || bytecode.op == EDIV || bytecode.op == EMOD
          || bytecode.op == MIN || bytecode.op == MAX
          || bytecode.op == EQ || bytecode.op == LEQ))
        {
          if( env.template interpret<diagnose>(X, bytecode.x, diagnostics)
           && env.template interpret<diagnose>(Y, bytecode.y, diagnostics)
           && env.template interpret<diagnose>(Z, bytecode.z, diagnostics))
          {
            intermediate.bytecodes.push_back(bytecode);
            return true;
          }
          return false;
        }
      }
    }
    RETURN_INTERPRETATION_ERROR("The shape of this formula is not supported.");
  }
public:
  template <IKind kind, bool diagnose = false, class F, class Env, class I>
  CUDA NI bool interpret(const F& f, Env& env, I& intermediate, IDiagnostics& diagnostics) const {
    size_t error_context = 0;
    if constexpr(diagnose) {
      diagnostics.add_suberror(IDiagnostics(false, name, "Uninterpretable formula in both PIR and its sub-domain.", f));
      error_context = diagnostics.num_suberrors();
    }
    bool res = false;
    // Temporarily retype the formula for the sub-domain (the const_cast is
    // safe: the type is restored below).
    AType current = f.type();
    const_cast<F&>(f).type_as(sub->aty());
    if(sub->template interpret<kind, diagnose>(f, env, intermediate.sub_value, diagnostics)) {
      // The sub-domain interpretation is exact except for membership
      // constraints `x in S` where `S` has more than one piece (holes).
      if(!(f.is_binary() && f.sig() == IN && f.seq(0).is_variable() && f.seq(1).is(F::S) && f.seq(1).s().size() > 1)) {
        res = universe_type::preserve_join;
      }
    }
    const_cast<F&>(f).type_as(current);
    if(!res) {
      // `x in S` with `S` a set: decompose the membership constraint.
      if(f.is_binary() && f.sig() == IN && f.seq(1).is(F::S)) {
        F g = normalize(ternarize(decompose_in_constraint(f), env));
        res = ginterpret_in<kind, diagnose>(*this, g, env, intermediate, diagnostics);
      }
      // A binary constraint with a membership constraint on its right-hand side.
      else if(f.is_binary() && f.seq(1).is_binary() && f.seq(1).sig() == IN) {
        F g = normalize(ternarize(F::make_binary(f.seq(0), f.sig(), decompose_in_constraint(f.seq(1))), env, true));
        res = ginterpret_in<kind, diagnose>(*this, g, env, intermediate, diagnostics);
      }
      // A unary constraint not interpretable in the sub-domain: ternarize it.
      else if(num_vars(f) == 1) {
        F g = normalize(ternarize(f, env, true));
        if(g != f) {
          res = ginterpret_in<kind, diagnose>(*this, g, env, intermediate, diagnostics);
        }
        else {
          res = interpret_formula<kind, diagnose>(f, env, intermediate, diagnostics);
        }
      }
      else {
        res = interpret_formula<kind, diagnose>(f, env, intermediate, diagnostics);
      }
    }
    if constexpr(diagnose) {
      diagnostics.merge(res, error_context);
    }
    return res;
  }
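  // Example (illustrative): interpreting `x + y <= 5` ternarizes it into
  // fresh-variable conjuncts roughly of the form
  //   t = x + y  /\  b = (t <= 5)  /\  b = 1
  // where each conjunct is then interpreted either in the sub-domain
  // (e.g. `b = 1`) or as a bytecode by `interpret_formula`.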
  template <bool diagnose = false, class F, class Env, class Alloc2>
  CUDA NI bool interpret_tell(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    return interpret<IKind::TELL, diagnose>(f, env, tell, diagnostics);
  }
  template <bool diagnose = false, class F, class Env, class Alloc2>
  CUDA NI bool interpret_ask(const F& f, const Env& env, ask_type<Alloc2>& ask, IDiagnostics& diagnostics) const {
    return interpret<IKind::ASK, diagnose>(f, const_cast<Env&>(env), ask, diagnostics);
  }
  template <class Alloc2>
  CUDA local::B deduce(const tell_type<Alloc2>& t) {
    local::B has_changed = sub->deduce(t.sub_value);
    if(t.bytecodes.size() > 0) {
      bytecodes->reserve(bytecodes->size() + t.bytecodes.size());
      for(int i = 0; i < t.bytecodes.size(); ++i) {
        bytecodes->push_back(t.bytecodes[i]);
      }
      battery::sorti(*bytecodes,
        [&](int i, int j) { return (*bytecodes)[i].op < (*bytecodes)[j].op; });
      has_changed = true;
    }
    return has_changed;
  }
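  // Note: sorting the bytecodes by operator is presumably a data-layout
  // optimization: deductions over the same operator become contiguous, so
  // threads of a GPU warp executing neighboring bytecodes follow the same
  // branch of the `switch` in `deduce(bytecode_type)`.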
  CUDA bool embed(AVar x, const universe_type& dom) {
    return sub->embed(x, dom);
  }
private:
  /** Checks whether the ternary constraint represented by `bytecode` is
   * entailed by the current bounds of `x`, `y` and `z`. */
  CUDA local::B ask(bytecode_type bytecode) const {
    local_universe_type r1((*sub)[bytecode.x]);
    local_universe_type r2((*sub)[bytecode.y]);
    local_universe_type r3((*sub)[bytecode.z]);
    // Reified comparisons x = (y == z) and x = (y <= z).
    if(bytecode.op == EQ || bytecode.op == LEQ) {
      // `x` is known to be true: the comparison itself must be entailed.
      if(r1 >= ONE) {
        return bytecode.op == EQ
          ? (r2 == r3 && r2.lb().value() == r2.ub().value())
          : r2.ub().value() <= r3.lb().value();
      }
      // `x` is known to be false: the comparison must be disentailed.
      else if(r1 >= ZERO) {
        return bytecode.op == EQ
          ? fmeet(r2, r3).is_bot().value()
          : r2.lb().value() > r3.ub().value();
      }
      return false;
    }
    // Arithmetic bytecode x = y <op> z: entailed when `x` is a singleton equal
    // to the exact projection of `y <op> z`.
    local_universe_type right{};
    right.project(bytecode.op, r2, r3);
    return right == r1 && r1.lb().value() == r1.ub().value();
  }
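  // `load_deduce` reads a bytecode with a single 128-bit load. This relies on
  // the static_asserts above: `bytecode_type` is four ints (op, x, y, z), i.e.
  // exactly the size of a CUDA `int4`.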
public:
  CUDA INLINE bytecode_type load_deduce(int i) const {
#ifdef __CUDA_ARCH__
    int4 b4 = reinterpret_cast<int4*>(bytecodes->data())[i];
    return *reinterpret_cast<bytecode_type*>(&b4);
#else
    return (*bytecodes)[i];
#endif
  }
  CUDA local::B ask(int i) const {
    return ask(load_deduce(i));
  }
  template <class Alloc2>
  CUDA local::B ask(const ask_type<Alloc2>& t) const {
    for(int i = 0; i < t.bytecodes.size(); ++i) {
      if(!ask(t.bytecodes[i])) {
        return false;
      }
    }
    return sub->ask(t.sub_value);
  }
  /** The deductions are indexed from `0` to `num_deductions() - 1`. */
  CUDA int num_deductions() const {
    return bytecodes->size();
  }

  CUDA local::B deduce(int i) {
    return deduce(load_deduce(i));
  }
  CUDA local::B deduce(bytecode_type bytecode) {
    local::B has_changed = false;
    local_universe_type r1{};
    local_universe_type r2((*sub)[bytecode.y]);
    local_universe_type r3((*sub)[bytecode.z]);
    // Reified comparisons x = (y == z) and x = (y <= z).
    if(bytecode.op == EQ || bytecode.op == LEQ) {
      r1 = (*sub)[bytecode.x];
      // `x` is known to be true: enforce y == z (resp. y <= z).
      if(r1 <= ONE) {
        if(bytecode.op == LEQ) {
          // For y <= z, only y's upper bound and z's lower bound are constrained.
          r3.join_lb(LB::top());
          r2.join_ub(UB::top());
        }
        has_changed |= sub->embed(bytecode.y, r3);
        has_changed |= sub->embed(bytecode.z, r2);
      }
      // `x` is known to be false: enforce y != z (resp. y > z).
      else if(r1 <= ZERO) {
        if(bytecode.op == EQ) {
          // Propagate y != z (only possible once one side is a singleton).
          if(r2.lb().value() == r2.ub().value()) {
            r1.meet_lb(LB::prev(r2.lb()));
            r3.meet_ub(UB::prev(r2.ub()));
            has_changed |= sub->embed(bytecode.z, fjoin(r1, r3));
          }
          else if(r3.lb().value() == r3.ub().value()) {
            r1.meet_lb(LB::prev(r3.lb()));
            r2.meet_ub(UB::prev(r3.ub()));
            has_changed |= sub->embed(bytecode.y, fjoin(r1, r2));
          }
        }
        else {
          // not (y <= z) is y > z.
          assert(bytecode.op == LEQ);
          r2.meet_lb(LB::prev(r3.lb()));
          r3.meet_ub(UB::prev(r2.ub()));
          has_changed |= sub->embed(bytecode.y, r2);
          has_changed |= sub->embed(bytecode.z, r3);
        }
      }
      // The comparison is entailed: `x` must be true.
      else if(r2.ub().value() <= r3.lb().value() && (bytecode.op == LEQ || r2.lb().value() == r3.ub().value())) {
        has_changed |= sub->embed(bytecode.x, ONE);
      }
      // The comparison is disentailed: `x` must be false.
      else if(r2.lb().value() > r3.ub().value() || (bytecode.op == EQ && r2.ub().value() < r3.lb().value())) {
        has_changed |= sub->embed(bytecode.x, ZERO);
      }
    }
    // Arithmetic bytecode x = y <op> z.
    else {
      // Forward projection: x <- y <op> z.
      r1.project(bytecode.op, r2, r3);
      has_changed |= sub->embed(bytecode.x, r1);
      // Backward projection on y, using the left residual of the operator.
      r1 = (*sub)[bytecode.x];
      switch(bytecode.op) {
        /* one case per operator (ADD, SUB, MUL, EDIV, EMOD, MIN, MAX) applying
           its left residual to refine r2, cf. `left_residual` in terms.hpp ... */
        default: assert(false);
      }
      has_changed |= sub->embed(bytecode.y, r2);
      // Backward projection on z, using the right residual of the operator.
      r2 = (*sub)[bytecode.y];
      switch(bytecode.op) {
        /* same per-operator dispatch with the right residuals, refining r3,
           cf. `right_residual` in terms.hpp ... */
        default: assert(false);
      }
      has_changed |= sub->embed(bytecode.z, r3);
    }
    return has_changed;
  }
  CUDA local::B is_bot() const {
    return sub->is_bot();
  }
  CUDA local::B is_top() const {
    return sub->is_top() && bytecodes->size() == 0;
  }
  CUDA auto operator[](int x) const {
    return (*sub)[x];
  }

  CUDA auto project(AVar x) const {
    return sub->project(x);
  }
  template <class Univ>
  CUDA void project(AVar x, Univ& u) const {
    sub->project(x, u);
  }

  CUDA int vars() const {
    return sub->vars();
  }
  template <class Alloc2 = allocator_type>
  CUDA snapshot_type<Alloc2> snapshot(const Alloc2& alloc = Alloc2()) const {
    return snapshot_type<Alloc2>(bytecodes->size(), sub->snapshot(alloc));
  }
  template <class Alloc2>
  CUDA void restore(const snapshot_type<Alloc2>& snap) {
    int n = bytecodes->size();
    for(int i = snap.num_bytecodes; i < n; ++i) {
      bytecodes->pop_back();
    }
    sub->restore(snap.sub_snap);
  }
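  // Illustrative backtracking pattern (sketch):
  //   auto snap = a.snapshot();
  //   ... speculative deductions (deduce/embed) ...
  //   a.restore(snap);  // drops the bytecodes added since `snap`
  //                     // and rolls back the sub-domain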
  /** Extractable when every bytecode is entailed and the underlying abstract
   * element is itself extractable. */
  template <class ExtractionStrategy = NonAtomicExtraction>
  CUDA bool is_extractable(const ExtractionStrategy& strategy = ExtractionStrategy()) const {
    for(int i = 0; i < bytecodes->size(); ++i) {
      if(!ask(i)) {
        return false;
      }
    }
    return sub->is_extractable(strategy);
  }
  template <class B>
  CUDA void extract(B& ua) const {
    if constexpr(impl::is_pir_like<B>::value) {
      sub->extract(*ua.sub);
    }
    else {
      sub->extract(ua);
    }
  }
  template <class Env, class Allocator2>
  CUDA NI TFormula<Allocator2> deinterpret(bytecode_type bytecode, const Env& env, Allocator2 allocator) const {
    using F = TFormula<Allocator2>;
    auto X = F::make_lvar(bytecode.x.aty(), LVar<Allocator2>(env.name_of(bytecode.x), allocator));
    auto Y = F::make_lvar(bytecode.y.aty(), LVar<Allocator2>(env.name_of(bytecode.y), allocator));
    auto Z = F::make_lvar(bytecode.z.aty(), LVar<Allocator2>(env.name_of(bytecode.z), allocator));
    return F::make_binary(X, EQ, F::make_binary(Y, bytecode.op, Z, aty(), allocator), aty(), allocator);
  }
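  // For instance, a bytecode {op = ADD, x, y, z} deinterprets back to the
  // formula `x = (y + z)`, with the variable names looked up in `env`.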
  template <class Env, class Allocator2 = typename Env::allocator_type>
  CUDA NI TFormula<Allocator2> deinterpret(const Env& env, Allocator2 allocator = Allocator2()) const {
    using F = TFormula<Allocator2>;
    typename F::Sequence seq{allocator};
    seq.push_back(sub->deinterpret(env, allocator));
    for(int i = 0; i < bytecodes->size(); ++i) {
      seq.push_back(deinterpret((*bytecodes)[i], env, allocator));
    }
    return F::make_nary(AND, std::move(seq), aty());
  }
  template <class I, class Env, class Allocator2 = typename Env::allocator_type>
  CUDA NI TFormula<Allocator2> deinterpret(const I& intermediate, const Env& env, Allocator2 allocator = Allocator2()) const {
    using F = TFormula<Allocator2>;
    typename F::Sequence seq{allocator};
    seq.push_back(sub->deinterpret(intermediate.sub_value, env, allocator));
    for(int i = 0; i < intermediate.bytecodes.size(); ++i) {
      seq.push_back(deinterpret(intermediate.bytecodes[i], env, allocator));
    }
    return F::make_nary(AND, std::move(seq), aty());
  }
};

} // namespace lala

#endif