6#include "battery/vector.hpp"
7#include "battery/unique_ptr.hpp"
8#include "battery/shared_ptr.hpp"
9#include "battery/root_ptr.hpp"
10#include "battery/allocator.hpp"
11#include "battery/algorithm.hpp"
12#include "battery/bitset.hpp"
14#include "lala/logic/logic.hpp"
15#include "lala/logic/ternarize.hpp"
16#include "lala/universes/arith_bound.hpp"
17#include "lala/abstract_deps.hpp"
18#include "lala/vstore.hpp"
namespace lala {

template <class A, class Alloc>
class PIR;

namespace impl {
  template <class A>
  struct is_pir_like {
    static constexpr bool value = false;
  };

  template <class A, class Alloc>
  struct is_pir_like<PIR<A, Alloc>> {
    static constexpr bool value = true;
  };
}
/** A bytecode represents a ternary constraint of the form `x = y <op> z`. */
struct bytecode_type {
  Sig op;
  AVar x;
  AVar y;
  AVar z;

  constexpr bytecode_type() = default;
  constexpr bytecode_type(const bytecode_type&) = default;

  CUDA INLINE const AVar& operator[](int i) const {
    return i == 0 ? x : (i == 1 ? y : z);
  }
};
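
/** For instance, a bytecode with `op == ADD` represents the constraint
    `x = y + z`; `operator[]` gives positional access to `x`, `y` and `z`. */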
template <class A, class Allocator = typename A::allocator_type>
class PIR {
public:
  using sub_type = A;
  using universe_type = typename sub_type::universe_type;
  using local_universe_type = typename universe_type::local_type;
  using allocator_type = Allocator;
  using sub_allocator_type = typename sub_type::allocator_type;
  using this_type = PIR<sub_type, allocator_type>;
  /** A snapshot stores the number of bytecodes together with a snapshot of the
      sub-domain, which is enough to restore a `PIR` element later on. */
  template <class Alloc>
  struct snapshot_type {
    using sub_snap_type = typename A::template snapshot_type<Alloc>;
    int num_bytecodes;
    sub_snap_type sub_snap;

    CUDA snapshot_type(int num_bytecodes, sub_snap_type&& sub_snap)
      : num_bytecodes(num_bytecodes), sub_snap(std::move(sub_snap)) {}

    snapshot_type(const snapshot_type<Alloc>&) = default;
    snapshot_type(snapshot_type<Alloc>&&) = default;
    snapshot_type<Alloc>& operator=(snapshot_type<Alloc>&&) = default;
    snapshot_type<Alloc>& operator=(const snapshot_type<Alloc>&) = default;

    template <class SnapshotType>
    CUDA snapshot_type(const SnapshotType& other, const Alloc& alloc = Alloc{})
      : num_bytecodes(other.num_bytecodes)
      , sub_snap(other.sub_snap, alloc)
    {}
  };
  using sub_ptr = abstract_ptr<sub_type>;

  constexpr static const bool sequential = sub_type::sequential;
  // ... (the other domain-property flags: is_abstract_universe,
  // is_totally_ordered, preserve_bot, preserve_top, preserve_join,
  // preserve_meet, injective_concretization, preserve_concrete_covers) ...
  constexpr static const char* name = "PIR";
  template <class A2, class Alloc2>
  friend class PIR;
  static_assert(sizeof(int) == sizeof(AVar),
    "The size of AVar must be equal to the size of an int.");
  static_assert(sizeof(int) == sizeof(Sig),
    "The size of Sig must be equal to the size of an int.");
  using bytecodes_type = battery::vector<bytecode_type, allocator_type>;
  using bytecodes_ptr = battery::root_ptr<bytecodes_type, allocator_type>;

  // [reconstructed] members appearing in the constructors below.
  AType atype;
  sub_ptr sub;
  local_universe_type ZERO, ONE;  // The universe constants 0 and 1.

  /** The bytecodes are kept in a `root_ptr` so they can be shared among
      copies of this abstract domain. */
  bytecodes_ptr bytecodes;
  using LB = typename local_universe_type::LB;
  using UB = typename local_universe_type::UB;
  /** The interpretation of a formula in `PIR`: the bytecodes interpreted by
      this domain, plus the part delegated to the sub-domain. */
  template <class Alloc, class SubType>
  struct interpreted_type {
    SubType sub_value;
    battery::vector<bytecode_type, Alloc> bytecodes;

    CUDA interpreted_type(const SubType& sub_value, const Alloc& alloc = Alloc{})
      : sub_value(sub_value), bytecodes(alloc) {}

    CUDA interpreted_type(const Alloc& alloc = Alloc{})
      : sub_value(alloc), bytecodes(alloc) {}

    interpreted_type(const interpreted_type&) = default;
    interpreted_type(interpreted_type&&) = default;
    interpreted_type& operator=(interpreted_type&&) = default;

    template <class InterpretedType>
    CUDA interpreted_type(const InterpretedType& other, const Alloc& alloc = Alloc{})
      : sub_value(other.sub_value, alloc)
      , bytecodes(other.bytecodes, alloc)
    {}

    template <class Alloc2, class SubType2>
    friend struct interpreted_type;
  };

  template <class Alloc>
  using tell_type = interpreted_type<Alloc, typename sub_type::template tell_type<Alloc>>;

  template <class Alloc>
  using ask_type = interpreted_type<Alloc, typename sub_type::template ask_type<Alloc>>;
  CUDA PIR(AType atype, sub_ptr sub, const allocator_type& alloc = allocator_type{})
    : atype(atype), sub(std::move(sub))
    // [elided] initialization of ZERO, ONE and allocation of `bytecodes`.
  {}
  template <class PIR2>
  CUDA PIR(const PIR2& other, sub_ptr sub, const allocator_type& alloc = allocator_type{})
    : atype(other.atype), sub(sub)
    // [elided] remaining member initializers.
  {}
  CUDA PIR(PIR&& other)
    : atype(other.atype)  // [reconstructed]
    , sub(std::move(other.sub))
    , ZERO(std::move(other.ZERO))
    , ONE(std::move(other.ONE))
    , bytecodes(std::move(other.bytecodes))
  {}
  template <class A2, class Alloc2, class... Allocators>
  CUDA static bytecodes_ptr init_bytecodes(const PIR<A2, Alloc2>& other, AbstractDeps<Allocators...>& deps) {
    auto alloc = deps.template get_allocator<allocator_type>();
    if constexpr(std::is_same_v<allocator_type, Alloc2>) {
      // A shared copy reuses the bytecodes of `other` instead of cloning them.
      if(deps.is_shared_copy()) {
        assert(static_cast<bool>(other.bytecodes));
        return other.bytecodes;
      }
    }
    bytecodes_ptr r = battery::allocate_root<bytecodes_type, allocator_type>(alloc, *(other.bytecodes), alloc);
    return r;
  }
  template <class A2, class Alloc2, class... Allocators>
  CUDA PIR(const PIR<A2, Alloc2>& other, AbstractDeps<Allocators...>& deps)
    : atype(other.atype)  // [reconstructed]
    , sub(deps.template clone<A>(other.sub))
    , ZERO(other.ZERO)    // [reconstructed]
    , ONE(other.ONE)      // [reconstructed]
    , bytecodes(init_bytecodes(other, deps))
  {}
  CUDA allocator_type get_allocator() const {
    return bytecodes.get_allocator();
  }

  CUDA AType aty() const {
    return atype;  // [reconstructed body]
  }
  static CUDA this_type bot(AType atype = UNTYPED,
    AType atype_sub = UNTYPED,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    return PIR{atype, battery::allocate_shared<sub_type>(sub_alloc, sub_type::bot(atype_sub, sub_alloc)), alloc};
  }

  static CUDA this_type top(AType atype = UNTYPED,
    AType atype_sub = UNTYPED,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    return PIR{atype, battery::allocate_shared<sub_type>(sub_alloc, sub_type::top(atype_sub, sub_alloc)), alloc};
  }
  /** A bottom element with fresh abstract types for `PIR` and its sub-domain. */
  template <class Env>
  static CUDA this_type bot(Env& env,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    AType atype_sub = env.extends_abstract_dom();
    AType atype = env.extends_abstract_dom();
    return bot(atype, atype_sub, alloc, sub_alloc);
  }

  template <class Env>
  static CUDA this_type top(Env& env,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    AType atype_sub = env.extends_abstract_dom();
    AType atype = env.extends_abstract_dom();
    return top(atype, atype_sub, alloc, sub_alloc);
  }
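
  /** A construction sketch (illustrative only; `Store` stands for any
      sub-domain satisfying the abstract domain interface, e.g. an
      instantiation of `VStore`):
      \code
      VarEnv<battery::standard_allocator> env;
      auto a = PIR<Store>::bot(env);  // Bottom element with fresh abstract types.
      auto b = PIR<Store>::top(env);  // Top element.
      \endcode */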
  template <IKind kind, bool diagnose, class F, class Env, class Intermediate>
  CUDA bool interpret_formula(const F& f, Env& env, Intermediate& intermediate, IDiagnostics& diagnostics) const {
    if(f.type() != aty() && !f.is_untyped()) {
      RETURN_INTERPRETATION_ERROR("The type of the formula does not match the type of this abstract domain.");
    }
    // [reconstructed] `sig` is the symbol of the top-level binary formula.
    Sig sig = f.sig();
    // `right` indexes the binary side, `left` the variable side; the condition
    // `left + right == 1` ensures exactly one side is a binary term.
    int left = f.seq(0).is_binary() ? 1 : 0;
    int right = f.seq(1).is_binary() ? 1 : 0;
    if(sig == EQ && (left + right == 1)) {
      auto& X = f.seq(left);
      auto& Y = f.seq(right).seq(0);
      auto& Z = f.seq(right).seq(1);
      bytecode_type bytecode;
      bytecode.op = f.seq(right).sig();
      if(X.is_variable() && Y.is_variable() && Z.is_variable() &&
         (bytecode.op == ADD || bytecode.op == MUL || bytecode.op == EDIV || bytecode.op == EMOD
          || bytecode.op == MIN || bytecode.op == MAX
          || bytecode.op == EQ || bytecode.op == LEQ))
      {
        if( env.template interpret<diagnose>(X, bytecode.x, diagnostics)
         && env.template interpret<diagnose>(Y, bytecode.y, diagnostics)
         && env.template interpret<diagnose>(Z, bytecode.z, diagnostics))
        {
          intermediate.bytecodes.push_back(bytecode);
          return true;  // [reconstructed]
        }
        RETURN_INTERPRETATION_ERROR("Could not interpret the variables in the environment.");
      }
    }
    RETURN_INTERPRETATION_ERROR("The shape of this formula is not supported.");
  }
  template <IKind kind, bool diagnose = false, class F, class Env, class I>
  CUDA NI bool interpret(const F& f, Env& env, I& intermediate, IDiagnostics& diagnostics) const {
    size_t error_context = 0;
    if constexpr(diagnose) {
      diagnostics.add_suberror(IDiagnostics(false, name, "Uninterpretable formula in both PIR and its sub-domain.", f));
      error_context = diagnostics.num_suberrors();
    }
    bool res = false;  // [reconstructed]
    // First, try to interpret the formula in the sub-domain.
    AType current = f.type();
    const_cast<F&>(f).type_as(sub->aty());
    if(sub->template interpret<kind, diagnose>(f, env, intermediate.sub_value, diagnostics)) {
      res = true;
    }
    const_cast<F&>(f).type_as(current);
    // Otherwise, try to interpret it as a bytecode in this domain.
    if(!res) {
      res = interpret_formula<kind, diagnose>(f, env, intermediate, diagnostics);
    }
    if constexpr(diagnose) {
      diagnostics.merge(res, error_context);
    }
    return res;
  }
  template <bool diagnose = false, class F, class Env, class Alloc2>
  CUDA NI bool interpret_tell(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    return interpret<IKind::TELL, diagnose>(f, env, tell, diagnostics);
  }
  template <bool diagnose = false, class F, class Env, class Alloc2>
  CUDA NI bool interpret_ask(const F& f, const Env& env, ask_type<Alloc2>& ask, IDiagnostics& diagnostics) const {
    return interpret<IKind::ASK, diagnose>(f, const_cast<Env&>(env), ask, diagnostics);
  }
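
  /** A sketch of the usual interpret-then-deduce cycle (illustrative; `a` is a
      `PIR` element over a sub-domain `Store`, `f` a ternarized formula typed
      in `env`):
      \code
      IDiagnostics diagnostics;
      PIR<Store>::tell_type<battery::standard_allocator> tell;
      if(a.interpret_tell(f, env, tell, diagnostics)) {
        a.deduce(tell);  // Add the bytecodes and tell the sub-domain.
      }
      \endcode */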
  template <class Alloc2>
  CUDA local::B deduce(const tell_type<Alloc2>& t) {
    local::B has_changed = sub->deduce(t.sub_value);
    if(t.bytecodes.size() > 0) {  // [reconstructed guard]
      bytecodes->reserve(bytecodes->size() + t.bytecodes.size());
      for(int i = 0; i < t.bytecodes.size(); ++i) {
        bytecodes->push_back(t.bytecodes[i]);  // [reconstructed]
      }
      has_changed = true;  // [reconstructed]
      // Sort the bytecodes by operator (and then by variable identifiers).
  #ifdef __CUDA_ARCH__  // [reconstructed guards]
      battery::sorti(*bytecodes,
        [&](int i, int j) { return (*bytecodes)[i].op < (*bytecodes)[j].op; });
  #else
      std::stable_sort(bytecodes->data(), bytecodes->data() + bytecodes->size(),
        [](const bytecode_type& a, const bytecode_type& b) {
          return a.op == b.op
            ? (a.y.vid() == b.y.vid()
                ? (a.x.vid() == b.x.vid() ? a.z.vid() < b.z.vid() : a.x.vid() < b.x.vid())
                : a.y.vid() < b.y.vid())
            : a.op < b.op;
        });
  #endif
    }
    return has_changed;
  }
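
  // Sorting groups bytecodes with the same operator next to each other, so
  // consecutive GPU threads running `deduce(i)` mostly take the same branch
  // (less warp divergence); the secondary ordering on variable ids is
  // presumably there to make the iteration order deterministic.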
  CUDA bool embed(AVar x, const universe_type& dom) {
    return sub->embed(x, dom);
  }
  /** \return `true` if the constraint represented by `bytecode` is entailed. */
  CUDA local::B ask(bytecode_type bytecode) const {
    // [reconstructed] project the current bounds of x, y and z.
    local_universe_type r1, r2, r3;
    sub->project(bytecode.x, r1);
    sub->project(bytecode.y, r2);
    sub->project(bytecode.z, r3);
    if(bytecode.op == EQ || bytecode.op == LEQ) {
      if(r1 >= ONE) {  // [reconstructed guard] x is known to be true.
        return bytecode.op == EQ
          ? (r2 == r3 && r2.lb().value() == r2.ub().value())
          : r2.ub().value() <= r3.lb().value();
      }
      else if(r1 <= ZERO) {  // x is known to be false.
        return bytecode.op == EQ
          ? fmeet(r2, r3).is_bot().value()
          : r2.lb().value() > r3.ub().value();
      }
      return false;  // [reconstructed]
    }
    // Arithmetic bytecode: entailed when `y <op> z` projects exactly onto the
    // single value of `x`.
    local_universe_type right;  // [reconstructed]
    right.project(bytecode.op, r2, r3);
    return right == r1 && r1.lb().value() == r1.ub().value();
  }
  CUDA INLINE bytecode_type load_deduce(int i) const {
  #ifdef __CUDA_ARCH__  // [reconstructed guards]
    // Vectorized 16-byte load of the bytecode (valid by the static_asserts above).
    int4 b4 = reinterpret_cast<int4*>(bytecodes->data())[i];
    return *reinterpret_cast<bytecode_type*>(&b4);  // [reconstructed]
  #else
    return (*bytecodes)[i];
  #endif
  }
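
  // On the device path, the four ints of a bytecode are fetched in a single
  // 128-bit transaction instead of four separate loads; on the host, the plain
  // vector access is used.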
  CUDA local::B ask(int i) const {
    return ask(load_deduce(i));  // [reconstructed body]
  }
  template <class Alloc2>
  CUDA local::B ask(const ask_type<Alloc2>& t) const {
    for(int i = 0; i < t.bytecodes.size(); ++i) {
      if(!ask(t.bytecodes[i])) {  // [reconstructed loop body]
        return false;
      }
    }
    return sub->ask(t.sub_value);  // [reconstructed]
  }
  CUDA int num_deductions() const {
    return bytecodes->size();
  }

  CUDA local::B deduce(int i) {
    assert(i < num_deductions());   // [reconstructed body]
    return deduce(load_deduce(i));
  }
  CUDA local::B deduce(bytecode_type bytecode) {
    local::B has_changed = false;
    // [reconstructed] project the current bounds of x, y and z.
    local_universe_type r1, r2, r3;
    sub->project(bytecode.x, r1);
    sub->project(bytecode.y, r2);
    sub->project(bytecode.z, r3);
    // Reified comparison x = (y == z) or x = (y <= z).
    if(bytecode.op == EQ || bytecode.op == LEQ) {
      // x is true: propagate y = z (for LEQ, only the relevant half of the bounds).
      if(r1 >= ONE) {  // [reconstructed guard]
        if(bytecode.op == LEQ) {
          r3.join_lb(LB::top());
          r2.join_ub(UB::top());
        }
        has_changed |= sub->embed(bytecode.y, r3);
        has_changed |= sub->embed(bytecode.z, r2);
      }
      // x is false: propagate y != z (resp. y > z).
      else if(r1 <= ZERO) {
        if(bytecode.op == EQ) {
          // y != z: when one side is a singleton, remove it from the other side.
          if(r2.lb().value() == r2.ub().value()) {
            // [elided] initialization of the temporary bound `r1`.
            r1.meet_lb(LB::prev(r2.lb()));
            r3.meet_ub(UB::prev(r2.ub()));
            has_changed |= sub->embed(bytecode.z, fjoin(r1, r3));
          }
          else if(r3.lb().value() == r3.ub().value()) {
            // [elided] initialization of the temporary bound `r1`.
            r1.meet_lb(LB::prev(r3.lb()));
            r2.meet_ub(UB::prev(r3.ub()));
            has_changed |= sub->embed(bytecode.y, fjoin(r1, r2));
          }
        }
        else {
          assert(bytecode.op == LEQ);
          // not(y <= z) is y > z.
          r2.meet_lb(LB::prev(r3.lb()));
          r3.meet_ub(UB::prev(r2.ub()));
          has_changed |= sub->embed(bytecode.y, r2);
          has_changed |= sub->embed(bytecode.z, r3);
        }
      }
      // x is still unknown: deduce it from the bounds of y and z.
      else if(r2.ub().value() <= r3.lb().value() && (bytecode.op == LEQ || r2.lb().value() == r3.ub().value())) {
        has_changed |= sub->embed(bytecode.x, ONE);
      }
      else if(r2.lb().value() > r3.ub().value() || (bytecode.op == EQ && r2.ub().value() < r3.lb().value())) {
        has_changed |= sub->embed(bytecode.x, ZERO);
      }
    }
    // Arithmetic bytecode x = y <op> z.
    else {
      // Forward propagation: x <- y <op> z.
      r1.project(bytecode.op, r2, r3);
      has_changed |= sub->embed(bytecode.x, r1);
      // Backward propagation of y via the left residual of each operator.
      switch(bytecode.op) {
        // [elided] one case per operator calling the matching left_residual;
        // division and modulus are guarded by:
        //   if(r1.is_bot() || r3.is_bot()) { ... }
        default: assert(false);
      }
      has_changed |= sub->embed(bytecode.y, r2);
      // Backward propagation of z via the right residual of each operator.
      switch(bytecode.op) {
        // [elided] one case per operator calling the matching right_residual;
        // division and modulus are guarded by:
        //   if(r1.is_bot() || r2.is_bot()) { ... }
        default: assert(false);
      }
      has_changed |= sub->embed(bytecode.z, r3);
    }
    return has_changed;
  }
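
  // Worked example (interval sub-domain): for the bytecode x = y + z with
  // y in [1,2] and z in [3,5], the forward projection refines x to [4,7];
  // backward, the residual y = x - z refines y to [x.lb - z.ub, x.ub - z.lb],
  // and symmetrically for z.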
  CUDA local::B is_bot() const {
    return sub->is_bot();
  }

  /** `PIR` is top when it has no bytecode and its sub-domain is top. */
  CUDA local::B is_top() const {
    return sub->is_top() && bytecodes->size() == 0;
  }

  CUDA auto operator[](int x) const {
    return (*sub)[x];  // [reconstructed body]
  }

  CUDA auto project(AVar x) const {
    return sub->project(x);
  }
  template <class Univ>
  CUDA void project(AVar x, Univ& u) const {
    sub->project(x, u);  // [reconstructed body]
  }

  CUDA int vars() const {
    return sub->vars();  // [reconstructed body]
  }
  template <class Alloc2 = allocator_type>
  CUDA snapshot_type<Alloc2> snapshot(const Alloc2& alloc = Alloc2()) const {
    assert(static_cast<bool>(bytecodes));
    return snapshot_type<Alloc2>(bytecodes->size(), sub->snapshot(alloc));  // [reconstructed]
  }
  template <class Alloc2>
  CUDA void restore(const snapshot_type<Alloc2>& snap) {
    int n = bytecodes->size();
    for(int i = snap.num_bytecodes; i < n; ++i) {  // [reconstructed loop]
      bytecodes->pop_back();
    }
    sub->restore(snap.sub_snap);  // [reconstructed]
  }
  /** Extraction requires all bytecodes to be entailed. */
  template <class ExtractionStrategy = NonAtomicExtraction>
  CUDA bool is_extractable(const ExtractionStrategy& strategy = ExtractionStrategy()) const {
    for(int i = 0; i < bytecodes->size(); ++i) {
      if(!ask(i)) {  // [reconstructed loop body]
        return false;
      }
    }
    return sub->is_extractable(strategy);
  }
  template <class B>  // [reconstructed header]
  CUDA void extract(B& ua) const {
    if constexpr(impl::is_pir_like<B>::value) {
      sub->extract(*ua.sub);
    }
    else {
      sub->extract(ua);  // [reconstructed]
    }
  }
  template <class Env, class Allocator2>
  CUDA NI TFormula<Allocator2> deinterpret(bytecode_type bytecode, const Env& env, Allocator2 allocator) const {
    using F = TFormula<Allocator2>;
    auto X = F::make_lvar(bytecode.x.aty(), LVar<Allocator2>(env.name_of(bytecode.x), allocator));
    auto Y = F::make_lvar(bytecode.y.aty(), LVar<Allocator2>(env.name_of(bytecode.y), allocator));
    auto Z = F::make_lvar(bytecode.z.aty(), LVar<Allocator2>(env.name_of(bytecode.z), allocator));
    return F::make_binary(X, EQ, F::make_binary(Y, bytecode.op, Z, aty(), allocator), aty(), allocator);
  }
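
  // E.g. a bytecode with op == ADD over variables x, y, z deinterprets back
  // to the formula `x = y + z`.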
  template <class Env, class Allocator2 = typename Env::allocator_type>
  CUDA NI TFormula<Allocator2> deinterpret(const Env& env, bool remove_entailed, size_t& num_entailed, Allocator2 allocator = Allocator2()) const {
    using F = TFormula<Allocator2>;
    typename F::Sequence seq{allocator};
    seq.push_back(sub->deinterpret(env, allocator));
    for(int i = 0; i < bytecodes->size(); ++i) {
      // Skip (and count) the bytecodes that are already entailed.
      if(remove_entailed && ask(i)) {
        num_entailed++;  // [reconstructed]
        continue;
      }
      seq.push_back(deinterpret((*bytecodes)[i], env, allocator));
    }
    return F::make_nary(AND, std::move(seq), aty());
  }
  template <class Env, class Allocator2 = typename Env::allocator_type>
  CUDA NI TFormula<Allocator2> deinterpret(const Env& env, Allocator2 allocator = Allocator2()) const {
    size_t num_entailed = 0;
    return deinterpret(env, false, num_entailed, allocator);
  }
  template <class I, class Env, class Allocator2 = typename Env::allocator_type>
  CUDA NI TFormula<Allocator2> deinterpret(const I& intermediate, const Env& env, Allocator2 allocator = Allocator2()) const {
    using F = TFormula<Allocator2>;
    typename F::Sequence seq{allocator};
    seq.push_back(sub->deinterpret(intermediate.sub_value, env, allocator));
    for(int i = 0; i < intermediate.bytecodes.size(); ++i) {
      seq.push_back(deinterpret(intermediate.bytecodes[i], env, allocator));
    }
    return F::make_nary(AND, std::move(seq), aty());
  }
};

} // namespace lala