6#include "battery/vector.hpp"
7#include "battery/unique_ptr.hpp"
8#include "battery/shared_ptr.hpp"
9#include "battery/root_ptr.hpp"
10#include "battery/allocator.hpp"
11#include "battery/algorithm.hpp"
12#include "battery/bitset.hpp"
14#include "lala/logic/logic.hpp"
15#include "lala/logic/ternarize.hpp"
16#include "lala/universes/arith_bound.hpp"
17#include "lala/abstract_deps.hpp"
18#include "lala/vstore.hpp"
template <class A, class Alloc>
class PIR;

namespace impl {
  /** Type trait detecting whether a domain is an instance of PIR. */
  template <class A>
  struct is_pir_like {
    static constexpr bool value = false;
  };

  template <class A, class Alloc>
  struct is_pir_like<PIR<A, Alloc>> {
    static constexpr bool value = true;
  };
}
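// The trait enables compile-time dispatch on whether another domain is a PIR,
// e.g. in `extract` below:
//   if constexpr(impl::is_pir_like<B>::value) { sub->extract(*ua.sub); } else { ... }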
/** A ternary constraint `x = y <op> z` compiled into a flat "bytecode".
 * The three operands are variables of the sub-domain. */
struct bytecode_type {
  Sig op;
  AVar x;
  AVar y;
  AVar z;
  constexpr bytecode_type() = default;
  constexpr bytecode_type(const bytecode_type&) = default;
  CUDA INLINE const AVar& operator[](int i) const {
    return i == 0 ? x : (i == 1 ? y : z);
  }
};
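// For instance, after ternarization the constraint `x = y + z` is stored as
// bytecode_type{ADD, x, y, z}, where x, y and z are AVar handles into the
// sub-domain's variable store.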
/** PIR interprets ternary constraints `x = y <op> z` as bytecodes stored in a
 * flat vector, on top of a sub-domain `A` (typically a store of intervals). */
template <class A, class Allocator = typename A::allocator_type>
class PIR {
public:
  using sub_type = A;
  using universe_type = typename A::universe_type;
  using local_universe_type = typename universe_type::local_type;
  using allocator_type = Allocator;
  using sub_allocator_type = typename A::allocator_type;
  using this_type = PIR<sub_type, allocator_type>;

  template <class Alloc>
  struct snapshot_type {
    using sub_snap_type = typename A::template snapshot_type<Alloc>;
    int num_bytecodes;
    sub_snap_type sub_snap;

    CUDA snapshot_type(int num_bytecodes, sub_snap_type&& sub_snap)
      : num_bytecodes(num_bytecodes)
      , sub_snap(std::move(sub_snap))
    {}

    snapshot_type(const snapshot_type<Alloc>&) = default;
    snapshot_type(snapshot_type<Alloc>&&) = default;
    snapshot_type<Alloc>& operator=(snapshot_type<Alloc>&&) = default;
    snapshot_type<Alloc>& operator=(const snapshot_type<Alloc>&) = default;

    template <class SnapshotType>
    CUDA snapshot_type(const SnapshotType& other, const Alloc& alloc = Alloc{})
      : num_bytecodes(other.num_bytecodes)
      , sub_snap(other.sub_snap, alloc)
    {}
  };

  using sub_ptr = abstract_ptr<sub_type>;

  constexpr static const bool sequential = sub_type::sequential;
  // ... (other lattice property flags: is_abstract_universe, is_totally_ordered,
  //      preserve_bot/top/join/meet, injective_concretization,
  //      preserve_concrete_covers)
  constexpr static const char* name = "PIR";
  template <class A2, class Alloc2>
  friend class PIR;

  using bytecodes_type = battery::vector<bytecode_type, allocator_type>;
  static_assert(sizeof(int) == sizeof(AVar),
    "The size of AVar must be equal to the size of an int.");
  static_assert(sizeof(int) == sizeof(Sig),
    "The size of Sig must be equal to the size of an int.");

  /** The bytecodes are behind a `root_ptr` so they can be shared among copies
   * (see `init_bytecodes` below). */
  using bytecodes_ptr = battery::root_ptr<battery::vector<bytecode_type, allocator_type>, allocator_type>;

  AType atype;
  sub_ptr sub;
  /** Cached constant intervals [0,0] and [1,1] for the reified comparisons. */
  local_universe_type ZERO;
  local_universe_type ONE;

  bytecodes_ptr bytecodes;
  using LB = typename local_universe_type::LB;
  using UB = typename local_universe_type::UB;
  /** The interpretation of a formula in PIR: the part interpreted in the
   * sub-domain plus the ternary bytecodes interpreted in PIR itself. */
  template <class Alloc, class SubType>
  struct interpreted_type {
    SubType sub_value;
    battery::vector<bytecode_type, Alloc> bytecodes;

    interpreted_type(interpreted_type&&) = default;
    interpreted_type& operator=(interpreted_type&&) = default;
    interpreted_type(const interpreted_type&) = default;

    CUDA interpreted_type(const SubType& sub_value, const Alloc& alloc = Alloc{})
      : sub_value(sub_value), bytecodes(alloc) {}

    CUDA interpreted_type(const Alloc& alloc = Alloc{})
      : sub_value(alloc), bytecodes(alloc) {}

    template <class InterpretedType>
    CUDA interpreted_type(const InterpretedType& other, const Alloc& alloc = Alloc{})
      : sub_value(other.sub_value, alloc)
      , bytecodes(other.bytecodes, alloc)
    {}

    template <class Alloc2, class SubType2>
    friend struct interpreted_type;
  };

  template <class Alloc>
  using tell_type = interpreted_type<Alloc, typename sub_type::template tell_type<Alloc>>;

  template <class Alloc>
  using ask_type = interpreted_type<Alloc, typename sub_type::template ask_type<Alloc>>;
  CUDA PIR(AType atype, sub_ptr sub, const allocator_type& alloc = allocator_type{})
    : atype(atype), sub(std::move(sub))
    // ... (ZERO, ONE and the empty `bytecodes` vector initialized here)
  {}

  template <class PIR2>
  CUDA PIR(const PIR2& other, sub_ptr sub, const allocator_type& alloc = allocator_type{})
    : atype(other.atype), sub(sub)
    // ... (ZERO, ONE and a copy of `other`'s bytecodes initialized here)
  {}

  CUDA PIR(PIR&& other)
    : atype(other.atype)
    , sub(std::move(other.sub))
    , ZERO(std::move(other.ZERO))
    , ONE(std::move(other.ONE))
    , bytecodes(std::move(other.bytecodes))
  {}
  template <class A2, class Alloc2, class... Allocators>
  CUDA static bytecodes_ptr init_bytecodes(const PIR<A2, Alloc2>& other, AbstractDeps<Allocators...>& deps) {
    auto alloc = deps.template get_allocator<allocator_type>();
    if constexpr(std::is_same_v<allocator_type, Alloc2>) {
      if(deps.is_shared_copy()) {
        assert(static_cast<bool>(other.bytecodes));
        return other.bytecodes;
      }
    }
    bytecodes_ptr r = battery::allocate_root<bytecodes_type, allocator_type>(alloc, *(other.bytecodes), alloc);
    return r;
  }
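  // A "shared copy" with an identical allocator reuses the same root_ptr, so
  // all such copies see one bytecode vector; otherwise the vector is
  // deep-copied into the target allocator's memory (e.g. when transferring the
  // domain between memory spaces).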
  template <class A2, class Alloc2, class... Allocators>
  CUDA PIR(const PIR<A2, Alloc2>& other, AbstractDeps<Allocators...>& deps)
    : atype(other.atype)
    , sub(deps.template clone<A>(other.sub))
    , ZERO(other.ZERO)
    , ONE(other.ONE)
    , bytecodes(init_bytecodes(other, deps))
  {}

  CUDA allocator_type get_allocator() const {
    return bytecodes.get_allocator();
  }

  CUDA AType aty() const {
    return atype;
  }
  static CUDA this_type bot(AType atype = UNTYPED,
    AType atype_sub = UNTYPED,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    return PIR{atype, battery::allocate_shared<sub_type>(sub_alloc, sub_type::bot(atype_sub, sub_alloc)), alloc};
  }

  static CUDA this_type top(AType atype = UNTYPED,
    AType atype_sub = UNTYPED,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    return PIR{atype, battery::allocate_shared<sub_type>(sub_alloc, sub_type::top(atype_sub, sub_alloc)), alloc};
  }

  /** As above, but the abstract types are fresh types extending `env`. */
  template <class Env>
  static CUDA this_type bot(Env& env,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    AType atype_sub = env.extends_abstract_dom();
    AType atype = env.extends_abstract_dom();
    return bot(atype, atype_sub, alloc, sub_alloc);
  }

  template <class Env>
  static CUDA this_type top(Env& env,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    AType atype_sub = env.extends_abstract_dom();
    AType atype = env.extends_abstract_dom();
    return top(atype, atype_sub, alloc, sub_alloc);
  }
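  // Minimal usage sketch (hypothetical aliases; VStore comes from
  // lala/vstore.hpp and Itv stands for an interval universe from
  // lala/universes/arith_bound.hpp, both included above):
  //   using IStore = lala::VStore<Itv, battery::standard_allocator>;
  //   using IPIR = lala::PIR<IStore>;
  //   lala::VarEnv<battery::standard_allocator> env;
  //   auto a = IPIR::bot(env);  // unconstrained store, no bytecodes yet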
  /** Interpret a formula of the shape `x = (y <op> z)` or `x <=> (y <op> z)`
   * as a single bytecode. */
  template <IKind kind, bool diagnose, class F, class Env, class Intermediate>
  CUDA bool interpret_formula(const F& f, Env& env, Intermediate& intermediate, IDiagnostics& diagnostics) const {
    if(f.type() != aty() && !f.is_untyped()) {
      RETURN_INTERPRETATION_ERROR("The type of the formula does not match the type of this abstract domain.");
    }
    if(f.is_binary()) {
      Sig sig = f.sig();
      // `left` indexes the variable side of the (in)equality, `right` the
      // binary term; exactly one side must be a binary term.
      int left = f.seq(0).is_binary() ? 1 : 0;
      int right = f.seq(1).is_binary() ? 1 : 0;
      if((sig == EQ || sig == EQUIV) && (left + right == 1)) {
        auto& X = f.seq(left);
        auto& Y = f.seq(right).seq(0);
        auto& Z = f.seq(right).seq(1);
        bytecode_type bytecode;
        bytecode.op = f.seq(right).sig();
        if(X.is_variable() && Y.is_variable() && Z.is_variable() &&
          (bytecode.op == ADD || bytecode.op == MUL || ::lala::is_z_division(bytecode.op) || bytecode.op == EMOD
          || bytecode.op == MIN || bytecode.op == MAX
          || bytecode.op == EQ || bytecode.op == LEQ))
        {
          if( env.template interpret<diagnose>(X, bytecode.x, diagnostics)
           && env.template interpret<diagnose>(Y, bytecode.y, diagnostics)
           && env.template interpret<diagnose>(Z, bytecode.z, diagnostics))
          {
            intermediate.bytecodes.push_back(bytecode);
            return true;
          }
          RETURN_INTERPRETATION_ERROR("Could not interpret the variables in the environment.");
        }
      }
    }
    RETURN_INTERPRETATION_ERROR("The shape of this formula is not supported.");
  }
  template <IKind kind, bool diagnose = false, class F, class Env, class I>
  CUDA NI bool interpret(const F& f, Env& env, I& intermediate, IDiagnostics& diagnostics) const {
    size_t error_context = 0;
    if constexpr(diagnose) {
      diagnostics.add_suberror(IDiagnostics(false, name, "Uninterpretable formula in both PIR and its sub-domain.", f));
      error_context = diagnostics.num_suberrors();
    }
    bool res = false;
    AType current = f.type();
    // First, try to interpret the formula in the sub-domain.
    const_cast<F&>(f).type_as(sub->aty());
    if(sub->template interpret<kind, diagnose>(f, env, intermediate.sub_value, diagnostics)) {
      res = true;
    }
    else {
      // Otherwise, try to interpret it as a bytecode in this domain.
      const_cast<F&>(f).type_as(current);
      res = interpret_formula<kind, diagnose>(f, env, intermediate, diagnostics);
    }
    if constexpr(diagnose) {
      diagnostics.merge(res, error_context);
    }
    return res;
  }
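  // The `const_cast` retypes `f` in place so the sub-domain can be probed
  // first; only the ternary shapes accepted by `interpret_formula` remain in
  // PIR itself, everything else must be absorbed by the sub-domain.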
  template <bool diagnose = false, class F, class Env, class Alloc2>
  CUDA NI bool interpret_tell(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    return interpret<IKind::TELL, diagnose>(f, env, tell, diagnostics);
  }

  template <bool diagnose = false, class F, class Env, class Alloc2>
  CUDA NI bool interpret_ask(const F& f, const Env& env, ask_type<Alloc2>& ask, IDiagnostics& diagnostics) const {
    return interpret<IKind::ASK, diagnose>(f, const_cast<Env&>(env), ask, diagnostics);
  }
  template <class Alloc2>
  CUDA local::B deduce(const tell_type<Alloc2>& t) {
    local::B has_changed = sub->deduce(t.sub_value);
    if(t.bytecodes.size() > 0) {
      bytecodes->reserve(bytecodes->size() + t.bytecodes.size());
      for(int i = 0; i < t.bytecodes.size(); ++i) {
        bytecodes->push_back(t.bytecodes[i]);
        has_changed = true;
      }
      // Sort the bytecodes by operator.
#ifdef __CUDA_ARCH__
      battery::sorti(*bytecodes,
        [&](int i, int j) { return (*bytecodes)[i].op < (*bytecodes)[j].op; });
#else
      std::stable_sort(bytecodes->data(), bytecodes->data() + bytecodes->size(),
        [](const bytecode_type& a, const bytecode_type& b) {
          return a.op == b.op
            ? (a.y.vid() == b.y.vid()
                ? (a.x.vid() == b.x.vid() ? a.z.vid() < b.z.vid() : a.x.vid() < b.x.vid())
                : a.y.vid() < b.y.vid())
            : a.op < b.op;
        });
#endif
    }
    return has_changed;
  }
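  // Grouping bytecodes by operator means adjacent deductions take the same
  // branch of the `switch` in `deduce(bytecode_type)`; on GPU this reduces
  // warp divergence when consecutive threads process consecutive bytecodes.
  // The host path additionally orders by variable ids, making the final order
  // deterministic across runs.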
  CUDA bool embed(AVar x, const universe_type& dom) {
    return sub->embed(x, dom);
  }

  /** Load the i-th bytecode. */
  CUDA INLINE bytecode_type load_deduce(int i) const {
#ifdef __CUDA_ARCH__
    int4 b4 = reinterpret_cast<int4*>(bytecodes->data())[i];
    return *reinterpret_cast<bytecode_type*>(&b4);
#else
    return (*bytecodes)[i];
#endif
  }

  CUDA local::B ask(int i) const {
    return ask(load_deduce(i));
  }
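  // The vectorized load above is why `AVar` and `Sig` must each have the size
  // of an `int` (see the static_asserts near the top of the class): the four
  // fields {op, x, y, z} are fetched as one `int4`, a single 128-bit memory
  // transaction instead of four separate loads.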
  template <class Alloc2>
  CUDA local::B ask(const ask_type<Alloc2>& t) const {
    for(int i = 0; i < t.bytecodes.size(); ++i) {
      if(!ask(t.bytecodes[i])) {
        return false;
      }
    }
    return sub->ask(t.sub_value);
  }

  /** The deductions of this domain are its bytecodes. */
  CUDA int num_deductions() const {
    return bytecodes->size();
  }
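  // Sketch of how a fixpoint engine drives this interface (hypothetical loop;
  // the real engines live elsewhere in lala). `deduce(int i)`, defined below,
  // exists alongside `num_deductions()` precisely so that deductions can run
  // independently, e.g. one per GPU thread:
  //   local::B changed = true;
  //   while(changed) {
  //     changed = false;
  //     for(int i = 0; i < a.num_deductions(); ++i) { changed |= a.deduce(i); }
  //   }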
  /** Execute the i-th bytecode. */
  CUDA local::B deduce(int i) {
    return deduce(load_deduce(i));
  }

  using Itv = local_universe_type;
  using value_t = typename Itv::LB::value_type;

  /** Readability macros for the propagators below: `r1` is the interval of x,
   * `r2` of y and `r3` of z; the suffixes l/u denote the lower/upper bound. */
#define xl r1.lb().value()
#define xu r1.ub().value()
#define yl r2.lb().value()
#define yu r2.ub().value()
#define zl r3.lb().value()
#define zu r3.ub().value()

#define INF std::numeric_limits<value_t>::max()
#define MINF std::numeric_limits<value_t>::min()
  /** Integer division of `a` by `b`, rounded according to `op` (truncated,
   * ceiling, floored or Euclidean). */
  CUDA INLINE value_t div(value_t a, Sig op, value_t b) const {
    switch(op) {
      case TDIV: return battery::tdiv(a, b);
      case CDIV: return battery::cdiv(a, b);
      case FDIV: return battery::fdiv(a, b);
      case EDIV: return battery::ediv(a, b);
      default: assert(false); return a;
    }
  }
  /** `true` if the bytecode is entailed in the current sub-domain, i.e. it
   * holds in every concretization. */
  CUDA local::B ask(bytecode_type bytecode) const {
    // Load the current intervals of x, y and z.
    Itv r1((*sub)[bytecode.x]);
    Itv r2((*sub)[bytecode.y]);
    Itv r3((*sub)[bytecode.z]);
    switch(bytecode.op) {
      // x <=> (y == z): entailed if x is 1 and y, z are equal singletons, or
      // x is 0 and the intervals of y and z are disjoint.
      case EQ: return (xl == 1 && yu == zl && yl == zu) || (xu == 0 && (yu < zl || yl > zu));
      // x <=> (y <= z): entailed if x is 1 and y is certainly below z, or
      // x is 0 and y is certainly above z.
      case LEQ: return (xl == 1 && yu <= zl) || (xu == 0 && yl > zu);
      // ...
      case MUL: return xl == xu &&
        ((yl == yu && zl == zu && xl == yl * zl)
        || (xl == 0 && (r2 == 0 || r3 == 0)));
      case TDIV:
      case CDIV:
      case FDIV:
      case EDIV:
        return (xl == xu && yl == yu && zl == zu && zl != 0 && xl == div(yl, bytecode.op, zl))
        // ... (second disjunct and the ADD, EMOD, MIN, MAX cases elided)
        ;
      default: assert(false); return false;
    }
  }
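  // Example: with x in [1,1], y in [0,2] and z in [5,9], the bytecode
  // x <=> (y <= z) is entailed since xl == 1 and yu (2) <= zl (5). With x in
  // [0,0], y in [6,9] and z in [0,5] it is also entailed, via the second
  // disjunct (yl (6) > zu (5)).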
  CUDA INLINE value_t min(value_t a, value_t b) const {
    return battery::min(a, b);
  }

  CUDA INLINE value_t max(value_t a, value_t b) const {
    return battery::max(a, b);
  }
  /** Refine `r1` (the interval of x) for the division constraint x = y / z,
   * where the rounding mode is given by `op`. */
  CUDA INLINE void itv_div(Sig op, Itv& r1, Itv& r2, Itv& r3) {
    if(zl < 0 && zu > 0) {
      // ... (the interval of z spans 0; this case is handled separately)
    }
    // 0 is not a valid divisor: trim it from the bounds of z.
    if(zl == 0) { r3.lb() = 1; }
    if(zu == 0) { r3.ub() = -1; }
    // ...
    if(r3.is_bot()) { return; }
    // The extrema of y/z are attained at the four bound combinations.
    auto t1 = div(yl, op, zl);
    auto t2 = div(yl, op, zu);
    auto t3 = div(yu, op, zl);
    auto t4 = div(yu, op, zu);
    r1.lb() = max(xl, min(min(t1, t2), min(t3, t4)));
    r1.ub() = min(xu, max(max(t1, t2), max(t3, t4)));
  }
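  // Monotonicity argument: once z has constant sign, y/z is monotone in each
  // of y and z, so the minimum and maximum of the quotient over the box
  // [yl,yu] x [zl,zu] are attained at the four corners t1..t4; meeting the
  // result with the old bounds xl/xu keeps the refinement sound.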
  /** Refine `r2` and `r3` (the intervals of y and z) for the constraint
   * x = y * z, i.e. y = x / z with exact division. */
  CUDA INLINE void mul_inv(const Itv& r1, Itv& r2, Itv& r3) {
    if(xl > 0 || xu < 0) {
      // x != 0, hence z != 0: trim 0 from the bounds of z.
      if(zl == 0) { r3.lb() = 1; }
      if(zu == 0) { r3.ub() = -1; }
    }
    if((xl > 0 || xu < 0) && zl < 0 && zu > 0) {
      // ... (z spans 0 but cannot be 0; elided)
    }
    else if(xl > 0 || xu < 0 || zl > 0 || zu < 0) {
      // ...
    }
    if(r3.is_bot()) { return; }
    // y = x / z: the lower bound rounds up (cdiv), the upper bound rounds down (fdiv).
    r2.lb() = max(yl, min(min(battery::cdiv(xl, zl), battery::cdiv(xl, zu)),
                          min(battery::cdiv(xu, zl), battery::cdiv(xu, zu))));
    r2.ub() = min(yu, max(max(battery::fdiv(xl, zl), battery::fdiv(xl, zu)),
                          max(battery::fdiv(xu, zl), battery::fdiv(xu, zu))));
  }
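  // Rounding choice: from x = y * z follows y >= x/z rounded up and y <= x/z
  // rounded down over all feasible corner pairs, hence cdiv for the new lower
  // bound and fdiv for the new upper bound; meeting with [yl, yu] never loses
  // solutions.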
  /** Execute one bytecode: refine the intervals of x, y and z with respect to
   * the constraint x = y <op> z, then commit them back into the sub-domain. */
  CUDA local::B deduce(bytecode_type bytecode) {
    local::B has_changed = false;
    // Load the current intervals of x, y and z.
    Itv r1((*sub)[bytecode.x]);
    Itv r2((*sub)[bytecode.y]);
    Itv r3((*sub)[bytecode.z]);
    // ...
    switch(bytecode.op) {
      // x <=> (y == z)
      case EQ: {
        if(r1 == ONE) {
          // x is true: y and z must be equal, meet each with the other.
          has_changed |= sub->embed(bytecode.y, r3);
          has_changed |= sub->embed(bytecode.z, r2);
        }
        else if(r1 == ZERO && (yl == yu || zl == zu)) {
          // x is false and one side is a singleton: peel that value off the
          // bounds of the other side.
          has_changed |= sub->embed(zl == zu ? bytecode.y : bytecode.z,
            Itv(yl == zl ? yl + 1 : LB::top().value(),
                yu == zu ? yu - 1 : UB::top().value()));
        }
        else if(yu == zl && yl == zu) { has_changed |= sub->embed(bytecode.x, ONE); }
        else if(yl > zu || yu < zl) { has_changed |= sub->embed(bytecode.x, ZERO); }
        return has_changed;
      }
      // x <=> (y <= z)
      case LEQ: {
        if(r1 == ONE) {
          // y <= z: y cannot exceed zu, z cannot be below yl.
          has_changed |= sub->embed(bytecode.y, Itv(yl, zu));
          has_changed |= sub->embed(bytecode.z, Itv(yl, zu));
        }
        else if(r1 == ZERO) {
          // y > z.
          has_changed |= sub->embed(bytecode.y, Itv(zl + 1, yu));
          has_changed |= sub->embed(bytecode.z, Itv(zl, yu - 1));
        }
        else if(yu <= zl) { has_changed |= sub->embed(bytecode.x, ONE); }
        else if(yl > zu) { has_changed |= sub->embed(bytecode.x, ZERO); }
        return has_changed;
      }
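      // Example: for x <=> (y <= z) with x in [1,1], y in [3,9] and z in
      // [0,5], the ONE branch above meets y with [3,5] and z with [3,5]: both
      // intervals become [3,5] and has_changed is set.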
      // ... (ADD case elided)
      case MUL: {
        // The extrema of y * z are attained at the four bound products.
        auto t1 = yl * zl;
        auto t2 = yl * zu;
        auto t3 = yu * zl;
        auto t4 = yu * zu;
        r1.lb() = max(xl, min(min(t1, t2), min(t3, t4)));
        r1.ub() = min(xu, max(max(t1, t2), max(t3, t4)));
        // ... (y and z refined via mul_inv, elided)
        break;
      }
      case TDIV:
      case CDIV:
      case FDIV:
      case EDIV: {
        // ...
        itv_div(bytecode.op, r1, r2, r3);
        // ...
        break;
      }
      case EMOD: {
        // 0 is not a valid divisor: trim it from the bounds of z.
        if(zl == 0) { r3.lb() = 1; }
        if(zu == 0) { r3.ub() = -1; }
        if(yl == yu && zl == zu) {
          r1.lb() = battery::emod(yl, zl);
          // ...
        }
        break;
      }
      // x = min(y, z)
      case MIN:
        r1.lb() = max(xl, min(yl, zl));
        r1.ub() = min(xu, min(yu, zu));
        r2.lb() = max(yl, xl);
        // If y is certainly the minimum (x < z is certain), x and y must agree.
        if(xu < zl) { r2.ub() = min(yu, xu); }
        r3.lb() = max(zl, xl);
        if(xu < yl) { r3.ub() = min(zu, xu); }
        break;
      // x = max(y, z)
      case MAX:
        r1.lb() = max(xl, max(yl, zl));
        r1.ub() = min(xu, max(yu, zu));
        r2.ub() = min(yu, xu);
        if(xl > zu) { r2.lb() = max(yl, xl); }
        r3.ub() = min(zu, xu);
        if(xl > yu) { r3.lb() = max(zl, xl); }
        break;
      default: assert(false);
    }
    // Commit the refined intervals back into the sub-domain.
    has_changed |= sub->embed(bytecode.x, r1);
    has_changed |= sub->embed(bytecode.y, r2);
    has_changed |= sub->embed(bytecode.z, r3);
    return has_changed;
  }
  CUDA local::B is_bot() const {
    return sub->is_bot();
  }

  /** `top` is the empty conjunction of bytecodes over a `top` sub-domain. */
  CUDA local::B is_top() const {
    return sub->is_top() && bytecodes->size() == 0;
  }
  CUDA auto operator[](int x) const {
    return (*sub)[x];
  }

  CUDA auto project(AVar x) const {
    return sub->project(x);
  }

  template <class Univ>
  CUDA void project(AVar x, Univ& u) const {
    sub->project(x, u);
  }

  CUDA int vars() const {
    return sub->vars();
  }
  template <class Alloc2 = allocator_type>
  CUDA snapshot_type<Alloc2> snapshot(const Alloc2& alloc = Alloc2()) const {
    assert(static_cast<bool>(bytecodes));
    return snapshot_type<Alloc2>(bytecodes->size(), sub->snapshot(alloc));
  }

  template <class Alloc2>
  CUDA void restore(const snapshot_type<Alloc2>& snap) {
    int n = bytecodes->size();
    while(n > snap.num_bytecodes) {
      bytecodes->pop_back();
      --n;
    }
    sub->restore(snap.sub_snap);
  }
  /** Extractable when every bytecode is entailed and the sub-domain is itself
   * extractable. */
  template <class ExtractionStrategy = NonAtomicExtraction>
  CUDA bool is_extractable(const ExtractionStrategy& strategy = ExtractionStrategy()) const {
    // ...
    for(int i = 0; i < bytecodes->size(); ++i) {
      if(!ask(i)) {
        return false;
      }
    }
    return sub->is_extractable(strategy);
  }
  template <class B>
  CUDA void extract(B& ua) const {
    if constexpr(impl::is_pir_like<B>::value) {
      sub->extract(*ua.sub);
    }
    else {
      sub->extract(ua);
    }
  }
  /** Deinterpret a single bytecode back into the formula `x = (y <op> z)`. */
  template<class Env, class Allocator2>
  CUDA NI TFormula<Allocator2> deinterpret(bytecode_type bytecode, const Env& env, Allocator2 allocator) const {
    using F = TFormula<Allocator2>;
    auto X = F::make_lvar(bytecode.x.aty(), LVar<Allocator2>(env.name_of(bytecode.x), allocator));
    auto Y = F::make_lvar(bytecode.y.aty(), LVar<Allocator2>(env.name_of(bytecode.y), allocator));
    auto Z = F::make_lvar(bytecode.z.aty(), LVar<Allocator2>(env.name_of(bytecode.z), allocator));
    return F::make_binary(X, EQ, F::make_binary(Y, bytecode.op, Z, aty(), allocator), aty(), allocator);
  }
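  // Round-trip example: bytecode_type{ADD, x, y, z} deinterprets to the
  // formula x = (y + z), the same shape accepted by `interpret_formula` above.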
  template<class Env, class Allocator2 = typename Env::allocator_type>
  CUDA NI TFormula<Allocator2> deinterpret(const Env& env, bool remove_entailed, size_t& num_entailed, Allocator2 allocator = Allocator2()) const {
    using F = TFormula<Allocator2>;
    typename F::Sequence seq{allocator};
    seq.push_back(sub->deinterpret(env, allocator));
    for(int i = 0; i < bytecodes->size(); ++i) {
      if(remove_entailed && ask(i)) {
        num_entailed++;
        continue;
      }
      seq.push_back(deinterpret((*bytecodes)[i], env, allocator));
    }
    return F::make_nary(AND, std::move(seq), aty());
  }
  template<class Env, class Allocator2 = typename Env::allocator_type>
  CUDA NI TFormula<Allocator2> deinterpret(const Env& env, Allocator2 allocator = Allocator2()) const {
    size_t num_entailed = 0;
    return deinterpret(env, false, num_entailed, allocator);
  }
  template<class I, class Env, class Allocator2 = typename Env::allocator_type>
  CUDA NI TFormula<Allocator2> deinterpret(const I& intermediate, const Env& env, Allocator2 allocator = Allocator2()) const {
    using F = TFormula<Allocator2>;
    typename F::Sequence seq{allocator};
    seq.push_back(sub->deinterpret(intermediate.sub_value, env, allocator));
    for(int i = 0; i < intermediate.bytecodes.size(); ++i) {
      seq.push_back(deinterpret(intermediate.bytecodes[i], env, allocator));
    }
    return F::make_nary(AND, std::move(seq), aty());
  }
};