#include "battery/vector.hpp"
#include "battery/unique_ptr.hpp"
#include "battery/shared_ptr.hpp"
#include "battery/root_ptr.hpp"
#include "battery/allocator.hpp"
#include "battery/algorithm.hpp"

#include "lala/logic/logic.hpp"
#include "lala/logic/ternarize.hpp"
#include "lala/universes/arith_bound.hpp"
#include "lala/abstract_deps.hpp"
#include "lala/vstore.hpp"
namespace lala {

template <class A, class Alloc>
class PIR;

namespace impl {
  // Primary template: by default, a type is not a PIR instantiation.
  template <class T>
  struct is_pir_like {
    static constexpr bool value = false;
  };

  template <class A, class Alloc>
  struct is_pir_like<PIR<A, Alloc>> {
    static constexpr bool value = true;
  };
}
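// Reconstructed layout of a bytecode (its definition is elided from this
// listing): one ternary constraint `x = (y op z)` packed into an operator and
// three variable identifiers. The static_asserts inside PIR rely on AVar and
// Sig each being int-sized, so a bytecode occupies four ints.
struct bytecode_type {
  Sig op;
  AVar x;
  AVar y;
  AVar z;
};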
/** PIR is an abstract domain transformer: it stores a conjunction of ternary
 * constraints `x = (y op z)` as a flat array of bytecodes, and delegates the
 * variable domains to a sub-domain `A`. Elided lines are marked `// ...`. */
template <class A, class Allocator = typename A::allocator_type>
class PIR {
public:
  using sub_type = A;
  using universe_type = typename A::universe_type;
  using local_universe_type = typename universe_type::local_type;
  using allocator_type = Allocator;
  using sub_allocator_type = typename A::allocator_type;
  using this_type = PIR<sub_type, allocator_type>;

  /** A snapshot records the number of stored bytecodes, so `restore` can pop
   * those added afterwards, together with a snapshot of the sub-domain. */
  template <class Alloc>
  struct snapshot_type {
    using sub_snap_type = A::template snapshot_type<Alloc>;
    size_t num_bytecodes;
    sub_snap_type sub_snap;

    CUDA snapshot_type(size_t num_bytecodes, sub_snap_type&& sub_snap)
      : num_bytecodes(num_bytecodes)
      , sub_snap(std::move(sub_snap))
    {}

    template <class SnapshotType>
    CUDA snapshot_type(const SnapshotType& other, const Alloc& alloc = Alloc{})
      : num_bytecodes(other.num_bytecodes)
      , sub_snap(other.sub_snap, alloc)
    {}
  };

  using sub_ptr = abstract_ptr<sub_type>;

  constexpr static const bool sequential = sub_type::sequential;
  // ... (elided: the remaining lattice property flags, e.g. is_totally_ordered,
  //      preserve_bot, preserve_top, preserve_join, preserve_meet,
  //      injective_concretization, preserve_concrete_covers)
  constexpr static const char* name = "PIR";
  // Allow cross-instantiation copies (e.g. host-to-device) to access members.
  template <class A2, class Alloc2>
  friend class PIR;

  // A bytecode must stay compact: AVar and Sig are each int-sized.
  static_assert(sizeof(int) == sizeof(AVar),
    "The size of AVar must be equal to the size of an int.");
  static_assert(sizeof(int) == sizeof(Sig),
    "The size of Sig must be equal to the size of an int.");

  using bytecodes_type = battery::vector<bytecode_type, allocator_type>;
  using bytecodes_ptr = battery::root_ptr<bytecodes_type, allocator_type>;

  // ... (elided members: the abstract type `atype`, the sub-domain `sub`,
  //      and the constants `ZERO` and `ONE`)

  /** The ternary constraints, shared among shallow copies of this domain. */
  bytecodes_ptr bytecodes;

  using LB = typename local_universe_type::LB;
  using UB = typename local_universe_type::UB;
  /** The result of interpretation: an interpretation in the sub-domain plus
   * the ternary bytecodes interpreted by PIR itself. */
  template <class Alloc, class SubType>
  struct interpreted_type {
    SubType sub_value;
    battery::vector<bytecode_type, Alloc> bytecodes;

    // ... (elided constructors)

    template <class InterpretedType>
    CUDA interpreted_type(const InterpretedType& other, const Alloc& alloc = Alloc{})
      : sub_value(other.sub_value, alloc)
      , bytecodes(other.bytecodes, alloc)
    {}
  };

  template <class Alloc2, class SubType2>
  friend struct interpreted_type;

  template <class Alloc>
  using tell_type = interpreted_type<Alloc, typename sub_type::template tell_type<Alloc>>;

  template <class Alloc>
  using ask_type = interpreted_type<Alloc, typename sub_type::template ask_type<Alloc>>;
  /** Build PIR on top of an existing sub-domain `sub`. */
  CUDA PIR(AType atype, sub_ptr sub, const allocator_type& alloc = allocator_type{})
    : atype(atype), sub(std::move(sub))
    // ... (elided: initialization of ZERO and ONE)
    , bytecodes(battery::allocate_root<bytecodes_type, allocator_type>(alloc, alloc))
  {}

  CUDA PIR(PIR&& other)
    : atype(other.atype)
    , sub(std::move(other.sub))
    , ZERO(std::move(other.ZERO))
    , ONE(std::move(other.ONE))
    , bytecodes(std::move(other.bytecodes))
  {}
  template <class A2, class Alloc2, class... Allocators>
  CUDA static bytecodes_ptr init_bytecodes(const PIR<A2, Alloc2>& other, AbstractDeps<Allocators...>& deps) {
    // When this copy shares its allocator with `other` and `deps` requests a
    // shared copy, the bytecode array is reused rather than cloned.
    if constexpr(std::is_same_v<allocator_type, Alloc2>) {
      if(deps.is_shared_copy()) {
        return other.bytecodes;
      }
    }
    // ... (elided: `alloc` is obtained from `deps`)
    bytecodes_ptr r = battery::allocate_root<bytecodes_type, allocator_type>(alloc, *(other.bytecodes), alloc);
    return r;
  }

  template <class A2, class Alloc2, class... Allocators>
  CUDA PIR(const PIR<A2, Alloc2>& other, AbstractDeps<Allocators...>& deps)
    : atype(other.atype)
    , sub(deps.template clone<A>(other.sub))
    // ...
    , bytecodes(init_bytecodes(other, deps))
  {}

  CUDA allocator_type get_allocator() const {
    return bytecodes.get_allocator();
  }
  static CUDA this_type bot(AType atype = UNTYPED,
    AType atype_sub = UNTYPED,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    return PIR{atype, battery::allocate_shared<sub_type>(sub_alloc, sub_type::bot(atype_sub, sub_alloc)), alloc};
  }

  static CUDA this_type top(AType atype = UNTYPED,
    AType atype_sub = UNTYPED,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    return PIR{atype, battery::allocate_shared<sub_type>(sub_alloc, sub_type::top(atype_sub, sub_alloc)), alloc};
  }

  /** The variants taking an environment extend it with a fresh abstract type
   * for the sub-domain and another one for PIR itself. */
  template <class Env>
  static CUDA this_type bot(Env& env,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    AType atype_sub = env.extends_abstract_dom();
    AType atype = env.extends_abstract_dom();
    return bot(atype, atype_sub, alloc, sub_alloc);
  }

  template <class Env>
  static CUDA this_type top(Env& env,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    AType atype_sub = env.extends_abstract_dom();
    AType atype = env.extends_abstract_dom();
    return top(atype, atype_sub, alloc, sub_alloc);
  }
  /** Interpret a ternary constraint `X = (Y op Z)` (or `(Y op Z) = X`) into a
   * bytecode, where X, Y and Z are variables and `op` is one of the supported
   * operators listed below. */
  template <IKind kind, bool diagnose, class F, class Env, class Intermediate>
  CUDA bool interpret_formula(const F& f, Env& env, Intermediate& intermediate, IDiagnostics& diagnostics) const {
    if(f.type() != aty() && !f.is_untyped()) {
      RETURN_INTERPRETATION_ERROR("The type of the formula does not match the type of this abstract domain.");
    }
    // ... (elided; `sig` below is the top-level symbol of the binary formula `f`)
    int left = f.seq(0).is_binary();
    int right = f.seq(1).is_binary();
    if(sig == EQ && (left || right)) {
      auto& X = f.seq(left);
      auto& Y = f.seq(right).seq(0);
      auto& Z = f.seq(right).seq(1);
      bytecode_type bytecode;
      bytecode.op = f.seq(right).sig();
      if(X.is_variable() && Y.is_variable() && Z.is_variable() &&
        (bytecode.op == ADD || bytecode.op == MUL || bytecode.op == SUB || bytecode.op == EDIV || bytecode.op == EMOD
         || bytecode.op == MIN || bytecode.op == MAX
         || bytecode.op == EQ || bytecode.op == LEQ))
      {
        // ... (elided: X, Y and Z are resolved to the variables bytecode.x,
        //      bytecode.y and bytecode.z)
        intermediate.bytecodes.push_back(bytecode);
        return true;
      }
    }
    RETURN_INTERPRETATION_ERROR("The shape of this formula is not supported.");
  }
  /** Interpret `f` in the sub-domain when possible, otherwise decompose it
   * into ternary constraints handled by PIR itself. */
  template <IKind kind, bool diagnose = false, class F, class Env, class I>
  CUDA NI bool interpret(const F& f, Env& env, I& intermediate, IDiagnostics& diagnostics) const {
    size_t error_context = 0;
    if constexpr(diagnose) {
      diagnostics.add_suberror(IDiagnostics(false, name, "Uninterpretable formula in both PIR and its sub-domain.", f));
      error_context = diagnostics.num_suberrors();
    }
    bool res = false;
    AType current = f.type();
    // Temporarily retype `f` so the sub-domain can try to interpret it.
    const_cast<F&>(f).type_as(sub->aty());
    // Membership constraints `x in S` with |S| > 1 are not delegated: PIR
    // decomposes them below.
    if(!(f.is_binary() && f.sig() == IN && f.seq(0).is_variable() && f.seq(1).is(F::S) && f.seq(1).s().size() > 1)) {
      // ... (elided: interpretation of `f` in the sub-domain)
      res = universe_type::preserve_join;
      // ...
    }
    const_cast<F&>(f).type_as(current);
    if(!res) {
      // `x in S` is decomposed, then ternarized and normalized.
      if(f.is_binary() && f.sig() == IN && f.seq(1).is(F::S)) {
        F g = normalize(ternarize(decompose_in_constraint(f), env));
        res = ginterpret_in<kind, diagnose>(*this, g, env, intermediate, diagnostics);
      }
      else if(f.is_binary() && f.seq(1).is_binary() && f.seq(1).sig() == IN) {
        F g = normalize(ternarize(F::make_binary(f.seq(0), f.sig(), decompose_in_constraint(f.seq(1))), env, true));
        res = ginterpret_in<kind, diagnose>(*this, g, env, intermediate, diagnostics);
      }
      else if(num_vars(f) == 1) {
        F g = normalize(ternarize(f, env, true));
        // ... (elided branch)
        res = ginterpret_in<kind, diagnose>(*this, g, env, intermediate, diagnostics);
        // ... (elided branch)
        res = interpret_formula<kind, diagnose>(f, env, intermediate, diagnostics);
      }
      else {
        res = interpret_formula<kind, diagnose>(f, env, intermediate, diagnostics);
      }
    }
    if constexpr(diagnose) {
      diagnostics.merge(res, error_context);
    }
    return res;
  }
  template <bool diagnose = false, class F, class Env, class Alloc2>
  CUDA NI bool interpret_tell(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    // ... (elided)
  }

  template <bool diagnose = false, class F, class Env, class Alloc2>
  CUDA NI bool interpret_ask(const F& f, const Env& env, ask_type<Alloc2>& ask, IDiagnostics& diagnostics) const {
    // ... (elided)
  }

  /** Add the interpreted bytecodes and deduce the sub-domain component. */
  template <class Alloc2>
  CUDA local::B deduce(const tell_type<Alloc2>& t) {
    local::B has_changed = sub->deduce(t.sub_value);
    // ...
    bytecodes->reserve(bytecodes->size() + t.bytecodes.size());
    for(int i = 0; i < t.bytecodes.size(); ++i) {
      // ... (elided: push t.bytecodes[i] and record the change)
    }
    // Sort the bytecode array by operator, comparing elements through indices.
    battery::sorti(*bytecodes,
      [&](int i, int j) { return (*bytecodes)[i].op < (*bytecodes)[j].op; });
    // ...
    return has_changed;
  }
  CUDA bool embed(AVar x, const universe_type& dom) {
    return sub->embed(x, dom);
  }

  /** Check whether the ternary constraint `x = (y op z)` is entailed in the
   * current state; r1, r2 and r3 denote the domains of x, y and z. */
  CUDA local::B ask(bytecode_type bytecode) const {
    // ... (elided: r1, r2 and r3 are loaded from the sub-domain)
    if(bytecode.op == EQ || bytecode.op == LEQ) {
      if(/* elided guard on r1 */) {
        // `y op z` must itself be entailed: y and z are equal singletons (EQ),
        // or every value of y is below every value of z (LEQ).
        return bytecode.op == EQ
          ? (r2 == r3 && r2.lb().value() == r2.ub().value())
          : r2.ub().value() <= r3.lb().value();
      }
      else if(r1 >= ZERO) {
        // `y op z` must be disentailed: the domains of y and z are disjoint
        // (EQ), or y is entirely above z (LEQ).
        return bytecode.op == EQ
          ? fmeet(r2, r3).is_bot().value()
          : r2.lb().value() > r3.ub().value();
      }
      // ... (elided: otherwise the constraint is not yet entailed)
    }
    // Arithmetic operator: `x = y op z` is entailed when projecting `y op z`
    // yields exactly the unique value of x.
    // ... (elided: `right` is a scratch local_universe_type)
    right.project(bytecode.op, r2, r3);
    return right == r1 && r1.lb().value() == r1.ub().value();
  }

  CUDA local::B ask(size_t i) const {
    return ask((*bytecodes)[i]);
  }
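  // Worked example (illustrative, not from the header): for a LEQ bytecode
  // x = (y <= z) with y in [2,5] and z in [5,9], ub(y) = 5 <= 5 = lb(z), so
  // `y <= z` holds for every pair of values and the first branch of `ask`
  // answers true; with z in [0,1] instead, lb(y) = 2 > 1 = ub(z), so `y <= z`
  // is impossible and the constraint is entailed exactly when x is false.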
  /** An ask formula is entailed when all its bytecodes and its sub-domain
   * component are entailed. */
  template <class Alloc2>
  CUDA local::B ask(const ask_type<Alloc2>& t) const {
    for(int i = 0; i < t.bytecodes.size(); ++i) {
      // ... (elided: return false as soon as one bytecode is not entailed)
    }
    return sub->ask(t.sub_value);
  }

  /** There is one deduction operator per stored bytecode. */
  CUDA size_t num_deductions() const {
    return bytecodes->size();
  }

  /** Propagate the `i`-th bytecode `x = (y op z)` in the sub-domain. */
  CUDA local::B deduce(size_t i) {
    local::B has_changed = false;
    // ... (elided: `bytecode` is (*bytecodes)[i]; r1, r2 and r3 are local
    //      copies of the domains of bytecode.x, bytecode.y and bytecode.z)
    if(bytecode.op == EQ || bytecode.op == LEQ) {
      r1 = (*sub)[bytecode.x];
      if(/* elided guard: x is entailed true */) {
        // Propagate y = z (EQ) or y <= z (LEQ).
        if(bytecode.op == EQ) {
          has_changed |= sub->embed(bytecode.y, r3);
          has_changed |= sub->embed(bytecode.z, r2);
        }
        else {
          assert(bytecode.op == LEQ);
          // Erase the lower bound of r3 and the upper bound of r2, so that y
          // only receives the upper bound of z, and z the lower bound of y.
          r3.join_lb(LB::top());
          r2.join_ub(UB::top());
          has_changed |= sub->embed(bytecode.y, r3);
          has_changed |= sub->embed(bytecode.z, r2);
        }
      }
      // x is entailed false: propagate the negation, y != z (EQ) or y > z (LEQ).
      else if(r1 <= ZERO) {
        if(bytecode.op == EQ) {
          if(r2.lb().value() == r2.ub().value()) {
            // y is a singleton: prune its value from the bounds of z.
            r1.meet_lb(LB::prev(r2.lb()));
            r3.meet_ub(UB::prev(r2.ub()));
            has_changed |= sub->embed(bytecode.z, fjoin(r1, r3));
          }
          else if(r3.lb().value() == r3.ub().value()) {
            // z is a singleton: prune its value from the bounds of y.
            r1.meet_lb(LB::prev(r3.lb()));
            r2.meet_ub(UB::prev(r3.ub()));
            has_changed |= sub->embed(bytecode.y, fjoin(r1, r2));
          }
        }
        else {
          assert(bytecode.op == LEQ);
          // y > z: tighten the lower bound of y and the upper bound of z.
          r2.meet_lb(LB::prev(r3.lb()));
          r3.meet_ub(UB::prev(r2.ub()));
          has_changed |= sub->embed(bytecode.y, r2);
          has_changed |= sub->embed(bytecode.z, r3);
        }
      }
      // x is still unknown, but `y op z` is entailed: set x to true.
      else if(r2.ub().value() <= r3.lb().value() && (bytecode.op == LEQ || r2.lb().value() == r3.ub().value())) {
        has_changed |= sub->embed(bytecode.x, ONE);
      }
      // `y op z` is disentailed: set x to false.
      else if(r2.lb().value() > r3.ub().value() || (bytecode.op == EQ && r2.ub().value() < r3.lb().value())) {
        has_changed |= sub->embed(bytecode.x, ZERO);
      }
    }
    else {
      // Arithmetic operator. First project `y op z` onto x.
      r1.project(bytecode.op, r2, r3);
      has_changed |= sub->embed(bytecode.x, r1);
      // Then propagate onto y through the left residual of `x = y op z`.
      r1 = (*sub)[bytecode.x];
      switch(bytecode.op) {
        // ... (elided: one case per operator, calling its left_residual from
        //      terms.hpp to compute the new domain of y)
        default: assert(false);
      }
      has_changed |= sub->embed(bytecode.y, r2);
      // Finally propagate onto z through the right residual.
      r2 = (*sub)[bytecode.y];
      // ...
      switch(bytecode.op) {
        // ... (elided: one case per operator, calling its right_residual from
        //      terms.hpp to compute the new domain of z)
        default: assert(false);
      }
      has_changed |= sub->embed(bytecode.z, r3);
    }
    return has_changed;
  }
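  // Worked example (illustrative): for `x = y + z` with x in [0,10],
  // y in [2,5] and z in [4,9], the projection gives x in [6,10]
  // (= [2+4, 5+9] meet [0,10]); the left residual y = x - z leaves y in [2,5]
  // (meet with [6-9, 10-4] = [-3,6]); the right residual z = x - y tightens z
  // to [4,8] (meet with [6-5, 10-2] = [1,8]).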
  CUDA local::B is_bot() const {
    return sub->is_bot();
  }

  /** PIR stores no information on top of its sub-domain when the bytecode
   * array is empty. */
  CUDA local::B is_top() const {
    return sub->is_top() && bytecodes->size() == 0;
  }

  // ... (elided: operator[](int x))

  CUDA auto project(AVar x) const {
    return sub->project(x);
  }

  template <class Univ>
  CUDA void project(AVar x, Univ& u) const {
    sub->project(x, u);
  }

  // ... (elided: vars())

  template <class Alloc2 = allocator_type>
  CUDA snapshot_type<Alloc2> snapshot(const Alloc2& alloc = Alloc2()) const {
    return snapshot_type<Alloc2>(bytecodes->size(), sub->snapshot(alloc));
  }

  /** Restore the domain to the snapshot state, popping the bytecodes added
   * since the snapshot was taken. */
  template <class Alloc2>
  CUDA void restore(const snapshot_type<Alloc2>& snap) {
    size_t n = bytecodes->size();
    for(size_t i = snap.num_bytecodes; i < n; ++i) {
      bytecodes->pop_back();
    }
    sub->restore(snap.sub_snap);
  }

  /** An abstract element is extractable when all its bytecodes are entailed
   * and the sub-domain is extractable. */
  template <class ExtractionStrategy = NonAtomicExtraction>
  CUDA bool is_extractable(const ExtractionStrategy& strategy = ExtractionStrategy()) const {
    // ...
    for(int i = 0; i < bytecodes->size(); ++i) {
      // ... (elided: return false if the i-th bytecode is not entailed)
    }
    return sub->is_extractable(strategy);
  }
  /** Extract the current state into `ua`; when `B` is PIR-like, the
   * sub-domains are extracted recursively. */
  template <class B>
  CUDA void extract(B& ua) const {
    if constexpr(impl::is_pir_like<B>::value) {
      sub->extract(*ua.sub);
    }
    // ...
  }
  /** Deinterpret a bytecode back into the logical formula `x = (y op z)`. */
  template <class Env, class Allocator2>
  CUDA NI TFormula<Allocator2> deinterpret(bytecode_type bytecode, const Env& env, Allocator2 allocator) const {
    using F = TFormula<Allocator2>;
    auto X = F::make_lvar(bytecode.x.aty(), LVar<Allocator2>(env.name_of(bytecode.x), allocator));
    auto Y = F::make_lvar(bytecode.y.aty(), LVar<Allocator2>(env.name_of(bytecode.y), allocator));
    auto Z = F::make_lvar(bytecode.z.aty(), LVar<Allocator2>(env.name_of(bytecode.z), allocator));
    return F::make_binary(X, EQ, F::make_binary(Y, bytecode.op, Z, aty(), allocator), aty(), allocator);
  }
  /** Deinterpret the whole domain: the conjunction of the sub-domain formula
   * and one ternary formula per bytecode. */
  template <class Env, class Allocator2 = typename Env::allocator_type>
  CUDA NI TFormula<Allocator2> deinterpret(const Env& env, Allocator2 allocator = Allocator2()) const {
    using F = TFormula<Allocator2>;
    typename F::Sequence seq{allocator};
    seq.push_back(sub->deinterpret(env, allocator));
    for(int i = 0; i < bytecodes->size(); ++i) {
      seq.push_back(deinterpret((*bytecodes)[i], env, allocator));
    }
    return F::make_nary(AND, std::move(seq), aty());
  }

  template <class I, class Env, class Allocator2 = typename Env::allocator_type>
  CUDA NI TFormula<Allocator2> deinterpret(const I& intermediate, const Env& env, Allocator2 allocator = Allocator2()) const {
    using F = TFormula<Allocator2>;
    typename F::Sequence seq{allocator};
    seq.push_back(sub->deinterpret(intermediate.sub_value, env, allocator));
    for(int i = 0; i < intermediate.bytecodes.size(); ++i) {
      seq.push_back(deinterpret(intermediate.bytecodes[i], env, allocator));
    }
    return F::make_nary(AND, std::move(seq), aty());
  }
};

} // namespace lala
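// Standalone illustration (not part of pir.hpp): the interval propagation that
// `deduce` performs for an ADD bytecode `x = y + z`, written against a plain
// int interval so it can be compiled and run on its own. `Itv`, `meet` and
// `propagate_add` are illustrative names, not lala APIs.
#include <algorithm>
#include <cassert>

struct Itv { int lb, ub; };

// Intersect `a` with `b` and report whether `a` changed (the role played by
// `sub->embed` in PIR::deduce).
inline bool meet(Itv& a, Itv b) {
  int lb = std::max(a.lb, b.lb);
  int ub = std::min(a.ub, b.ub);
  bool changed = lb != a.lb || ub != a.ub;
  a.lb = lb;
  a.ub = ub;
  return changed;
}

// x = y + z: project onto x, then onto y and z through the residuals
// y = x - z and z = x - y (the role of left_residual and right_residual).
inline bool propagate_add(Itv& x, Itv& y, Itv& z) {
  bool changed = meet(x, {y.lb + z.lb, y.ub + z.ub});
  changed |= meet(y, {x.lb - z.ub, x.ub - z.lb});
  changed |= meet(z, {x.lb - y.ub, x.ub - y.lb});
  return changed;
}

int main() {
  Itv x{0, 10}, y{2, 5}, z{4, 9};
  while(propagate_add(x, y, z)) {}  // iterate to a fixpoint, like deduce
  assert(x.lb == 6 && x.ub == 10);
  assert(y.lb == 2 && y.ub == 5);
  assert(z.lb == 4 && z.ub == 8);
}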