Lattice Land Core Library
vstore.hpp
1// Copyright 2021 Pierre Talbot
2
3#ifndef LALA_CORE_VSTORE_HPP
4#define LALA_CORE_VSTORE_HPP
5
6#include "logic/logic.hpp"
8#include "abstract_deps.hpp"
9#include <optional>
10
11namespace lala {
12
13struct NonAtomicExtraction {
14 static constexpr bool atoms = false;
15 };
16
17struct AtomicExtraction {
18 static constexpr bool atoms = true;
19 };
20
21/** The variable store abstract domain is a _domain transformer_ built on top of an abstract universe `U`.
22Concretization function: \f$ \gamma(\rho) := \bigcap_{x \in \pi(\rho)} \gamma_{U_x}(\rho(x)) \f$.
23The top element is smashed and the equality between two stores is represented by the following equivalence relation, for two stores \f$ S \f$ and \f$ T \f$:
24\f$ S \equiv T \Leftrightarrow \forall{x \in \mathit{Vars}},~S(x) = T(x) \lor \exists{x \in \mathit{dom}(S)},\exists{y \in \mathit{dom}(T)},~S(x) = \top \land T(y) = \top \f$.
25Intuitively, it means that either all elements are equal or both stores have a top element, in which case they "collapse" to the top element, and are considered equal.
26
27The bottom element is the element \f$ \langle \bot, \ldots \rangle \f$, that is, an infinite sequence of variables all initialized to bottom.
28In practice, we cannot represent infinite collections, so we represent bottom either as the empty collection or with a finite number of bottom elements.
29Any finite store \f$ \langle x_1, \ldots, x_n \rangle \f$ should be seen as the concrete store \f$ \langle x_1, \ldots, x_n, \bot, \ldots \rangle \f$.
30
31This semantics has implications when joining or merging two elements.
32For instance, \f$ \langle 1 \rangle.\mathit{dtell}(\langle \bot, 4 \rangle) \f$ will be equal to bottom, in that case represented by \f$ \langle \bot \rangle \f$.
33
34Template parameters:
35 - `U` is the type of the abstract universe.
36 - `Allocator` is the allocator of the underlying array of universes. */
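/* Illustrative sketch (not part of the original header), assuming `Itv` is some
 * interval-like abstract universe and `IStore` is an alias for
 * `VStore<Itv, battery::standard_allocator>`; it shows the bottom-padded semantics
 * and the meet (`dtell`) example described above.
 *
 *   IStore s(UNTYPED, 2);            // s = <bot, bot>
 *   s.tell(0, Itv(1, 1));            // s = <[1..1], bot>
 *   IStore t(UNTYPED, 2);            // t = <bot, bot>
 *   t.tell(1, Itv(4, 4));            // t = <bot, [4..4]>
 *   s.dtell(t);                      // index-wise meet: <[1..1], bot> /\ <bot, [4..4]> = <bot, bot>
 *   assert(s.is_bot());
 */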
37template<class U, class Allocator>
38class VStore {
39public:
40 using universe_type = U;
41 using local_universe = typename universe_type::local_type;
42 using allocator_type = Allocator;
43 using this_type = VStore<universe_type, allocator_type>;
44
45 template <class Alloc>
46 struct var_dom {
47 AVar avar;
48 local_universe dom;
49 var_dom() = default;
50 var_dom(const var_dom<Alloc>&) = default;
51 CUDA explicit var_dom(const Alloc&) {}
52 CUDA var_dom(AVar avar, const local_universe& dom): avar(avar), dom(dom) {}
53 template <class VarDom>
54 CUDA var_dom(const VarDom& other): avar(other.avar), dom(other.dom) {}
55 };
56
57 template <class Alloc>
58 using tell_type = battery::vector<var_dom<Alloc>, Alloc>;
59
60 template <class Alloc>
61 using ask_type = tell_type<Alloc>;
62
63 template <class Alloc = allocator_type>
64 using snapshot_type = battery::vector<local_universe, Alloc>;
65
66 constexpr static const bool is_abstract_universe = false;
67 constexpr static const bool sequential = universe_type::sequential;
68 constexpr static const bool is_totally_ordered = false;
69 constexpr static const bool preserve_bot = true;
70 constexpr static const bool preserve_top = true;
71 constexpr static const bool preserve_join = universe_type::preserve_join;
72 constexpr static const bool preserve_meet = universe_type::preserve_meet;
73 constexpr static const bool injective_concretization = universe_type::injective_concretization;
74 constexpr static const bool preserve_concrete_covers = universe_type::preserve_concrete_covers;
75 constexpr static const char* name = "VStore";
76
77 template<class U2, class Alloc2>
78 friend class VStore;
79
80private:
81 using store_type = battery::vector<universe_type, allocator_type>;
82 using memory_type = typename universe_type::memory_type;
83
84 AType atype;
85 store_type data;
86 BInc<memory_type> is_at_top;
87
88public:
89 CUDA VStore(const this_type& other)
90 : atype(other.atype), data(other.data), is_at_top(other.is_at_top)
91 {}
92
93 /** Initialize an empty store. */
94 CUDA VStore(AType atype, const allocator_type& alloc = allocator_type())
95 : atype(atype), data(alloc), is_at_top(false)
96 {}
97
98 CUDA VStore(AType atype, size_t size, const allocator_type& alloc = allocator_type())
99 : atype(atype), data(size, alloc), is_at_top(false)
100 {}
101
102 template<class R>
103 CUDA VStore(const VStore<R, allocator_type>& other)
104 : atype(other.atype), data(other.data), is_at_top(other.is_at_top)
105 {}
106
107 template<class R, class Alloc2>
108 CUDA VStore(const VStore<R, Alloc2>& other, const allocator_type& alloc = allocator_type())
109 : atype(other.atype), data(other.data, alloc), is_at_top(other.is_at_top)
110 {}
111
112 /** Copy the vstore `other` in the current element.
113 * `deps` can be empty; it is only used to obtain the allocator (since this abstract domain has no dependencies). */
114 template<class R, class Alloc2, class... Allocators>
115 CUDA VStore(const VStore<R, Alloc2>& other, const AbstractDeps<Allocators...>& deps)
116 : VStore(other, deps.template get_allocator<allocator_type>()) {}
117
118 CUDA VStore(this_type&& other):
119 atype(other.atype), data(std::move(other.data)), is_at_top(other.is_at_top) {}
120
121 CUDA allocator_type get_allocator() const {
122 return data.get_allocator();
123 }
124
125 CUDA AType aty() const {
126 return atype;
127 }
128
129 /** Returns the number of variables currently represented by this abstract element. */
130 CUDA size_t vars() const {
131 return data.size();
132 }
133
134 CUDA static this_type bot(AType atype = UNTYPED,
135 const allocator_type& alloc = allocator_type{})
136 {
137 return VStore(atype, alloc);
138 }
139
140 /** A special symbolic element representing top. */
141 CUDA static this_type top(AType atype = UNTYPED,
142 const allocator_type& alloc = allocator_type{})
143 {
144 return std::move(VStore{atype, alloc}.tell_top());
145 }
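/* Illustrative sketch (not part of the original header), with `IStore` a VStore
 * instantiation as above: `bot()` is the empty store (all variables implicitly at
 * bottom), while `top()` is an empty store with its top flag raised.
 *
 *   assert(IStore::bot().is_bot());
 *   assert(IStore::top().is_top());
 */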
146
147 template <class Env>
148 CUDA static this_type bot(Env& env,
149 const allocator_type& alloc = allocator_type{})
150 {
151 return bot(env.extends_abstract_dom(), alloc);
152 }
153
154 template <class Env>
155 CUDA static this_type top(Env& env,
156 const allocator_type& alloc = allocator_type{})
157 {
158 return top(env.extends_abstract_dom(), alloc);
159 }
160
161 /** `true` if at least one element is equal to top in the store, `false` otherwise. */
162 CUDA local::BInc is_top() const {
163 return is_at_top;
164 }
165
166 /** The bottom element of a store of `n` variables is when all variables are at bottom, or the store is empty.
167 * We do not expect to use this operation a lot, so its complexity is linear in the number of variables. */
168 CUDA local::BDec is_bot() const {
169 if(is_at_top) { return false; }
170 for(int i = 0; i < vars(); ++i) {
171 if(!data[i].is_bot()) {
172 return false;
173 }
174 }
175 return true;
176 }
177
178 /** Take a snapshot of the current variable store. */
179 template <class Alloc = allocator_type>
180 CUDA snapshot_type<Alloc> snapshot(const Alloc& alloc = Alloc()) const {
181 return snapshot_type<Alloc>(data, alloc);
182 }
183
184 template <class Alloc>
185 CUDA this_type& restore(const snapshot_type<Alloc>& snap) {
186 while(snap.size() < data.size()) {
187 data.pop_back();
188 }
189 is_at_top.dtell_bot();
190 for(int i = 0; i < snap.size(); ++i) {
191 data[i].dtell(snap[i]);
192 is_at_top.tell(data[i].is_top());
193 }
194 return *this;
195 }
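/* Illustrative sketch (not part of the original header): speculative update with
 * rollback, assuming `store` is an `IStore` with at least one variable and `Itv`
 * an interval-like universe where an empty interval is top.
 *
 *   auto snap = store.snapshot();    // copy of all current domains
 *   store.tell(0, Itv(5, 3));        // suppose this update makes variable 0, hence the store, top
 *   if(store.is_top()) {
 *     store.restore(snap);           // shrink back to the snapshot size and restore each domain
 *   }
 */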
196
197private:
198 template <bool diagnose, class F, class Env, class Alloc2>
199 CUDA NI bool interpret_existential(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
200 assert(f.is(F::E));
201 var_dom<Alloc2> k;
202 if(local_universe::template interpret_tell<diagnose>(f, env, k.dom, diagnostics)) {
203 if(env.interpret(f.map_atype(atype), k.avar, diagnostics)) {
204 tell.push_back(k);
205 return true;
206 }
207 }
208 return false;
209 }
210
211 /** Interpret a predicate without variables. */
212 template <bool diagnose, class F, class Env, class Alloc2>
213 CUDA NI bool interpret_zero_predicate(const F& f, const Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
214 if(f.is_true()) {
215 return true;
216 }
217 else if(f.is_false()) {
218 tell.push_back(var_dom<Alloc2>(AVar{}, U::top()));
219 return true;
220 }
221 else {
222 RETURN_INTERPRETATION_ERROR("Only `true` and `false` can be interpreted in the store without being named.");
223 }
224 }
225
226 /** Interpret a predicate with a single variable occurrence. */
227 template <IKind kind, bool diagnose, class F, class Env, class Alloc2>
228 CUDA NI bool interpret_unary_predicate(const F& f, const Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
229 local_universe u = local_universe::bot();
230 bool res = local_universe::template interpret<kind, diagnose>(f, env, u, diagnostics);
231 if(res) {
232 const auto& varf = var_in(f);
233 // When it is not necessary, we try to avoid using the environment.
234 // This is for instance useful when refinement operators add new constraints but do not have access to the environment (e.g., split()), and to avoid passing the environment around everywhere.
235 if(varf.is(F::V)) {
236 tell.push_back(var_dom<Alloc2>(varf.v(), u));
237 }
238 else {
239 auto var = var_in(f, env);
240 if(!var.has_value()) {
241 RETURN_INTERPRETATION_ERROR("Undeclared variable.");
242 }
243 auto avar = var->avar_of(atype);
244 if(!avar.has_value()) {
245 RETURN_INTERPRETATION_ERROR("The variable was not declared in the current abstract element (but exists in other abstract elements).");
246 }
247 tell.push_back(var_dom<Alloc2>(*avar, u));
248 }
249 return true;
250 }
251 else {
252 RETURN_INTERPRETATION_ERROR("Could not interpret a unary predicate in the underlying abstract universe.");
253 }
254 }
255
256 template <IKind kind, bool diagnose, class F, class Env, class Alloc2>
257 CUDA NI bool interpret_predicate(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
258 if(f.type() != UNTYPED && f.type() != aty()) {
259 RETURN_INTERPRETATION_ERROR("The abstract type of this predicate does not match the one of the current abstract element.");
260 }
261 if constexpr(kind == IKind::TELL) {
262 if(f.is(F::E)) {
263 return interpret_existential<diagnose>(f, env, tell, diagnostics);
264 }
265 }
266 switch(num_vars(f)) {
267 case 0: return interpret_zero_predicate<diagnose>(f, env, tell, diagnostics);
268 case 1: return interpret_unary_predicate<kind, diagnose>(f, env, tell, diagnostics);
269 default: RETURN_INTERPRETATION_ERROR("Interpretation of n-ary predicate is not supported in VStore.");
270 }
271 }
272
273public:
274 template <IKind kind, bool diagnose = false, class F, class Env, class I>
275 CUDA NI bool interpret(const F& f, Env& env, I& intermediate, IDiagnostics& diagnostics) const {
276 if(f.is_untyped() || f.type() == aty()) {
277 return interpret_predicate<kind, diagnose>(f, env, intermediate, diagnostics);
278 }
279 else {
280 RETURN_INTERPRETATION_ERROR("This abstract element cannot interpret a formula with a different type.");
281 }
282 }
283
284 /** The store of variables lattice expects a formula with a single variable (including existential quantifiers) that can be handled by the abstract universe `U`.
285 *
286 * Variables must be existentially quantified before a formula containing variables can be interpreted.
287 * Variables are immediately assigned to an index of `VStore` and initialized to \f$ \bot_U \f$.
288 * Shadowing/redeclaration of variables with existential quantifier is not supported.
289 * The variable mapping is added to the environment only if the interpretation succeeds.
290
291 * There is a small quirk: different stores might be produced if quantifiers do not appear in the same order.
292 * This is because we attribute the first available index to variables when interpreting the quantifier.
293 * In that case, the store will only be equivalent modulo the `env` structure.
294 */
295 template <bool diagnose = false, class F, class Env, class Alloc2>
296 CUDA NI bool interpret_tell(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
297 return interpret<IKind::TELL, diagnose>(f, env, tell, diagnostics);
298 }
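/* Illustrative sketch (not part of the original header): typical interpretation
 * pipeline, assuming `f_exists` and `f_geq` are formulas for an existential
 * quantifier on `x` and the constraint `x >= 5`, `env` is a variable environment
 * and `store` an `IStore` as above.
 *
 *   IDiagnostics diag;
 *   IStore::tell_type<IStore::allocator_type> tell;
 *   if(store.interpret_tell(f_exists, env, tell, diag)
 *    && store.interpret_tell(f_geq, env, tell, diag)) {
 *     store.tell(tell);              // grow the store and update the domain of `x`
 *   }
 */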
299
300 /** Similar to `interpret_tell` but does not support existential quantifiers, and therefore leaves `env` unchanged. */
301 template <bool diagnose = false, class F, class Env, class Alloc2>
302 CUDA NI bool interpret_ask(const F& f, const Env& env, ask_type<Alloc2>& ask, IDiagnostics& diagnostics) const {
303 return const_cast<this_type*>(this)->interpret<IKind::ASK, diagnose>(f, const_cast<Env&>(env), ask, diagnostics);
304 }
305
306 /** The projection must stay const; otherwise the user could tell new information directly in the universe, and we would not detect when the store reaches `top`. */
307 CUDA const universe_type& project(AVar x) const {
308 assert(x.aty() == aty());
309 assert(x.vid() < data.size());
310 return data[x.vid()];
311 }
312
313 /** See note on projection. */
314 CUDA const universe_type& operator[](int x) const {
315 return data[x];
316 }
317
318 CUDA this_type& tell_top() {
319 is_at_top.tell_top();
320 return *this;
321 }
322
323 /** Given an abstract variable `v`, `tell(VID(v), dom)` will update the domain of this variable with the new information `dom`.
324 * This `tell` method follows PCCP's model, but the variable `x` must already be initialized in the store.
325 */
326 CUDA this_type& tell(int x, const universe_type& dom) {
327 assert(x < data.size());
328 data[x].tell(dom);
329 is_at_top.tell(data[x].is_top());
330 return *this;
331 }
332
333 /** This `tell` method follows PCCP's model, but the variable `x` must already be initialized in the store. */
334 template <class Mem>
335 CUDA this_type& tell(int x, const universe_type& dom, BInc<Mem>& has_changed) {
336 assert(x < data.size());
337 data[x].tell(dom, has_changed);
338 is_at_top.tell(data[x].is_top());
339 return *this;
340 }
341
342 /** This `tell` method follows PCCP's model, but the variable `x` must already be initialized in the store. */
343 CUDA this_type& tell(AVar x, const universe_type& dom) {
344 assert(x.aty() == aty());
345 return tell(x.vid(), dom);
346 }
347
348 /** This `tell` method follows PCCP's model, but the variable `x` must already be initialized in the store. */
349 template <class Mem>
350 CUDA this_type& tell(AVar x, const universe_type& dom, BInc<Mem>& has_changed) {
351 assert(x.aty() == aty());
352 return tell(x.vid(), dom, has_changed);
353 }
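/* Illustrative sketch (not part of the original header): PCCP-style tells with
 * change tracking, assuming variable 0 is already initialized and `Itv` as above.
 *
 *   local::BInc has_changed;
 *   store.tell(0, Itv(0, 10), has_changed);   // strictly more information: has_changed is raised
 *   store.tell(0, Itv(-5, 20), has_changed);  // weaker information: the domain stays [0..10]
 *   if(has_changed) { ... }                   // e.g. schedule another round of refinement
 */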
354
355 /** This tell method can grow the store if required, and therefore does not satisfy the PCCP model.
356 * /!\ It should not be used in parallel.
357 */
358 template <class Alloc2, class Mem>
359 CUDA this_type& tell(const tell_type<Alloc2>& t, BInc<Mem>& has_changed) {
360 if(t.size() == 0) {
361 return *this;
362 }
363 if(t[0].avar == AVar{}) {
364 is_at_top.tell(local::BInc(true), has_changed);
365 return *this;
366 }
367 if(t.back().avar.vid() >= data.size()) {
368 data.resize(t.back().avar.vid()+1);
369 }
370 for(int i = 0; i < t.size(); ++i) {
371 tell(t[i].avar, t[i].dom, has_changed);
372 }
373 return *this;
374 }
375
376 /** This tell method can grow the store if required, and therefore does not satisfy the PCCP model.
377 * /!\ It should not be used in parallel.
378 */
379 template <class Alloc2>
380 CUDA this_type& tell(const tell_type<Alloc2>& t) {
381 local::BInc has_changed;
382 return tell(t, has_changed);
383 }
384
385 /** Precondition: `other` must have a size smaller than or equal to the current store. */
386 template <class U2, class Alloc2, class Mem>
387 CUDA this_type& tell(const VStore<U2, Alloc2>& other, BInc<Mem>& has_changed) {
388 is_at_top.tell(other.is_at_top, has_changed);
389 int min_size = battery::min(vars(), other.vars());
390 for(int i = 0; i < min_size; ++i) {
391 data[i].tell(other[i], has_changed);
392 }
393 for(int i = min_size; i < other.vars(); ++i) {
394 assert(other[i].is_bot()); // the size of the current store cannot be modified.
395 }
396 return *this;
397 }
398
399 /** Precondition: `other` must have a size smaller than or equal to the current store. */
400 template <class U2, class Alloc2>
401 CUDA this_type& tell(const VStore<U2, Alloc2>& other) {
402 local::BInc has_changed;
403 return tell(other, has_changed);
404 }
405
406 CUDA this_type& dtell_bot() {
407 is_at_top.dtell_bot();
408 for(int i = 0; i < data.size(); ++i) {
409 data[i].dtell_bot();
410 }
411 return *this;
412 }
413
414 /** Precondition: `other` must have a size smaller than or equal to the current store. */
415 template <class U2, class Alloc2, class Mem>
416 CUDA this_type& dtell(const VStore<U2, Alloc2>& other, BInc<Mem>& has_changed) {
417 if(other.is_top()) {
418 return *this;
419 }
420 int min_size = battery::min(vars(), other.vars());
421 is_at_top.dtell(other.is_at_top, has_changed);
422 for(int i = 0; i < min_size; ++i) {
423 data[i].dtell(other[i], has_changed);
424 }
425 for(int i = min_size; i < vars(); ++i) {
426 data[i].dtell(U::bot(), has_changed);
427 }
428 for(int i = min_size; i < other.vars(); ++i) {
429 assert(other[i].is_bot());
430 }
431 return *this;
432 }
433
434 /** Precondition: `other` must have a size smaller than or equal to the current store. */
435 template <class U2, class Alloc2>
436 CUDA this_type& dtell(const VStore<U2, Alloc2>& other) {
437 local::BInc has_changed;
438 return dtell(other, has_changed);
439 }
440
441 /** \return `true` when we can deduce the content of `t` from the current domain.
442 * For instance, if we have in the store `x = [0..10]`, we can deduce `x = [-1..11]` but we cannot deduce `x = [5..8]`. */
443 template <class Alloc2>
444 CUDA local::BInc ask(const ask_type<Alloc2>& t) const {
445 for(int i = 0; i < t.size(); ++i) {
446 if(!(data[t[i].avar.vid()] >= t[i].dom)) {
447 return false;
448 }
449 }
450 return true;
451 }
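/* Illustrative sketch (not part of the original header): entailment checks,
 * assuming variable `x` currently has domain [0..10] and `ask_leq11`/`ask_geq5`
 * were produced by `interpret_ask` for `x <= 11` and `x >= 5`.
 *
 *   store.ask(ask_leq11);            // true: [0..10] is included in [-oo..11]
 *   store.ask(ask_geq5);             // false: [0..10] is not included in [5..+oo]
 */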
452
453 CUDA size_t num_refinements() const { return 0; }
454 template <class M>
455 CUDA void refine(size_t, BInc<M>&) const { assert(false); }
456
457 /** An abstract element is extractable when it is not equal to top.
458 * If the strategy requires atoms (`ExtractionStrategy::atoms`), we also check that every domain is a singleton.
459 */
460 template<class ExtractionStrategy = NonAtomicExtraction>
461 CUDA bool is_extractable(const ExtractionStrategy& strategy = ExtractionStrategy()) const {
462 if(is_top()) {
463 return false;
464 }
465 if constexpr(ExtractionStrategy::atoms) {
466 for(int i = 0; i < data.size(); ++i) {
467 if(data[i].lb() < dual<typename universe_type::LB>(data[i].ub())) {
468 return false;
469 }
470 }
471 }
472 return true;
473 }
474
475 /** Whenever `this` is different from `top`, we extract its data into `ua`.
476 * \pre `is_extractable()` must be `true`.
477 * For now, we assume VStore is only used to store under-approximations; it is not yet clear how it would interact with over-approximations. */
478 template<class U2, class Alloc2>
479 CUDA void extract(VStore<U2, Alloc2>& ua) const {
480 if((void*)&ua != (void*)this) {
481 ua.data = data;
482 ua.is_at_top.dtell_bot();
483 }
484 }
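/* Illustrative sketch (not part of the original header): extracting an
 * under-approximation once every domain is a singleton, with `IStore` as above.
 *
 *   if(store.is_extractable(AtomicExtraction{})) {
 *     IStore solution(store.aty());
 *     store.extract(solution);       // copies the data; `solution` cannot be top here
 *   }
 */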
485
486 template<class Env>
487 CUDA NI TFormula<typename Env::allocator_type> deinterpret(const Env& env) const {
488 using F = TFormula<typename Env::allocator_type>;
489 typename F::Sequence seq{env.get_allocator()};
490 for(int i = 0; i < data.size(); ++i) {
491 AVar v(aty(), i);
492 seq.push_back(F::make_exists(aty(), env.name_of(v), env.sort_of(v)));
493 auto f = data[i].deinterpret(v, env);
494 f.type_as(aty());
495 map_avar_to_lvar(f, env);
496 seq.push_back(std::move(f));
497 }
498 return F::make_nary(AND, std::move(seq), aty());
499 }
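/* Illustrative sketch (not part of the original header): `deinterpret` rebuilds, for
 * each variable, an existential quantifier followed by the deinterpretation of its
 * domain in `U`, all conjoined; e.g. a store <[0..10]> over a variable `x` roughly
 * gives `var x /\ x >= 0 /\ x <= 10` (the exact sub-formulas depend on `U`).
 */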
500
501 CUDA void print() const {
502 if(is_top()) {
503 printf("\u22A4 | ");
504 }
505 printf("<");
506 for(int i = 0; i < vars(); ++i) {
507 data[i].print();
508 printf("%s", (i+1 == vars() ? "" : ", "));
509 }
510 printf(">\n");
511 }
512};
513
514// Lattice operations.
515// Note that we do not consider the logical names of the variables.
516// These operations only consider the indices of the elements.
517
518template<class L, class K, class Alloc>
519CUDA auto join(const VStore<L, Alloc>& a, const VStore<K, Alloc>& b)
520{
521 using U = decltype(join(a[0], b[0]));
522 if(a.is_top() || b.is_top()) {
523 return VStore<U, Alloc>::top();
524 }
525 int max_size = battery::max(a.vars(), b.vars());
526 int min_size = battery::min(a.vars(), b.vars());
527 VStore<U, Alloc> res(UNTYPED, max_size, a.get_allocator());
528 for(int i = 0; i < min_size; ++i) {
529 res.tell(i, join(a[i], b[i]));
530 }
531 for(int i = min_size; i < a.vars(); ++i) {
532 res.tell(i, a[i]);
533 }
534 for(int i = min_size; i < b.vars(); ++i) {
535 res.tell(i, b[i]);
536 }
537 return res;
538}
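/* Illustrative sketch (not part of the original header): index-wise join, with the
 * shorter store implicitly padded with bottom (`Itv`/`IStore` as above).
 *
 *   IStore a(UNTYPED, 1); a.tell(0, Itv(0, 5));                        // a = <[0..5]>
 *   IStore b(UNTYPED, 2); b.tell(0, Itv(3, 10)); b.tell(1, Itv(1, 2)); // b = <[3..10], [1..2]>
 *   auto c = join(a, b);                                               // c = <[3..5], [1..2]>
 */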
539
540template<class L, class K, class Alloc>
541CUDA auto meet(const VStore<L, Alloc>& a, const VStore<K, Alloc>& b)
542{
543 using U = decltype(meet(a[0], b[0]));
544 if(a.is_top()) {
545 if(b.is_top()) {
546 return VStore<U, Alloc>::top();
547 }
548 else {
549 return VStore<U, Alloc>(b);
550 }
551 }
552 else if(b.is_top()) {
553 return VStore<U, Alloc>(a);
554 }
555 else {
556 int min_size = battery::min(a.vars(), b.vars());
557 VStore<U, Alloc> res(UNTYPED, min_size, a.get_allocator());
558 for(int i = 0; i < min_size; ++i) {
559 res.tell(i, meet(a[i], b[i]));
560 }
561 return res;
562 }
563}
564
565template<class L, class K, class Alloc1, class Alloc2>
566CUDA bool operator<=(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
567{
568 if(b.is_top()) {
569 return true;
570 }
571 else {
572 int min_size = battery::min(a.vars(), b.vars());
573 for(int i = 0; i < min_size; ++i) {
574 if(a[i] > b[i]) {
575 return false;
576 }
577 }
578 for(int i = min_size; i < a.vars(); ++i) {
579 if(!a[i].is_bot()) {
580 return false;
581 }
582 }
583 }
584 return true;
585}
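/* Illustrative sketch (not part of the original header): the order is index-wise,
 * with missing positions treated as bottom (`Itv`/`IStore` as above).
 *
 *   IStore a(UNTYPED, 1); a.tell(0, Itv(0, 10));                       // a = <[0..10]>
 *   IStore b(UNTYPED, 2); b.tell(0, Itv(0, 5)); b.tell(1, Itv(1, 2));  // b = <[0..5], [1..2]>
 *   assert(a <= b);                  // [0..10] <= [0..5] and bottom <= [1..2]
 */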
586
587template<class L, class K, class Alloc1, class Alloc2>
588CUDA bool operator<(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
589{
590 if(b.is_top()) {
591 return !a.is_top();
592 }
593 else {
594 int min_size = battery::min(a.vars(), b.vars());
595 bool strict = false;
596 for(int i = 0; i < b.vars(); ++i) {
597 if(i < a.vars()) {
598 if(a[i] < b[i]) {
599 strict = true;
600 }
601 else if(a[i] > b[i]) {
602 return false;
603 }
604 }
605 else if(!b[i].is_bot()) {
606 strict = true;
607 }
608 }
609 for(int i = min_size; i < a.vars(); ++i) {
610 if(!a[i].is_bot()) {
611 return false;
612 }
613 }
614 return strict;
615 }
616}
617
618template<class L, class K, class Alloc1, class Alloc2>
619CUDA bool operator>=(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
620{
621 return b <= a;
622}
623
624template<class L, class K, class Alloc1, class Alloc2>
625CUDA bool operator>(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
626{
627 return b < a;
628}
629
630template<class L, class K, class Alloc1, class Alloc2>
631CUDA bool operator==(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
632{
633 if(a.is_top()) {
634 return b.is_top();
635 }
636 else if(b.is_top()) {
637 return false;
638 }
639 else {
640 int min_size = battery::min(a.vars(), b.vars());
641 for(int i = 0; i < min_size; ++i) {
642 if(a[i] != b[i]) {
643 return false;
644 }
645 }
646 for(int i = min_size; i < a.vars(); ++i) {
647 if(!a[i].is_bot()) {
648 return false;
649 }
650 }
651 for(int i = min_size; i < b.vars(); ++i) {
652 if(!b[i].is_bot()) {
653 return false;
654 }
655 }
656 }
657 return true;
658}
659
660template<class L, class K, class Alloc1, class Alloc2>
661CUDA bool operator!=(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
662{
663 return !(a == b);
664}
665
666template<class L, class Alloc>
667std::ostream& operator<<(std::ostream &s, const VStore<L, Alloc> &vstore) {
668 if(vstore.is_top()) {
669 s << "\u22A4: ";
670 }
671 else {
672 s << "<";
673 for(int i = 0; i < vstore.vars(); ++i) {
674 s << vstore[i] << (i+1 == vstore.vars() ? "" : ", ");
675 }
676 s << ">";
677 }
678 return s;
679}
680
681} // namespace lala
682
683#endif