Lattice Land Core Library
Loading...
Searching...
No Matches
vstore.hpp
Go to the documentation of this file.
1// Copyright 2021 Pierre Talbot
2
3#ifndef LALA_CORE_VSTORE_HPP
4#define LALA_CORE_VSTORE_HPP
5
6#include "logic/logic.hpp"
8#include "abstract_deps.hpp"
9#include <optional>
10
11namespace lala {
12
14 static constexpr bool atoms = false;
15 };
16
18 static constexpr bool atoms = true;
19 };
20
21/** The variable store abstract domain is a _domain transformer_ built on top of an abstract universe `U`.
22Concretization function: \f$ \gamma(\rho) := \bigcap_{x \in \pi(\rho)} \gamma_{U_x}(\rho(x)) \f$.
23The bot element is smashed and the equality between two stores is represented by the following equivalence relation, for two stores \f$ S \f$ and \f$ T \f$:
24\f$ S \equiv T \Leftrightarrow \forall{x \in \mathit{Vars}},~S(x) = T(x) \lor \exists{x \in \mathit{dom}(S)},\exists{y \in \mathit{dom}(T)},~S(x) = \bot \land T(y) = \bot \f$.
25Intuitively, it means that either all elements are equal or both stores have a bot element, in which case they "collapse" to the bot element, and are considered equal.
26
27The top element is the element \f$ \langle \top, \ldots \rangle \f$, that is an infinite number of variables initialized to top.
28In practice, we cannot represent infinite collections, so we represent top either as the empty collection or with a finite number of top elements.
29Any finite store \f$ \langle x_1, \ldots, x_n \rangle \f$ should be seen as the concrete store \f$ \langle x_1, \ldots, x_n, \top, \ldots \rangle \f$.
30
This semantics has implications when joining or merging two elements.
32For instance, \f$ \langle 1 \rangle.\mathit{meet}(\langle \bot, 4 \rangle) \f$ will be equal to bottom, in that case represented by \f$ \langle \bot \rangle \f$.
33
34Template parameters:
35 - `U` is the type of the abstract universe.
36 - `Allocator` is the allocator of the underlying array of universes. */
37template<class U, class Allocator>
38class VStore {
39public:
40 using universe_type = U;
41 using local_universe = typename universe_type::local_type;
42 using allocator_type = Allocator;
44
45 template <class Alloc>
46 struct var_dom {
49 var_dom() = default;
50 var_dom(const var_dom<Alloc>&) = default;
51 CUDA explicit var_dom(const Alloc&) {}
53 template <class VarDom>
54 CUDA var_dom(const VarDom& other): avar(other.avar), dom(other.dom) {}
55 };
56
57 template <class Alloc>
58 using tell_type = battery::vector<var_dom<Alloc>, Alloc>;
59
60 template <class Alloc>
62
63 template <class Alloc = allocator_type>
64 using snapshot_type = battery::vector<local_universe, Alloc>;
65
66 constexpr static const bool is_abstract_universe = false;
67 constexpr static const bool sequential = universe_type::sequential;
68 constexpr static const bool is_totally_ordered = false;
69 constexpr static const bool preserve_bot = true;
70 constexpr static const bool preserve_top = true;
71 constexpr static const bool preserve_join = universe_type::preserve_join;
72 constexpr static const bool preserve_meet = universe_type::preserve_meet;
73 constexpr static const bool injective_concretization = universe_type::injective_concretization;
74 constexpr static const bool preserve_concrete_covers = universe_type::preserve_concrete_covers;
75 constexpr static const char* name = "VStore";
76
77 template<class U2, class Alloc2>
78 friend class VStore;
79
80private:
81 using store_type = battery::vector<universe_type, allocator_type>;
82 using memory_type = typename universe_type::memory_type;
83
84 AType atype;
85 store_type data;
86 B<memory_type> is_at_bot;
87
88public:
  /** Copy constructor: duplicates the array of universes and the smashed-bot flag. */
  CUDA VStore(const this_type& other)
   : atype(other.atype), data(other.data), is_at_bot(other.is_at_bot)
  {}

  /** Initialize an empty store. */
  CUDA VStore(AType atype, const allocator_type& alloc = allocator_type())
   : atype(atype), data(alloc), is_at_bot(false)
  {}

  /** Initialize a store with `size` variables, each default-constructed.
   * NOTE(review): assumes `universe_type`'s default constructor produces top — confirm in `U`. */
  CUDA VStore(AType atype, size_t size, const allocator_type& alloc = allocator_type())
   : atype(atype), data(size, alloc), is_at_bot(false)
  {}
101
102 template<class R>
104 : atype(other.atype), data(other.data), is_at_bot(other.is_at_bot)
105 {}
106
107 template<class R, class Alloc2>
108 CUDA VStore(const VStore<R, Alloc2>& other, const allocator_type& alloc = allocator_type())
109 : atype(other.atype), data(other.data, alloc), is_at_bot(other.is_at_bot)
110 {}
111
112 /** Copy the vstore `other` in the current element.
113 * `deps` can be empty and is not used besides to get the allocator (since this abstract domain does not have dependencies). */
114 template<class R, class Alloc2, class... Allocators>
116 : VStore(other, deps.template get_allocator<allocator_type>()) {}
117
118 CUDA VStore(this_type&& other):
119 atype(other.atype), data(std::move(other.data)), is_at_bot(other.is_at_bot) {}
120
122 return data.get_allocator();
123 }
124
  /** Returns the abstract type identifier of this element. */
  CUDA AType aty() const {
    return atype;
  }

  /** Returns the number of variables currently represented by this abstract element. */
  CUDA size_t vars() const {
    return data.size();
  }

  /** The top element is the empty store: every variable is implicitly at top. */
  CUDA static this_type top(AType atype = UNTYPED,
   const allocator_type& alloc = allocator_type{})
  {
    return VStore(atype, alloc);
  }
139
140 /** A special symbolic element representing top. */
141 CUDA static this_type bot(AType atype = UNTYPED,
142 const allocator_type& alloc = allocator_type{})
143 {
144 auto s = VStore{atype, alloc};
145 s.meet_bot();
146 return std::move(s);
147 }
148
  /** Same as `bot(AType, allocator_type)`, with the abstract type taken from `env`. */
  template <class Env>
  CUDA static this_type bot(Env& env,
   const allocator_type& alloc = allocator_type{})
  {
    return bot(env.extends_abstract_dom(), alloc);
  }

  /** Same as `top(AType, allocator_type)`, with the abstract type taken from `env`. */
  template <class Env>
  CUDA static this_type top(Env& env,
   const allocator_type& alloc = allocator_type{})
  {
    return top(env.extends_abstract_dom(), alloc);
  }

  /** \return `true` if at least one element is equal to bot in the store, `false` otherwise.
   * Constant time: only the smashed-bot flag is read, individual domains are not scanned.
   * @parallel @order-preserving @increasing
   */
  CUDA local::B is_bot() const {
    return is_at_bot;
  }
169
170 /** The bottom element of a store of `n` variables is when all variables are at bottom, or the store is empty.
171 * We do not expect to use this operation a lot, so its complexity is linear in the number of variables.
172 * @parallel @order-preserving @decreasing */
173 CUDA local::B is_top() const {
174 if(is_at_bot) { return false; }
175 for(int i = 0; i < vars(); ++i) {
176 if(!data[i].is_top()) {
177 return false;
178 }
179 }
180 return true;
181 }
182
  /** Take a snapshot of the current variable store.
   * The snapshot is a plain copy of each variable's domain, to be given back to `restore`. */
  template <class Alloc = allocator_type>
  CUDA snapshot_type<Alloc> snapshot(const Alloc& alloc = Alloc()) const {
    return snapshot_type<Alloc>(data, alloc);
  }
188
189 template <class Alloc>
191 while(snap.size() < data.size()) {
192 data.pop_back();
193 }
194 is_at_bot.meet_bot();
195 for(int i = 0; i < snap.size(); ++i) {
196 data[i].join(snap[i]);
197 is_at_bot.join(data[i].is_bot());
198 }
199 return *this;
200 }
201
private:
  /** Interpret the existential quantifier `f` into a (variable, domain) pair.
   * The sort of the variable is first interpreted in the abstract universe, then
   * the variable is declared in `env` under this store's abstract type.
   * The pair is appended to `tell` only if both steps succeed. */
  template <bool diagnose, class F, class Env, class Alloc2>
  CUDA NI bool interpret_existential(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    assert(f.is(F::E));
    var_dom<Alloc2> k;
    if(local_universe::template interpret_tell<diagnose>(f, env, k.dom, diagnostics)) {
      if(env.interpret(f.map_atype(atype), k.avar, diagnostics)) {
        tell.push_back(k);
        return true;
      }
    }
    return false;
  }

  /** Interpret a predicate without variables.
   * `true` produces no tell element; `false` produces a sentinel entry with a
   * default-constructed `AVar`, which `deduce` recognizes as "smash the store to bot". */
  template <bool diagnose, class F, class Env, class Alloc2>
  CUDA NI bool interpret_zero_predicate(const F& f, const Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    if(f.is_true()) {
      return true;
    }
    else if(f.is_false()) {
      tell.push_back(var_dom<Alloc2>(AVar{}, U::bot()));
      return true;
    }
    else {
      RETURN_INTERPRETATION_ERROR("Only `true` and `false` can be interpreted in the store without being named.");
    }
  }
230
231 /** Interpret a predicate with a single variable occurrence. */
232 template <IKind kind, bool diagnose, class F, class Env, class Alloc2>
233 CUDA NI bool interpret_unary_predicate(const F& f, const Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
235 bool res = local_universe::template interpret<kind, diagnose>(f, env, u, diagnostics);
236 if(res) {
237 const auto& varf = var_in(f);
238 // When it is not necessary, we try to avoid using the environment.
239 // This is for instance useful when deduction operators add new constraints but do not have access to the environment (e.g., split()), and to avoid passing the environment around everywhere.
240 if(varf.is(F::V)) {
241 tell.push_back(var_dom<Alloc2>(varf.v(), u));
242 }
243 else {
244 auto var = var_in(f, env);
245 if(!var.has_value()) {
246 RETURN_INTERPRETATION_ERROR("Undeclared variable.");
247 }
248 auto avar = var->get().avar_of(atype);
249 if(!avar.has_value()) {
250 RETURN_INTERPRETATION_ERROR("The variable was not declared in the current abstract element (but exists in other abstract elements).");
251 }
252 tell.push_back(var_dom<Alloc2>(*avar, u));
253 }
254 return true;
255 }
256 else {
257 RETURN_INTERPRETATION_ERROR("Could not interpret a unary predicate in the underlying abstract universe.");
258 }
259 }
260
  /** Dispatch the interpretation of `f` according to its shape: existential quantifier
   * (TELL only), variable-free predicate, or unary predicate.
   * Predicates with two or more variables are not supported by VStore. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc2>
  CUDA NI bool interpret_predicate(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    if(f.type() != UNTYPED && f.type() != aty()) {
      RETURN_INTERPRETATION_ERROR("The abstract type of this predicate does not match the one of the current abstract element.");
    }
    if constexpr(kind == IKind::TELL) {
      if(f.is(F::E)) {
        return interpret_existential<diagnose>(f, env, tell, diagnostics);
      }
    }
    switch(num_vars(f)) {
      case 0: return interpret_zero_predicate<diagnose>(f, env, tell, diagnostics);
      case 1: return interpret_unary_predicate<kind, diagnose>(f, env, tell, diagnostics);
      default: RETURN_INTERPRETATION_ERROR("Interpretation of n-ary predicate is not supported in VStore.");
    }
  }

public:
  /** Entry point of interpretation: accepts formulas that are untyped or typed with this store's abstract type. */
  template <IKind kind, bool diagnose = false, class F, class Env, class I>
  CUDA NI bool interpret(const F& f, Env& env, I& intermediate, IDiagnostics& diagnostics) const {
    if(f.is_untyped() || f.type() == aty()) {
      return interpret_predicate<kind, diagnose>(f, env, intermediate, diagnostics);
    }
    else {
      RETURN_INTERPRETATION_ERROR("This abstract element cannot interpret a formula with a different type.");
    }
  }
288
  /** The store of variables lattice expects a formula with a single variable (including existential quantifiers) that can be handled by the abstract universe `U`.
   *
   * Variables must be existentially quantified before a formula containing variables can be interpreted.
   * Variables are immediately assigned to an index of `VStore` and initialized to \f$ \top_U \f$.
   * Shadowing/redeclaration of variables with existential quantifier is not supported.
   * The variable mapping is added to the environment only if the interpretation succeeds.

   * There is a small quirk: different stores might be produced if quantifiers do not appear in the same order.
   * This is because we attribute the first available index to variables when interpreting the quantifier.
   * In that case, the store will only be equivalent modulo the `env` structure.
   */
  template <bool diagnose = false, class F, class Env, class Alloc2>
  CUDA NI bool interpret_tell(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    return interpret<IKind::TELL, diagnose>(f, env, tell, diagnostics);
  }

  /** Similar to `interpret_tell` but do not support existential quantifier and therefore leaves `env` unchanged.
   * The `const_cast`s are safe because the ASK path never reaches `interpret_existential`
   * (guarded by `if constexpr(kind == IKind::TELL)`), which is the only place `env` is mutated. */
  template <bool diagnose = false, class F, class Env, class Alloc2>
  CUDA NI bool interpret_ask(const F& f, const Env& env, ask_type<Alloc2>& ask, IDiagnostics& diagnostics) const {
    return const_cast<this_type*>(this)->interpret<IKind::ASK, diagnose>(f, const_cast<Env&>(env), ask, diagnostics);
  }
310
  /** Copy this store into `store` cooperatively: thread 0 copies the smashed-bot flag,
   * and the variable copies are partitioned among the threads of `group`.
   * When the store is bot, only the flag is copied and the domains are skipped.
   * NOTE(review): no `group.sync()` is performed here — callers must synchronize before reading `store`. */
  template <class Group, class Store>
  CUDA void copy_to(Group& group, Store& store) const {
    assert(vars() == store.vars());
    if(group.thread_rank() == 0) {
      store.is_at_bot = is_at_bot;
    }
    if(is_at_bot) {
      return;
    }
    for (size_t i = group.thread_rank(); i < store.vars(); i += group.num_threads()) {
      store.data[i] = data[i];
    }
  }

#ifdef __CUDACC__
  /** Ask CUDA to prefetch the underlying array of domains to `dstDevice` (no-op when the store is bot). */
  void prefetch(int dstDevice) const {
    if(!is_at_bot) {
      cudaMemPrefetchAsync(data.data(), data.size() * sizeof(universe_type), dstDevice);
    }
  }
#endif

  /** Change the allocator of the underlying data, and reallocate the memory without copying the old data. */
  CUDA void reset_data(allocator_type alloc) {
    data = store_type(data.size(), alloc);
  }
337
  /** Meet `u` with the current domain of the variable `x` in this store. */
  template <class Univ>
  CUDA void project(AVar x, Univ& u) const {
    u.meet(project(x));
  }

  /** Returns the domain of the variable `x`; `x` must belong to this abstract element. */
  CUDA const universe_type& project(AVar x) const {
    assert(x.aty() == aty());
    assert(x.vid() < data.size());
    return data[x.vid()];
  }

  /** This projection must stay const, otherwise the user might tell new information in the universe, but we need to know in case we reach `top`. */
  CUDA const universe_type& operator[](int x) const {
    return data[x];
  }

  /** Smash the store to bot: the flag is raised, individual domains are left untouched. */
  CUDA void meet_bot() {
    is_at_bot.join_top();
  }
357
  /** Given an abstract variable `v`, `embed(VID(v), dom)` will update the domain of this variable with the new information `dom`.
   * This `embed` method follows PCCP's model, but the variable `x` must already be initialized in the store.
   * \return `true` if the domain of `x` (or the smashed-bot flag) changed.
   * @parallel @order-preserving @increasing
   */
  CUDA bool embed(int x, const universe_type& dom) {
    assert(x < data.size());
    bool has_changed = data[x].meet(dom);
    has_changed |= is_at_bot.join(data[x].is_bot());
    return has_changed;
  }

  /** This `embed` method follows PCCP's model, but the variable `x` must already be initialized in the store. */
  CUDA bool embed(AVar x, const universe_type& dom) {
    assert(x.aty() == aty());
    return embed(x.vid(), dom);
  }
374
  /** This deduce method can grow the store if required, and therefore do not satisfy the PCCP model.
   * A default-constructed `AVar` in `t[0]` is the sentinel produced when interpreting `false`: it smashes the store to bot.
   * NOTE(review): the resize uses `t.back()`, which assumes `t` is sorted by increasing variable index — confirm with the producers of `tell_type`.
   * @sequential @order-preserving @increasing
   */
  template <class Alloc2>
  CUDA bool deduce(const tell_type<Alloc2>& t) {
    if(t.size() == 0) {
      return false;
    }
    if(t[0].avar == AVar{}) {
      return is_at_bot.join(local::B(true));
    }
    if(t.back().avar.vid() >= data.size()) {
      data.resize(t.back().avar.vid()+1);
    }
    bool has_changed = false;
    for(int i = 0; i < t.size(); ++i) {
      has_changed |= embed(t[i].avar, t[i].dom);
    }
    return has_changed;
  }
395
  /** In-place meet with `other`: common variables are met pointwise, and the smashed-bot flags are joined.
   * Precondition: `other` must be smaller or equal in size than the current store. */
  template <class U2, class Alloc2>
  CUDA bool meet(const VStore<U2, Alloc2>& other) {
    bool has_changed = is_at_bot.join(other.is_at_bot);
    int min_size = battery::min(vars(), other.vars());
    for(int i = 0; i < min_size; ++i) {
      has_changed |= data[i].meet(other[i]);
    }
    for(int i = min_size; i < other.vars(); ++i) {
      assert(other[i].is_top()); // the size of the current store cannot be modified.
    }
    return has_changed;
  }

  /** Reset the store to top: the smashed-bot flag is lowered and every domain is widened to top. */
  CUDA void join_top() {
    is_at_bot.meet_bot();
    for(int i = 0; i < data.size(); ++i) {
      data[i].join_top();
    }
  }

  /** In-place join with `other`: common variables are joined pointwise; variables beyond
   * `other`'s size are joined with top, since `other` implicitly maps them to top.
   * A bot `other` is the neutral element and changes nothing.
   * Precondition: `other` must be smaller or equal in size than the current store. */
  template <class U2, class Alloc2>
  CUDA bool join(const VStore<U2, Alloc2>& other) {
    if(other.is_bot()) {
      return false;
    }
    int min_size = battery::min(vars(), other.vars());
    bool has_changed = is_at_bot.meet(other.is_at_bot);
    for(int i = 0; i < min_size; ++i) {
      has_changed |= data[i].join(other[i]);
    }
    for(int i = min_size; i < vars(); ++i) {
      has_changed |= data[i].join(U::top());
    }
    for(int i = min_size; i < other.vars(); ++i) {
      assert(other[i].is_top());
    }
    return has_changed;
  }
436
  /** \return `true` when we can deduce the content of `t` from the current domain.
   * For instance, if we have in the store `x = [0..10]`, we can deduce `x = [-1..11]` but we cannot deduce `x = [5..8]`.
   * NOTE(review): the variable indices in `t` are not bounds-checked against `vars()` — confirm callers guarantee this.
   * @parallel @order-preserving @decreasing */
  template <class Alloc2>
  CUDA local::B ask(const ask_type<Alloc2>& t) const {
    for(int i = 0; i < t.size(); ++i) {
      if(!(data[t[i].avar.vid()] <= t[i].dom)) {
        return false;
      }
    }
    return true;
  }

  /** VStore is a leaf abstract domain: it performs no deduction on its own, hence zero deductions and an unreachable `deduce(size_t)`. */
  CUDA size_t num_deductions() const { return 0; }
  CUDA local::B deduce(size_t) const { assert(false); return false; }
452
  /** An abstract element is extractable when it is not equal to bot.
   * If the strategy is `atoms`, we check the domains are singleton.
   */
  template<class ExtractionStrategy = NonAtomicExtraction>
  CUDA bool is_extractable(const ExtractionStrategy& strategy = ExtractionStrategy()) const {
    if(is_bot()) {
      return false;
    }
    if constexpr(ExtractionStrategy::atoms) {
      for(int i = 0; i < data.size(); ++i) {
        // A domain is a singleton when its lower bound, mapped into the upper-bound lattice, equals its upper bound.
        if(dual<typename universe_type::UB>(data[i].lb()) != data[i].ub()) {
          return false;
        }
      }
    }
    return true;
  }

#ifdef __CUDACC__
  /** Cooperative version of `is_extractable`: the singleton checks are partitioned among
   * the threads of `group`; a shared flag is initialized by thread 0 and both accesses
   * are fenced by `group.sync()`. */
  template<class ExtractionStrategy = NonAtomicExtraction>
  __device__ bool is_extractable(auto& group, const ExtractionStrategy& strategy = ExtractionStrategy()) const {
    if(is_bot()) {
      return false;
    }
    if constexpr(ExtractionStrategy::atoms) {
      __shared__ bool res;
      if(group.thread_rank() == 0) {
        res = true;
      }
      group.sync();
      for(int i = group.thread_rank(); i < data.size(); i += group.num_threads()) {
        if(dual<typename universe_type::UB>(data[i].lb()) != data[i].ub()) {
          res = false;
        }
      }
      group.sync();
      return res;
    }
    else {
      return true;
    }
  }
#endif

  /** Whenever `this` is different from `bot`, we extract its data into `ua`.
   * \pre `is_extractable()` must be `true`.
   * For now, we suppose VStore is only used to store under-approximation, I'm not sure yet how we would interact with over-approximation. */
  template<class U2, class Alloc2>
  CUDA void extract(VStore<U2, Alloc2>& ua) const {
    if((void*)&ua != (void*)this) { // self-extraction is a no-op.
      ua.data = data;
      ua.is_at_bot.meet_bot();
    }
  }
507
508private:
509 template<class U2, class Env, class Allocator2>
510 CUDA TFormula<typename Env::allocator_type> deinterpret(AVar avar, const U2& dom, const Env& env, const Allocator2& allocator) const {
511 auto f = dom.deinterpret(avar, env, allocator);
512 f.type_as(aty());
513 map_avar_to_lvar(f, env);
514 return std::move(f);
515 }
516
public:
  /** Deinterpret the whole store into a conjunction where each variable contributes
   * an existential quantifier followed by its domain constraint. */
  template<class Env, class Allocator2 = typename Env::allocator_type>
  CUDA NI TFormula<Allocator2> deinterpret(const Env& env, const Allocator2& allocator = Allocator2()) const {
    using F = TFormula<Allocator2>;
    typename F::Sequence seq{allocator};
    for(int i = 0; i < data.size(); ++i) {
      AVar v(aty(), i);
      seq.push_back(F::make_exists(aty(), env.name_of(v), env.sort_of(v)));
      seq.push_back(deinterpret(AVar(aty(), i), data[i], env, allocator));
    }
    return F::make_nary(AND, std::move(seq), aty());
  }

  /** Deinterpret an intermediate representation (tell or ask) into a formula:
   * `true` when empty, a single constraint when it has one element, and a conjunction otherwise. */
  template<class I, class Env, class Allocator2 = typename Env::allocator_type>
  CUDA NI TFormula<Allocator2> deinterpret(const I& intermediate, const Env& env, const Allocator2& allocator = Allocator2()) const {
    using F = TFormula<Allocator2>;
    if(intermediate.size() == 0) {
      return F::make_true();
    }
    else if(intermediate.size() == 1) {
      return deinterpret(intermediate[0].avar, intermediate[0].dom, env, allocator);
    }
    else {
      typename F::Sequence seq{allocator};
      for(int i = 0; i < intermediate.size(); ++i) {
        seq.push_back(deinterpret(intermediate[i].avar, intermediate[i].dom, env, allocator));
      }
      return F::make_nary(AND, std::move(seq), aty());
    }
  }
547
  /** Print the store as `<d1, ..., dn>`, prefixed by the top symbol when the store is top. */
  CUDA void print() const {
    if(is_top()) {
      printf("\u22A4 | ");
    }
    printf("<");
    for(int i = 0; i < vars(); ++i) {
      data[i].print();
      printf("%s", (i+1 == vars() ? "" : ", "));
    }
    printf(">\n");
  }
559};
560
561// Lattice operations.
562// Note that we do not consider the logical names.
563// These operations are only considering the indices of the elements.
564
565template<class L, class K, class Alloc>
566CUDA auto fmeet(const VStore<L, Alloc>& a, const VStore<K, Alloc>& b)
567{
568 using U = decltype(fmeet(a[0], b[0]));
569 if(a.is_bot() || b.is_bot()) {
571 }
572 int max_size = battery::max(a.vars(), b.vars());
573 int min_size = battery::min(a.vars(), b.vars());
574 VStore<U, Alloc> res(UNTYPED, max_size, a.get_allocator());
575 for(int i = 0; i < min_size; ++i) {
576 res.embed(i, fmeet(a[i], b[i]));
577 }
578 for(int i = min_size; i < a.vars(); ++i) {
579 res.embed(i, a[i]);
580 }
581 for(int i = min_size; i < b.vars(); ++i) {
582 res.embed(i, b[i]);
583 }
584 return res;
585}
586
587template<class L, class K, class Alloc>
588CUDA auto fjoin(const VStore<L, Alloc>& a, const VStore<K, Alloc>& b)
589{
590 using U = decltype(fjoin(a[0], b[0]));
591 if(a.is_bot()) {
592 if(b.is_bot()) {
594 }
595 else {
596 return VStore<U, Alloc>(b);
597 }
598 }
599 else if(b.is_bot()) {
600 return VStore<U, Alloc>(a);
601 }
602 else {
603 int min_size = battery::min(a.vars(), b.vars());
604 VStore<U, Alloc> res(UNTYPED, min_size, a.get_allocator());
605 for(int i = 0; i < min_size; ++i) {
606 res.embed(i, fjoin(a[i], b[i]));
607 }
608 return res;
609 }
610}
611
612template<class L, class K, class Alloc1, class Alloc2>
613CUDA bool operator<=(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
614{
615 if(b.is_top()) {
616 return true;
617 }
618 else {
619 int min_size = battery::min(a.vars(), b.vars());
620 for(int i = 0; i < min_size; ++i) {
621 if(!(a[i] <= b[i])) {
622 return false;
623 }
624 }
625 for(int i = min_size; i < b.vars(); ++i) {
626 if(!b[i].is_top()) {
627 return false;
628 }
629 }
630 }
631 return true;
632}
633
/** Strict pointwise ordering: `a < b` when `a <= b` and at least one domain is strictly below.
 * A bot `a` is strictly below everything except another bot store. */
template<class L, class K, class Alloc1, class Alloc2>
CUDA bool operator<(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
{
  if(a.is_bot()) {
    return !b.is_bot();
  }
  else {
    int min_size = battery::min(a.vars(), b.vars());
    bool strict = false;
    for(int i = 0; i < a.vars(); ++i) {
      if(i < b.vars()) {
        if(a[i] < b[i]) {
          strict = true;
        }
        else if(!(a[i] <= b[i])) {
          return false;
        }
      }
      // Beyond `b`'s size, `b` is implicitly top: any non-top domain of `a` is strictly below it.
      else if(!a[i].is_top()) {
        strict = true;
        break;
      }
    }
    // Extra domains of `b` must be top, otherwise `a` (implicitly top there) is not below `b`.
    for(int i = min_size; i < b.vars(); ++i) {
      if(!b[i].is_top()) {
        return false;
      }
    }
    return strict;
  }
}

/** `a >= b` iff `b <= a`. */
template<class L, class K, class Alloc1, class Alloc2>
CUDA bool operator>=(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
{
  return b <= a;
}

/** `a > b` iff `b < a`. */
template<class L, class K, class Alloc1, class Alloc2>
CUDA bool operator>(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
{
  return b < a;
}
677
678template<class L, class K, class Alloc1, class Alloc2>
679CUDA bool operator==(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
680{
681 if(a.is_bot()) {
682 return b.is_bot();
683 }
684 else if(b.is_bot()) {
685 return false;
686 }
687 else {
688 int min_size = battery::min(a.vars(), b.vars());
689 for(int i = 0; i < min_size; ++i) {
690 if(a[i] != b[i]) {
691 return false;
692 }
693 }
694 for(int i = min_size; i < a.vars(); ++i) {
695 if(!a[i].is_top()) {
696 return false;
697 }
698 }
699 for(int i = min_size; i < b.vars(); ++i) {
700 if(!b[i].is_top()) {
701 return false;
702 }
703 }
704 }
705 return true;
706}
707
/** Negation of the equivalence relation defined by `operator==`. */
template<class L, class K, class Alloc1, class Alloc2>
CUDA bool operator!=(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
{
  return !(a == b);
}
713
714template<class L, class Alloc>
715std::ostream& operator<<(std::ostream &s, const VStore<L, Alloc> &vstore) {
716 if(vstore.is_bot()) {
717 s << "\u22A5: ";
718 }
719 else {
720 s << "<";
721 for(int i = 0; i < vstore.vars(); ++i) {
722 s << vstore[i] << (i+1 == vstore.vars() ? "" : ", ");
723 }
724 s << ">";
725 }
726 return s;
727}
728
729} // namespace lala
730
731#endif
Definition ast.hpp:38
CUDA constexpr int vid() const
Definition ast.hpp:69
CUDA constexpr int aty() const
Definition ast.hpp:65
Definition abstract_deps.hpp:28
Definition b.hpp:10
Definition diagnostics.hpp:19
Definition ast.hpp:234
CUDA void type_as(AType ty)
Definition ast.hpp:347
Definition vstore.hpp:38
battery::vector< var_dom< Alloc >, Alloc > tell_type
Definition vstore.hpp:58
static CUDA this_type top(AType atype=UNTYPED, const allocator_type &alloc=allocator_type{})
Definition vstore.hpp:134
static constexpr const bool is_abstract_universe
Definition vstore.hpp:66
CUDA bool deduce(const tell_type< Alloc2 > &t)
Definition vstore.hpp:379
static constexpr const bool preserve_top
Definition vstore.hpp:70
CUDA local::B is_top() const
Definition vstore.hpp:173
static constexpr const bool preserve_bot
Definition vstore.hpp:69
CUDA bool join(const VStore< U2, Alloc2 > &other)
Definition vstore.hpp:419
CUDA bool meet(const VStore< U2, Alloc2 > &other)
Definition vstore.hpp:398
CUDA void copy_to(Group &group, Store &store) const
Definition vstore.hpp:312
static CUDA this_type bot(AType atype=UNTYPED, const allocator_type &alloc=allocator_type{})
Definition vstore.hpp:141
CUDA const universe_type & project(AVar x) const
Definition vstore.hpp:343
CUDA AType aty() const
Definition vstore.hpp:125
CUDA VStore(AType atype, const allocator_type &alloc=allocator_type())
Definition vstore.hpp:94
static constexpr const bool sequential
Definition vstore.hpp:67
tell_type< Alloc > ask_type
Definition vstore.hpp:61
CUDA bool is_extractable(const ExtractionStrategy &strategy=ExtractionStrategy()) const
Definition vstore.hpp:457
CUDA void print() const
Definition vstore.hpp:548
static CUDA this_type top(Env &env, const allocator_type &alloc=allocator_type{})
Definition vstore.hpp:157
CUDA allocator_type get_allocator() const
Definition vstore.hpp:121
CUDA NI TFormula< Allocator2 > deinterpret(const I &intermediate, const Env &env, const Allocator2 &allocator=Allocator2()) const
Definition vstore.hpp:531
CUDA local::B ask(const ask_type< Alloc2 > &t) const
Definition vstore.hpp:441
CUDA void project(AVar x, Univ &u) const
Definition vstore.hpp:339
CUDA void join_top()
Definition vstore.hpp:410
CUDA bool embed(AVar x, const universe_type &dom)
Definition vstore.hpp:370
battery::vector< local_universe, Alloc > snapshot_type
Definition vstore.hpp:64
CUDA local::B is_bot() const
Definition vstore.hpp:166
U universe_type
Definition vstore.hpp:40
static constexpr const bool preserve_join
Definition vstore.hpp:71
CUDA VStore(const VStore< R, allocator_type > &other)
Definition vstore.hpp:103
static constexpr const char * name
Definition vstore.hpp:75
CUDA snapshot_type< Alloc > snapshot(const Alloc &alloc=Alloc()) const
Definition vstore.hpp:185
typename universe_type::local_type local_universe
Definition vstore.hpp:41
friend class VStore
Definition vstore.hpp:78
CUDA bool embed(int x, const universe_type &dom)
Definition vstore.hpp:362
CUDA NI bool interpret(const F &f, Env &env, I &intermediate, IDiagnostics &diagnostics) const
Definition vstore.hpp:280
CUDA VStore(AType atype, size_t size, const allocator_type &alloc=allocator_type())
Definition vstore.hpp:98
CUDA this_type & restore(const snapshot_type< Alloc > &snap)
Definition vstore.hpp:190
CUDA const universe_type & operator[](int x) const
Definition vstore.hpp:350
static constexpr const bool injective_concretization
Definition vstore.hpp:73
static constexpr const bool preserve_concrete_covers
Definition vstore.hpp:74
CUDA void meet_bot()
Definition vstore.hpp:354
CUDA NI bool interpret_ask(const F &f, const Env &env, ask_type< Alloc2 > &ask, IDiagnostics &diagnostics) const
Definition vstore.hpp:307
CUDA VStore(this_type &&other)
Definition vstore.hpp:118
CUDA VStore(const this_type &other)
Definition vstore.hpp:89
static constexpr const bool is_totally_ordered
Definition vstore.hpp:68
CUDA local::B deduce(size_t) const
Definition vstore.hpp:451
CUDA void reset_data(allocator_type alloc)
Definition vstore.hpp:334
CUDA size_t num_deductions() const
Definition vstore.hpp:450
static constexpr const bool preserve_meet
Definition vstore.hpp:72
CUDA NI TFormula< Allocator2 > deinterpret(const Env &env, const Allocator2 &allocator=Allocator2()) const
Definition vstore.hpp:519
CUDA void extract(VStore< U2, Alloc2 > &ua) const
Definition vstore.hpp:501
CUDA VStore(const VStore< R, Alloc2 > &other, const allocator_type &alloc=allocator_type())
Definition vstore.hpp:108
CUDA size_t vars() const
Definition vstore.hpp:130
CUDA VStore(const VStore< R, Alloc2 > &other, const AbstractDeps< Allocators... > &deps)
Definition vstore.hpp:115
Allocator allocator_type
Definition vstore.hpp:42
CUDA NI bool interpret_tell(const F &f, Env &env, tell_type< Alloc2 > &tell, IDiagnostics &diagnostics) const
Definition vstore.hpp:301
static CUDA this_type bot(Env &env, const allocator_type &alloc=allocator_type{})
Definition vstore.hpp:150
#define RETURN_INTERPRETATION_ERROR(MSG)
Definition diagnostics.hpp:155
Definition abstract_deps.hpp:14
std::ostream & operator<<(std::ostream &s, const CartesianProduct< A, As... > &cp)
Definition cartesian_product.hpp:530
CUDA NI void map_avar_to_lvar(F &f, const Env &env, bool erase_type=false)
Definition algorithm.hpp:398
CUDA constexpr LDual dual(const L &x)
Definition arith_bound.hpp:110
int AType
Definition sort.hpp:18
CUDA constexpr auto fjoin(const CartesianProduct< As... > &a, const CartesianProduct< Bs... > &b)
Definition cartesian_product.hpp:463
CUDA constexpr bool operator==(const CartesianProduct< As... > &a, const CartesianProduct< Bs... > &b)
Definition cartesian_product.hpp:503
@ AND
Unary arithmetic function symbols.
Definition ast.hpp:114
CUDA constexpr bool operator<(const CartesianProduct< As... > &a, const CartesianProduct< Bs... > &b)
Definition cartesian_product.hpp:484
CUDA constexpr auto fmeet(const CartesianProduct< As... > &a, const CartesianProduct< Bs... > &b)
Definition cartesian_product.hpp:470
CUDA constexpr bool operator>(const CartesianProduct< As... > &a, const CartesianProduct< Bs... > &b)
Definition cartesian_product.hpp:497
CUDA NI int num_vars(const F &f)
Definition algorithm.hpp:153
CUDA const TFormula< Allocator, ExtendedSig > & var_in(const TFormula< Allocator, ExtendedSig > &f)
Definition algorithm.hpp:145
#define UNTYPED
Definition sort.hpp:21
Definition vstore.hpp:17
static constexpr bool atoms
Definition vstore.hpp:18
Definition vstore.hpp:13
static constexpr bool atoms
Definition vstore.hpp:14
Definition vstore.hpp:46
CUDA var_dom(const Alloc &)
Definition vstore.hpp:51
var_dom(const var_dom< Alloc > &)=default
CUDA var_dom(AVar avar, const local_universe &dom)
Definition vstore.hpp:52
AVar avar
Definition vstore.hpp:47
local_universe dom
Definition vstore.hpp:48
CUDA var_dom(const VarDom &other)
Definition vstore.hpp:54