vstore.hpp
// Copyright 2021 Pierre Talbot

#ifndef LALA_CORE_VSTORE_HPP
#define LALA_CORE_VSTORE_HPP

#include "logic/logic.hpp"
#include "abstract_deps.hpp"
#include <optional>

namespace lala {

struct NonAtomicExtraction {
  static constexpr bool atoms = false;
};

struct AtomicExtraction {
  static constexpr bool atoms = true;
};

/** The variable store abstract domain is a _domain transformer_ built on top of an abstract universe `U`.
Concretization function: \f$ \gamma(\rho) := \bigcap_{x \in \pi(\rho)} \gamma_{U_x}(\rho(x)) \f$.
The bot element is smashed and the equality between two stores is represented by the following equivalence relation, for two stores \f$ S \f$ and \f$ T \f$:
\f$ S \equiv T \Leftrightarrow \forall{x \in \mathit{Vars}},~S(x) = T(x) \lor \exists{x \in \mathit{dom}(S)},\exists{y \in \mathit{dom}(T)},~S(x) = \bot \land T(y) = \bot \f$.
Intuitively, it means that either all elements are equal, or both stores contain a bot element, in which case they "collapse" to the bot element and are considered equal.

The top element is the element \f$ \langle \top, \ldots \rangle \f$, that is, an infinite number of variables initialized to top.
In practice, we cannot represent infinite collections, so we represent top either as the empty collection or with a finite number of top elements.
Any finite store \f$ \langle x_1, \ldots, x_n \rangle \f$ should be seen as the concrete store \f$ \langle x_1, \ldots, x_n, \top, \ldots \rangle \f$.

This semantics has implications when joining or merging two elements.
For instance, \f$ \langle 1 \rangle.\mathit{meet}(\langle \bot, 4 \rangle) \f$ will be equal to bottom, in that case represented by \f$ \langle \bot \rangle \f$.

Template parameters:
  - `U` is the type of the abstract universe.
  - `Allocator` is the allocator of the underlying array of universes. */
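// A minimal sketch of the smashed-bot semantics above. `Itv` stands for a
// hypothetical interval universe instantiating `U`; the snippet mirrors the
// meet example of the comment (not a compiled test):
//
//   using IStore = VStore<Itv, battery::standard_allocator>;
//   IStore a(UNTYPED, 1);
//   a.embed(0, Itv(1, 1));       // a = <[1..1]>
//   IStore b(UNTYPED, 2);
//   b.embed(0, Itv::bot());      // b = <bot, top>, so b.is_bot() holds
//   b.embed(1, Itv(4, 4));       // b = <bot, [4..4]>
//   auto c = fmeet(a, b);        // bot is absorbing:
//   assert(c.is_bot());          // c collapses to <bot>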
template<class U, class Allocator>
class VStore {
public:
  using universe_type = U;
  using local_universe = typename universe_type::local_type;
  using allocator_type = Allocator;
  using this_type = VStore<U, Allocator>;

  template <class Alloc>
  struct var_dom {
    AVar avar;
    local_universe dom;
    var_dom() = default;
    var_dom(const var_dom<Alloc>&) = default;
    CUDA explicit var_dom(const Alloc&) {}
    CUDA var_dom(AVar avar, const local_universe& dom): avar(avar), dom(dom) {}
    template <class VarDom>
    CUDA var_dom(const VarDom& other): avar(other.avar), dom(other.dom) {}
  };

  template <class Alloc>
  using tell_type = battery::vector<var_dom<Alloc>, Alloc>;

  template <class Alloc>
  using ask_type = tell_type<Alloc>;

  template <class Alloc = allocator_type>
  using snapshot_type = battery::vector<local_universe, Alloc>;

  constexpr static const bool is_abstract_universe = false;
  constexpr static const bool sequential = universe_type::sequential;
  constexpr static const bool is_totally_ordered = false;
  constexpr static const bool preserve_bot = true;
  constexpr static const bool preserve_top = true;
  constexpr static const bool preserve_join = universe_type::preserve_join;
  constexpr static const bool preserve_meet = universe_type::preserve_meet;
  constexpr static const bool injective_concretization = universe_type::injective_concretization;
  constexpr static const bool preserve_concrete_covers = universe_type::preserve_concrete_covers;
  constexpr static const char* name = "VStore";

  template<class U2, class Alloc2>
  friend class VStore;

private:
  using store_type = battery::vector<universe_type, allocator_type>;
  using memory_type = typename universe_type::memory_type;

  AType atype;
  store_type data;
  B<memory_type> is_at_bot;

public:
  CUDA VStore(const this_type& other)
    : atype(other.atype), data(other.data), is_at_bot(other.is_at_bot)
  {}

  /** Initialize an empty store. */
  CUDA VStore(AType atype, const allocator_type& alloc = allocator_type())
    : atype(atype), data(alloc), is_at_bot(false)
  {}

  CUDA VStore(AType atype, int size, const allocator_type& alloc = allocator_type())
    : atype(atype), data(size, alloc), is_at_bot(false)
  {}

  template<class R>
  CUDA VStore(const VStore<R, allocator_type>& other)
    : atype(other.atype), data(other.data), is_at_bot(other.is_at_bot)
  {}

  template<class R, class Alloc2>
  CUDA VStore(const VStore<R, Alloc2>& other, const allocator_type& alloc = allocator_type())
    : atype(other.atype), data(other.data, alloc), is_at_bot(other.is_at_bot)
  {}

  /** Copy the vstore `other` into the current element.
   * `deps` can be empty; it is only used to obtain the allocator (this abstract domain has no dependencies). */
  template<class R, class Alloc2, class... Allocators>
  CUDA VStore(const VStore<R, Alloc2>& other, const AbstractDeps<Allocators...>& deps)
    : VStore(other, deps.template get_allocator<allocator_type>()) {}

  CUDA VStore(this_type&& other):
    atype(other.atype), data(std::move(other.data)), is_at_bot(other.is_at_bot) {}

  CUDA allocator_type get_allocator() const {
    return data.get_allocator();
  }

  CUDA AType aty() const {
    return atype;
  }

  /** Returns the number of variables currently represented by this abstract element. */
  CUDA int vars() const {
    return data.size();
  }

  CUDA static this_type top(AType atype = UNTYPED,
    const allocator_type& alloc = allocator_type{})
  {
    return VStore(atype, alloc);
  }

  /** A special symbolic element representing bot. */
  CUDA static this_type bot(AType atype = UNTYPED,
    const allocator_type& alloc = allocator_type{})
  {
    auto s = VStore{atype, alloc};
    s.meet_bot();
    return s;
  }

  template <class Env>
  CUDA static this_type bot(Env& env,
    const allocator_type& alloc = allocator_type{})
  {
    return bot(env.extends_abstract_dom(), alloc);
  }

  template <class Env>
  CUDA static this_type top(Env& env,
    const allocator_type& alloc = allocator_type{})
  {
    return top(env.extends_abstract_dom(), alloc);
  }

  /** \return `true` if at least one element is equal to bot in the store, `false` otherwise.
   * @parallel @order-preserving @increasing
   */
  CUDA local::B is_bot() const {
    return is_at_bot;
  }

  /** The store is at top when all variables are at top, or when the store is empty.
   * We do not expect to use this operation a lot, so its complexity is linear in the number of variables.
   * @parallel @order-preserving @decreasing */
  CUDA local::B is_top() const {
    if(is_at_bot) { return false; }
    for(int i = 0; i < vars(); ++i) {
      if(!data[i].is_top()) {
        return false;
      }
    }
    return true;
  }

  /** Take a snapshot of the current variable store. */
  template <class Alloc = allocator_type>
  CUDA snapshot_type<Alloc> snapshot(const Alloc& alloc = Alloc()) const {
    return snapshot_type<Alloc>(data, alloc);
  }

  template <class Alloc>
  CUDA this_type& restore(const snapshot_type<Alloc>& snap) {
    while(snap.size() < data.size()) {
      data.pop_back();
    }
    is_at_bot.meet_bot();
    for(int i = 0; i < snap.size(); ++i) {
      data[i].join(snap[i]);
      is_at_bot.join(data[i].is_bot());
    }
    return *this;
  }

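// A typical backtracking pattern with snapshot/restore (sketch; `store` is
// any VStore and `t` a previously interpreted tell):
//
//   auto snap = store.snapshot();   // save the current domains
//   store.deduce(t);                // refine the store, possibly down to bot
//   if(store.is_bot()) {
//     store.restore(snap);          // roll back to the saved, weaker domains
//   }
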
private:
  template <bool diagnose, class F, class Env, class Alloc2>
  CUDA NI bool interpret_existential(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    assert(f.is(F::E));
    var_dom<Alloc2> k;
    if(local_universe::template interpret_tell<diagnose>(f, env, k.dom, diagnostics)) {
      if(env.interpret(f.map_atype(atype), k.avar, diagnostics)) {
        assert(k.avar.aty() == aty());
        tell.push_back(k);
        return true;
      }
    }
    return false;
  }

  /** Interpret a predicate without variables. */
  template <bool diagnose, class F, class Env, class Alloc2>
  CUDA NI bool interpret_zero_predicate(const F& f, const Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    if(f.is_true()) {
      return true;
    }
    else if(f.is_false()) {
      tell.push_back(var_dom<Alloc2>(AVar{}, U::bot()));
      return true;
    }
    else {
      RETURN_INTERPRETATION_ERROR("Only `true` and `false` can be interpreted in the store without being named.");
    }
  }

  /** Interpret a predicate with a single variable occurrence. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc2>
  CUDA NI bool interpret_unary_predicate(const F& f, const Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    local_universe u{local_universe::top()};
    bool res = local_universe::template interpret<kind, diagnose>(f, env, u, diagnostics);
    if(res) {
      const auto& varf = var_in(f);
      // When it is not necessary, we try to avoid using the environment.
      // This is useful, for instance, when deduction operators add new constraints but do not have access to the environment (e.g., split()), and it avoids passing the environment around everywhere.
      if(varf.is(F::V)) {
        if(varf.v().aty() == aty() || varf.v().aty() == UNTYPED) {
          tell.push_back(var_dom<Alloc2>(varf.v(), u));
        }
        else {
          RETURN_INTERPRETATION_ERROR("The variable was not declared in the current abstract element (but exists in other abstract elements).");
        }
      }
      else {
        auto var = var_in(f, env);
        if(!var.has_value()) {
          RETURN_INTERPRETATION_ERROR("Undeclared variable.");
        }
        auto avar = var->get().avar_of(atype);
        if(!avar.has_value()) {
          RETURN_INTERPRETATION_ERROR("The variable was not declared in the current abstract element (but exists in other abstract elements).");
        }
        assert(avar->aty() == aty());
        tell.push_back(var_dom<Alloc2>(*avar, u));
      }
      return true;
    }
    else {
      RETURN_INTERPRETATION_ERROR("Could not interpret a unary predicate in the underlying abstract universe.");
    }
  }

  template <IKind kind, bool diagnose, class F, class Env, class Alloc2>
  CUDA NI bool interpret_predicate(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    if(f.type() != UNTYPED && f.type() != aty()) {
      RETURN_INTERPRETATION_ERROR("The abstract type of this predicate does not match the one of the current abstract element.");
    }
    if constexpr(kind == IKind::TELL) {
      if(f.is(F::E)) {
        return interpret_existential<diagnose>(f, env, tell, diagnostics);
      }
    }
    switch(num_vars(f)) {
      case 0: return interpret_zero_predicate<diagnose>(f, env, tell, diagnostics);
      case 1: return interpret_unary_predicate<kind, diagnose>(f, env, tell, diagnostics);
      default: RETURN_INTERPRETATION_ERROR("Interpretation of n-ary predicates is not supported in VStore.");
    }
  }

public:
  template <IKind kind, bool diagnose = false, class F, class Env, class I>
  CUDA NI bool interpret(const F& f, Env& env, I& intermediate, IDiagnostics& diagnostics) const {
    if(f.is_untyped() || f.type() == aty()) {
      return interpret_predicate<kind, diagnose>(f, env, intermediate, diagnostics);
    }
    else {
      RETURN_INTERPRETATION_ERROR("This abstract element cannot interpret a formula with a different type.");
    }
  }

  /** The store of variables lattice expects a formula with a single variable (including existential quantifiers) that can be handled by the abstract universe `U`.
   *
   * Variables must be existentially quantified before a formula containing variables can be interpreted.
   * Variables are immediately assigned to an index of `VStore` and initialized to \f$ \top_U \f$.
   * Shadowing/redeclaration of variables with existential quantifiers is not supported.
   * The variable mapping is added to the environment only if the interpretation succeeds.
   *
   * There is a small quirk: different stores might be produced if quantifiers do not appear in the same order.
   * This is because we attribute the first available index to a variable when interpreting its quantifier.
   * In that case, the stores will only be equivalent modulo the `env` structure.
   */
  template <bool diagnose = false, class F, class Env, class Alloc2>
  CUDA NI bool interpret_tell(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    return interpret<IKind::TELL, diagnose>(f, env, tell, diagnostics);
  }

  /** Similar to `interpret_tell`, but does not support existential quantifiers and therefore leaves `env` unchanged. */
  template <bool diagnose = false, class F, class Env, class Alloc2>
  CUDA NI bool interpret_ask(const F& f, const Env& env, ask_type<Alloc2>& ask, IDiagnostics& diagnostics) const {
    return const_cast<this_type*>(this)->interpret<IKind::ASK, diagnose>(f, const_cast<Env&>(env), ask, diagnostics);
  }

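// Sketch of the interpretation pipeline (assumptions: `env` is a variable
// environment such as VarEnv, `f` is a formula of the form "∃x:Int; x ≤ 10",
// and `alloc` is the store's allocator):
//
//   VStore<U, Alloc> store(env.extends_abstract_dom(), alloc);
//   typename VStore<U, Alloc>::template tell_type<Alloc> tell;
//   IDiagnostics diag;
//   if(store.interpret_tell(f, env, tell, diag)) {
//     store.deduce(tell);   // allocates the variable, then refines its domain
//   }
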
  template <class Group, class Store>
  CUDA void copy_to(Group& group, Store& store) const {
    assert(vars() == store.vars());
    if(group.thread_rank() == 0) {
      store.is_at_bot = is_at_bot;
    }
    if(is_at_bot) {
      return;
    }
    for (int i = group.thread_rank(); i < store.vars(); i += group.num_threads()) {
      store.data[i] = data[i];
    }
  }

  template <class Store>
  CUDA void copy_to(Store& store) const {
    assert(vars() == store.vars());
    store.is_at_bot = is_at_bot;
    if(is_at_bot) {
      return;
    }
    for (int i = 0; i < store.vars(); ++i) {
      store.data[i] = data[i];
    }
  }

#ifdef __CUDACC__
  void prefetch(int dstDevice) const {
    if(!is_at_bot) {
      cudaMemPrefetchAsync(data.data(), data.size() * sizeof(universe_type), dstDevice);
    }
  }
#endif

  /** Change the allocator of the underlying data, and reallocate the memory without copying the old data. */
  CUDA void reset_data(allocator_type alloc) {
    data = store_type(data.size(), alloc);
  }

  template <class Univ>
  CUDA void project(AVar x, Univ& u) const {
    u.meet(project(x));
  }

  CUDA const universe_type& project(AVar x) const {
    assert(x.aty() == aty());
    assert(x.vid() < data.size());
    return data[x.vid()];
  }

  /** This projection must stay const: otherwise the user could tell new information directly in the universe, and we would not detect when we reach `bot`. */
  CUDA const universe_type& operator[](int x) const {
    return data[x];
  }

  CUDA const universe_type& operator[](AVar x) const {
    return project(x);
  }

  CUDA void meet_bot() {
    is_at_bot.join_top();
  }

  /** Given an abstract variable `v`, `embed(VID(v), dom)` updates the domain of this variable with the new information `dom`.
   * This `embed` method follows PCCP's model, but the variable `x` must already be initialized in the store.
   * @parallel @order-preserving @increasing
   */
  CUDA bool embed(int x, const universe_type& dom) {
    assert(x < data.size());
    bool has_changed = data[x].meet(dom);
    if(has_changed && data[x].is_bot()) {
      is_at_bot.join_top();
    }
    return has_changed;
  }

  /** This `embed` method follows PCCP's model, but the variable `x` must already be initialized in the store. */
  CUDA bool embed(AVar x, const universe_type& dom) {
    assert(x.aty() == aty());
    return embed(x.vid(), dom);
  }

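// Minimal embed usage (sketch; `Itv` again stands for a hypothetical interval
// universe): variables are addressed by index and must already exist.
//
//   VStore<Itv, Alloc> store(UNTYPED, 2, alloc);   // <top, top>
//   bool changed = store.embed(0, Itv(0, 10));     // <[0..10], top>, returns true
//   changed = store.embed(0, Itv(5, 15));          // meet: <[5..10], top>
//   store.embed(0, Itv(20, 30));                   // empty meet, store.is_bot()
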
  /** This deduce method can grow the store if required, and therefore does not satisfy the PCCP model.
   * @sequential @order-preserving @increasing
   */
  template <class Alloc2>
  CUDA bool deduce(const tell_type<Alloc2>& t) {
    if(t.size() == 0) {
      return false;
    }
    int largest_vid = 0;
    for(int i = 0; i < t.size(); ++i) {
      if(t[i].avar == AVar{}) {
        return is_at_bot.join(local::B(true));
      }
      largest_vid = battery::max(largest_vid, t[i].avar.vid());
    }
    if(largest_vid >= data.size()) {
      data.resize(largest_vid+1);
    }
    bool has_changed = false;
    for(int i = 0; i < t.size(); ++i) {
      has_changed |= embed(t[i].avar, t[i].dom);
    }
    return has_changed;
  }

  /** Precondition: `other` must have at most as many variables as the current store. */
  template <class U2, class Alloc2>
  CUDA bool meet(const VStore<U2, Alloc2>& other) {
    bool has_changed = is_at_bot.join(other.is_at_bot);
    int min_size = battery::min(vars(), other.vars());
    for(int i = 0; i < min_size; ++i) {
      has_changed |= data[i].meet(other[i]);
    }
    for(int i = min_size; i < other.vars(); ++i) {
      assert(other[i].is_top()); // The size of the current store cannot be modified.
    }
    return has_changed;
  }

  CUDA void join_top() {
    is_at_bot.meet_bot();
    for(int i = 0; i < data.size(); ++i) {
      data[i].join_top();
    }
  }

  /** Precondition: `other` must have at most as many variables as the current store. */
  template <class U2, class Alloc2>
  CUDA bool join(const VStore<U2, Alloc2>& other) {
    if(other.is_bot()) {
      return false;
    }
    int min_size = battery::min(vars(), other.vars());
    bool has_changed = is_at_bot.meet(other.is_at_bot);
    for(int i = 0; i < min_size; ++i) {
      has_changed |= data[i].join(other[i]);
    }
    for(int i = min_size; i < vars(); ++i) {
      has_changed |= data[i].join(U::top());
    }
    for(int i = min_size; i < other.vars(); ++i) {
      assert(other[i].is_top());
    }
    return has_changed;
  }

  /** \return `true` when we can deduce the content of `t` from the current domain.
   * For instance, if we have in the store `x = [0..10]`, we can deduce `x = [-1..11]` but we cannot deduce `x = [5..8]`.
   * @parallel @order-preserving @decreasing */
  template <class Alloc2>
  CUDA local::B ask(const ask_type<Alloc2>& t) const {
    for(int i = 0; i < t.size(); ++i) {
      if(!(data[t[i].avar.vid()] <= t[i].dom)) {
        return false;
      }
    }
    return true;
  }

  CUDA int num_deductions() const { return 0; }
  CUDA local::B deduce(int) const { assert(false); return false; }

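// Entailment sketch: with x = [0..10] in the store, a weaker constraint such
// as x in [-1..11] is entailed, a stronger one such as x in [5..8] is not.
// Assuming `weaker` and `stronger` were produced by `interpret_ask`:
//
//   assert(store.ask(weaker));     // [0..10] <= [-1..11] holds
//   assert(!store.ask(stronger));  // [0..10] <= [5..8] does not
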
  /** An abstract element is extractable when it is not equal to bot.
   * If the strategy is `atoms`, we additionally check that all domains are singletons.
   */
  template<class ExtractionStrategy = NonAtomicExtraction>
  CUDA bool is_extractable(const ExtractionStrategy& strategy = ExtractionStrategy()) const {
    if(is_bot()) {
      return false;
    }
    if constexpr(ExtractionStrategy::atoms) {
      for(int i = 0; i < data.size(); ++i) {
        if(data[i].lb().value() != data[i].ub().value()) {
          return false;
        }
      }
    }
    return true;
  }

#ifdef __CUDACC__
  template<class ExtractionStrategy = NonAtomicExtraction>
  __device__ bool is_extractable(auto& group, const ExtractionStrategy& strategy = ExtractionStrategy()) const {
    if(is_bot()) {
      return false;
    }
    if constexpr(ExtractionStrategy::atoms) {
      __shared__ bool res;
      if(group.thread_rank() == 0) {
        res = true;
      }
      group.sync();
      for(int i = group.thread_rank(); i < data.size(); i += group.num_threads()) {
        if(data[i].lb().value() != data[i].ub().value()) {
          res = false;
        }
      }
      group.sync();
      return res;
    }
    else {
      return true;
    }
  }
#endif

  /** Whenever `this` is different from `bot`, we extract its data into `ua`.
   * \pre `is_extractable()` must be `true`.
   * For now, we suppose VStore is only used to store under-approximations; I'm not sure yet how we would interact with over-approximations. */
  template<class U2, class Alloc2>
  CUDA void extract(VStore<U2, Alloc2>& ua) const {
    if((void*)&ua != (void*)this) {
      ua.data = data;
      ua.is_at_bot.meet_bot();
    }
  }

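// Typical extraction workflow (sketch): check extractability first, then copy
// the solution out. With `AtomicExtraction`, every domain must be a singleton.
//
//   if(store.is_extractable(AtomicExtraction{})) {
//     VStore<U, Alloc> solution(store.aty(), store.vars(), alloc);
//     store.extract(solution);   // `solution` now holds the assignment
//   }
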
private:
  template<class U2, class Env, class Allocator2>
  CUDA TFormula<typename Env::allocator_type> deinterpret(AVar avar, const U2& dom, const Env& env, const Allocator2& allocator) const {
    auto f = dom.deinterpret(avar, env, allocator);
    f.type_as(aty());
    map_avar_to_lvar(f, env);
    return std::move(f);
  }

public:
  template<class Env, class Allocator2 = typename Env::allocator_type>
  CUDA NI TFormula<Allocator2> deinterpret(const Env& env, const Allocator2& allocator = Allocator2()) const {
    using F = TFormula<Allocator2>;
    if(data.size() == 0) {
      return is_bot() ? F::make_false() : F::make_true();
    }
    typename F::Sequence seq{allocator};
    for(int i = 0; i < data.size(); ++i) {
      AVar v(aty(), i);
      seq.push_back(F::make_exists(aty(), env.name_of(v), env.sort_of(v)));
      seq.push_back(deinterpret(AVar(aty(), i), data[i], env, allocator));
    }
    return F::make_nary(AND, std::move(seq), aty());
  }

  template<class I, class Env, class Allocator2 = typename Env::allocator_type>
  CUDA NI TFormula<Allocator2> deinterpret(const I& intermediate, const Env& env, const Allocator2& allocator = Allocator2()) const {
    using F = TFormula<Allocator2>;
    if(intermediate.size() == 0) {
      return F::make_true();
    }
    else if(intermediate.size() == 1) {
      return deinterpret(intermediate[0].avar, intermediate[0].dom, env, allocator);
    }
    else {
      typename F::Sequence seq{allocator};
      for(int i = 0; i < intermediate.size(); ++i) {
        seq.push_back(deinterpret(intermediate[i].avar, intermediate[i].dom, env, allocator));
      }
      return F::make_nary(AND, std::move(seq), aty());
    }
  }

  CUDA void print() const {
    if(is_bot()) {
      printf("\u22A5 | ");
    }
    printf("<");
    for(int i = 0; i < vars(); ++i) {
      data[i].print();
      printf("%s", (i+1 == vars() ? "" : ", "));
    }
    printf(">\n");
  }
};

// Lattice operations.
// Note that we do not consider the logical names.
// These operations only consider the indices of the elements.

template<class L, class K, class Alloc>
CUDA auto fmeet(const VStore<L, Alloc>& a, const VStore<K, Alloc>& b)
{
  using U = decltype(fmeet(a[0], b[0]));
  if(a.is_bot() || b.is_bot()) {
    return VStore<U, Alloc>::bot(UNTYPED, a.get_allocator());
  }
  int max_size = battery::max(a.vars(), b.vars());
  int min_size = battery::min(a.vars(), b.vars());
  VStore<U, Alloc> res(UNTYPED, max_size, a.get_allocator());
  for(int i = 0; i < min_size; ++i) {
    res.embed(i, fmeet(a[i], b[i]));
  }
  for(int i = min_size; i < a.vars(); ++i) {
    res.embed(i, a[i]);
  }
  for(int i = min_size; i < b.vars(); ++i) {
    res.embed(i, b[i]);
  }
  return res;
}

template<class L, class K, class Alloc>
CUDA auto fjoin(const VStore<L, Alloc>& a, const VStore<K, Alloc>& b)
{
  using U = decltype(fjoin(a[0], b[0]));
  if(a.is_bot()) {
    if(b.is_bot()) {
      return VStore<U, Alloc>::bot(UNTYPED, a.get_allocator());
    }
    else {
      return VStore<U, Alloc>(b);
    }
  }
  else if(b.is_bot()) {
    return VStore<U, Alloc>(a);
  }
  else {
    int min_size = battery::min(a.vars(), b.vars());
    VStore<U, Alloc> res(UNTYPED, min_size, a.get_allocator());
    for(int i = 0; i < min_size; ++i) {
      res.embed(i, fjoin(a[i], b[i]));
    }
    return res;
  }
}

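// Size behavior of the lattice operations (sketch, with interval components):
// missing components are implicitly top, so fmeet keeps the longest store
// while fjoin keeps only the common prefix (x join top = top, hence dropped).
//
//   // a = <[0..5], [2..3]>, b = <[1..9]>
//   auto m = fmeet(a, b);   // m = <[1..5], [2..3]>
//   auto j = fjoin(a, b);   // j = <[0..9]>
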
template<class L, class K, class Alloc1, class Alloc2>
CUDA bool operator<=(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
{
  if(b.is_top()) {
    return true;
  }
  else {
    int min_size = battery::min(a.vars(), b.vars());
    for(int i = 0; i < min_size; ++i) {
      if(!(a[i] <= b[i])) {
        return false;
      }
    }
    for(int i = min_size; i < b.vars(); ++i) {
      if(!b[i].is_top()) {
        return false;
      }
    }
  }
  return true;
}

template<class L, class K, class Alloc1, class Alloc2>
CUDA bool operator<(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
{
  if(a.is_bot()) {
    return !b.is_bot();
  }
  else {
    int min_size = battery::min(a.vars(), b.vars());
    bool strict = false;
    for(int i = 0; i < a.vars(); ++i) {
      if(i < b.vars()) {
        if(a[i] < b[i]) {
          strict = true;
        }
        else if(!(a[i] <= b[i])) {
          return false;
        }
      }
      else if(!a[i].is_top()) {
        strict = true;
        break;
      }
    }
    for(int i = min_size; i < b.vars(); ++i) {
      if(!b[i].is_top()) {
        return false;
      }
    }
    return strict;
  }
}

template<class L, class K, class Alloc1, class Alloc2>
CUDA bool operator>=(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
{
  return b <= a;
}

template<class L, class K, class Alloc1, class Alloc2>
CUDA bool operator>(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
{
  return b < a;
}

template<class L, class K, class Alloc1, class Alloc2>
CUDA bool operator==(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
{
  if(a.is_bot()) {
    return b.is_bot();
  }
  else if(b.is_bot()) {
    return false;
  }
  else {
    int min_size = battery::min(a.vars(), b.vars());
    for(int i = 0; i < min_size; ++i) {
      if(a[i] != b[i]) {
        return false;
      }
    }
    for(int i = min_size; i < a.vars(); ++i) {
      if(!a[i].is_top()) {
        return false;
      }
    }
    for(int i = min_size; i < b.vars(); ++i) {
      if(!b[i].is_top()) {
        return false;
      }
    }
  }
  return true;
}

template<class L, class K, class Alloc1, class Alloc2>
CUDA bool operator!=(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
{
  return !(a == b);
}

template<class L, class Alloc>
std::ostream& operator<<(std::ostream &s, const VStore<L, Alloc> &vstore) {
  if(vstore.is_bot()) {
    s << "\u22A5: ";
  }
  else {
    s << "<";
    for(int i = 0; i < vstore.vars(); ++i) {
      s << vstore[i] << (i+1 == vstore.vars() ? "" : ", ");
    }
    s << ">";
  }
  return s;
}

} // namespace lala

#endif