Lattice Land Core Library
vstore.hpp
// Copyright 2021 Pierre Talbot

#ifndef LALA_CORE_VSTORE_HPP
#define LALA_CORE_VSTORE_HPP

#include "logic/logic.hpp"
#include "abstract_deps.hpp"
#include <optional>

namespace lala {

struct NonAtomicExtraction {
  static constexpr bool atoms = false;
};

struct AtomicExtraction {
  static constexpr bool atoms = true;
};

/** The variable store abstract domain is a _domain transformer_ built on top of an abstract universe `U`.
Concretization function: \f$ \gamma(\rho) := \bigcap_{x \in \pi(\rho)} \gamma_{U_x}(\rho(x)) \f$.
The bot element is smashed and the equality between two stores is represented by the following equivalence relation, for two stores \f$ S \f$ and \f$ T \f$:
\f$ S \equiv T \Leftrightarrow \forall{x \in \mathit{Vars}},~S(x) = T(x) \lor \exists{x \in \mathit{dom}(S)},\exists{y \in \mathit{dom}(T)},~S(x) = \bot \land T(y) = \bot \f$.
Intuitively, it means that either all elements are equal, or both stores contain a bot element, in which case they "collapse" to the bot element and are considered equal.

The top element is the element \f$ \langle \top, \ldots \rangle \f$, that is, an infinite sequence of variables initialized to top.
In practice, we cannot represent infinite collections, so we represent top either as the empty collection or with a finite number of top elements.
Any finite store \f$ \langle x_1, \ldots, x_n \rangle \f$ should be seen as the concrete store \f$ \langle x_1, \ldots, x_n, \top, \ldots \rangle \f$.

These semantics have implications when joining or meeting two elements.
For instance, \f$ \langle 1 \rangle.\mathit{meet}(\langle \bot, 4 \rangle) \f$ will be equal to bottom, in that case represented by \f$ \langle \bot \rangle \f$.
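
A minimal sketch of this collapse (assuming a hypothetical interval universe `Itv` with an `Itv(lb, ub)` constructor):

\code
VStore<Itv, battery::standard_allocator> s(UNTYPED, 2);
s.embed(0, Itv(1, 1));  // s = <[1..1], top>
s.embed(1, Itv(0, 4));  // s = <[1..1], [0..4]>
s.embed(1, Itv(5, 9));  // [0..4] meet [5..9] is empty: the store collapses.
assert(s.is_bot());     // the whole store is now (smashed) bottom.
\endcode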

Template parameters:
 - `U` is the type of the abstract universe.
 - `Allocator` is the allocator of the underlying array of universes. */
template<class U, class Allocator>
class VStore {
public:
  using universe_type = U;
  using local_universe = typename universe_type::local_type;
  using allocator_type = Allocator;
  using this_type = VStore<universe_type, allocator_type>;

  template <class Alloc>
  struct var_dom {
    AVar avar;
    local_universe dom;
    var_dom() = default;
    var_dom(const var_dom<Alloc>&) = default;
    CUDA explicit var_dom(const Alloc&) {}
    CUDA var_dom(AVar avar, const local_universe& dom): avar(avar), dom(dom) {}
    template <class VarDom>
    CUDA var_dom(const VarDom& other): avar(other.avar), dom(other.dom) {}
  };

  template <class Alloc>
  using tell_type = battery::vector<var_dom<Alloc>, Alloc>;

  template <class Alloc>
  using ask_type = tell_type<Alloc>;

  template <class Alloc = allocator_type>
  using snapshot_type = battery::vector<local_universe, Alloc>;

  constexpr static const bool is_abstract_universe = false;
  constexpr static const bool sequential = universe_type::sequential;
  constexpr static const bool is_totally_ordered = false;
  constexpr static const bool preserve_bot = true;
  constexpr static const bool preserve_top = true;
  constexpr static const bool preserve_join = universe_type::preserve_join;
  constexpr static const bool preserve_meet = universe_type::preserve_meet;
  constexpr static const bool injective_concretization = universe_type::injective_concretization;
  constexpr static const bool preserve_concrete_covers = universe_type::preserve_concrete_covers;
  constexpr static const char* name = "VStore";

  template<class U2, class Alloc2>
  friend class VStore;

private:
  using store_type = battery::vector<universe_type, allocator_type>;
  using memory_type = typename universe_type::memory_type;

  AType atype;
  store_type data;
  B<memory_type> is_at_bot;

public:
  CUDA VStore(const this_type& other)
    : atype(other.atype), data(other.data), is_at_bot(other.is_at_bot)
  {}

  /** Initialize an empty store. */
  CUDA VStore(AType atype, const allocator_type& alloc = allocator_type())
    : atype(atype), data(alloc), is_at_bot(false)
  {}

  CUDA VStore(AType atype, int size, const allocator_type& alloc = allocator_type())
    : atype(atype), data(size, alloc), is_at_bot(false)
  {}

  template<class R>
  CUDA VStore(const VStore<R, allocator_type>& other)
    : atype(other.atype), data(other.data), is_at_bot(other.is_at_bot)
  {}

  template<class R, class Alloc2>
  CUDA VStore(const VStore<R, Alloc2>& other, const allocator_type& alloc = allocator_type())
    : atype(other.atype), data(other.data, alloc), is_at_bot(other.is_at_bot)
  {}

  /** Copy the vstore `other` into the current element.
   * `deps` can be empty and is only used to obtain the allocator (this abstract domain has no dependencies). */
  template<class R, class Alloc2, class... Allocators>
  CUDA VStore(const VStore<R, Alloc2>& other, const AbstractDeps<Allocators...>& deps)
    : VStore(other, deps.template get_allocator<allocator_type>()) {}

  CUDA VStore(this_type&& other):
    atype(other.atype), data(std::move(other.data)), is_at_bot(other.is_at_bot) {}

  CUDA allocator_type get_allocator() const {
    return data.get_allocator();
  }

  CUDA AType aty() const {
    return atype;
  }

  /** Returns the number of variables currently represented by this abstract element. */
  CUDA int vars() const {
    return data.size();
  }

  CUDA static this_type top(AType atype = UNTYPED,
    const allocator_type& alloc = allocator_type{})
  {
    return VStore(atype, alloc);
  }

  /** A special symbolic element representing bot. */
  CUDA static this_type bot(AType atype = UNTYPED,
    const allocator_type& alloc = allocator_type{})
  {
    auto s = VStore{atype, alloc};
    s.meet_bot();
    return std::move(s);
  }

  template <class Env>
  CUDA static this_type bot(Env& env,
    const allocator_type& alloc = allocator_type{})
  {
    return bot(env.extends_abstract_dom(), alloc);
  }

  template <class Env>
  CUDA static this_type top(Env& env,
    const allocator_type& alloc = allocator_type{})
  {
    return top(env.extends_abstract_dom(), alloc);
  }

  /** \return `true` if at least one element is equal to bot in the store, `false` otherwise.
   * @parallel @order-preserving @increasing
   */
  CUDA local::B is_bot() const {
    return is_at_bot;
  }

  /** The top element of a store of `n` variables is reached when all variables are at top, or when the store is empty.
   * We do not expect to use this operation a lot, so its complexity is linear in the number of variables.
   * @parallel @order-preserving @decreasing */
  CUDA local::B is_top() const {
    if(is_at_bot) { return false; }
    for(int i = 0; i < vars(); ++i) {
      if(!data[i].is_top()) {
        return false;
      }
    }
    return true;
  }

  /** Take a snapshot of the current variable store. */
  template <class Alloc = allocator_type>
  CUDA snapshot_type<Alloc> snapshot(const Alloc& alloc = Alloc()) const {
    return snapshot_type<Alloc>(data, alloc);
  }

  /** Restore the store to the state of the snapshot `snap`. */
  template <class Alloc>
  CUDA this_type& restore(const snapshot_type<Alloc>& snap) {
    while(snap.size() < data.size()) {
      data.pop_back();
    }
    is_at_bot.meet_bot();
    for(int i = 0; i < snap.size(); ++i) {
      data[i].join(snap[i]);
      is_at_bot.join(data[i].is_bot());
    }
    return *this;
  }
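
  /* A minimal backtracking sketch using `snapshot`/`restore` (assuming a
     hypothetical interval universe `Itv` and an existing store `s`):

       auto snap = s.snapshot();  // save the current domains.
       s.embed(0, Itv(5, 5));     // speculatively refine variable 0.
       if(s.is_bot()) {
         s.restore(snap);         // undo the refinement; `is_at_bot` is recomputed.
       }
  */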

private:
  template <bool diagnose, class F, class Env, class Alloc2>
  CUDA NI bool interpret_existential(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    assert(f.is(F::E));
    var_dom<Alloc2> k;
    if(local_universe::template interpret_tell<diagnose>(f, env, k.dom, diagnostics)) {
      if(env.interpret(f.map_atype(atype), k.avar, diagnostics)) {
        tell.push_back(k);
        return true;
      }
    }
    return false;
  }

  /** Interpret a predicate without variables. */
  template <bool diagnose, class F, class Env, class Alloc2>
  CUDA NI bool interpret_zero_predicate(const F& f, const Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    if(f.is_true()) {
      return true;
    }
    else if(f.is_false()) {
      tell.push_back(var_dom<Alloc2>(AVar{}, U::bot()));
      return true;
    }
    else {
      RETURN_INTERPRETATION_ERROR("Only `true` and `false` can be interpreted in the store without being named.");
    }
  }

  /** Interpret a predicate with a single variable occurrence. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc2>
  CUDA NI bool interpret_unary_predicate(const F& f, const Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    local_universe u;
    bool res = local_universe::template interpret<kind, diagnose>(f, env, u, diagnostics);
    if(res) {
      const auto& varf = var_in(f);
      // When it is not necessary, we try to avoid using the environment.
      // This is for instance useful when deduction operators add new constraints but do not have access to the environment (e.g., split()), and to avoid passing the environment around everywhere.
      if(varf.is(F::V)) {
        tell.push_back(var_dom<Alloc2>(varf.v(), u));
      }
      else {
        auto var = var_in(f, env);
        if(!var.has_value()) {
          RETURN_INTERPRETATION_ERROR("Undeclared variable.");
        }
        auto avar = var->get().avar_of(atype);
        if(!avar.has_value()) {
          RETURN_INTERPRETATION_ERROR("The variable was not declared in the current abstract element (but exists in other abstract elements).");
        }
        tell.push_back(var_dom<Alloc2>(*avar, u));
      }
      return true;
    }
    else {
      RETURN_INTERPRETATION_ERROR("Could not interpret a unary predicate in the underlying abstract universe.");
    }
  }

  template <IKind kind, bool diagnose, class F, class Env, class Alloc2>
  CUDA NI bool interpret_predicate(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    if(f.type() != UNTYPED && f.type() != aty()) {
      RETURN_INTERPRETATION_ERROR("The abstract type of this predicate does not match the one of the current abstract element.");
    }
    if constexpr(kind == IKind::TELL) {
      if(f.is(F::E)) {
        return interpret_existential<diagnose>(f, env, tell, diagnostics);
      }
    }
    switch(num_vars(f)) {
      case 0: return interpret_zero_predicate<diagnose>(f, env, tell, diagnostics);
      case 1: return interpret_unary_predicate<kind, diagnose>(f, env, tell, diagnostics);
      default: RETURN_INTERPRETATION_ERROR("Interpretation of n-ary predicates is not supported in VStore.");
    }
  }

public:
  template <IKind kind, bool diagnose = false, class F, class Env, class I>
  CUDA NI bool interpret(const F& f, Env& env, I& intermediate, IDiagnostics& diagnostics) const {
    if(f.is_untyped() || f.type() == aty()) {
      return interpret_predicate<kind, diagnose>(f, env, intermediate, diagnostics);
    }
    else {
      RETURN_INTERPRETATION_ERROR("This abstract element cannot interpret a formula with a different type.");
    }
  }

  /** The store of variables lattice expects a formula with a single variable (including existential quantifiers) that can be handled by the abstract universe `U`.
   *
   * Variables must be existentially quantified before a formula containing variables can be interpreted.
   * Variables are immediately assigned an index of `VStore` and initialized to \f$ \top_U \f$.
   * Shadowing/redeclaration of variables with existential quantifiers is not supported.
   * The variable mapping is added to the environment only if the interpretation succeeds.
   *
   * There is a small quirk: different stores might be produced if quantifiers do not appear in the same order.
   * This is because we attribute the first available index to variables when interpreting the quantifier.
   * In that case, the stores will only be equivalent modulo the `env` structure.
   */
  template <bool diagnose = false, class F, class Env, class Alloc2>
  CUDA NI bool interpret_tell(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    return interpret<IKind::TELL, diagnose>(f, env, tell, diagnostics);
  }

  /** Similar to `interpret_tell` but does not support existential quantifiers, and therefore leaves `env` unchanged. */
  template <bool diagnose = false, class F, class Env, class Alloc2>
  CUDA NI bool interpret_ask(const F& f, const Env& env, ask_type<Alloc2>& ask, IDiagnostics& diagnostics) const {
    return const_cast<this_type*>(this)->interpret<IKind::ASK, diagnose>(f, const_cast<Env&>(env), ask, diagnostics);
  }
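
  /* A rough end-to-end sketch of the interpretation pipeline (assuming a
     hypothetical universe `Itv`, an environment `env`, and a parsed formula `f`
     such as `∃x:Int, x ≤ 10`):

       VStore<Itv, battery::standard_allocator> s(env.extends_abstract_dom());
       VStore<Itv, battery::standard_allocator>::tell_type<battery::standard_allocator> tell;
       IDiagnostics diag;
       if(s.interpret_tell(f, env, tell, diag)) {
         s.deduce(tell);  // grow the store if needed and refine the domains.
       }
  */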

  template <class Group, class Store>
  CUDA void copy_to(Group& group, Store& store) const {
    assert(vars() == store.vars());
    if(group.thread_rank() == 0) {
      store.is_at_bot = is_at_bot;
    }
    if(is_at_bot) {
      return;
    }
    for (int i = group.thread_rank(); i < store.vars(); i += group.num_threads()) {
      store.data[i] = data[i];
    }
  }

#ifdef __CUDACC__
  void prefetch(int dstDevice) const {
    if(!is_at_bot) {
      cudaMemPrefetchAsync(data.data(), data.size() * sizeof(universe_type), dstDevice);
    }
  }
#endif

  /** Change the allocator of the underlying data, and reallocate the memory without copying the old data. */
  CUDA void reset_data(allocator_type alloc) {
    data = store_type(data.size(), alloc);
  }

  template <class Univ>
  CUDA void project(AVar x, Univ& u) const {
    u.meet(project(x));
  }

  CUDA const universe_type& project(AVar x) const {
    assert(x.aty() == aty());
    assert(x.vid() < data.size());
    return data[x.vid()];
  }

  /** This projection must stay const, otherwise the user might tell new information directly in the universe, and we need to know when we reach `bot` (to update `is_at_bot`). */
  CUDA const universe_type& operator[](int x) const {
    return data[x];
  }

  CUDA const universe_type& operator[](AVar x) const {
    return project(x);
  }

  CUDA void meet_bot() {
    is_at_bot.join_top();
  }

  /** Given an abstract variable `v`, `embed(VID(v), dom)` will update the domain of this variable with the new information `dom`.
   * This `embed` method follows PCCP's model, but the variable `x` must already be initialized in the store.
   * @parallel @order-preserving @increasing
   */
  CUDA bool embed(int x, const universe_type& dom) {
    assert(x < data.size());
    bool has_changed = data[x].meet(dom);
    if(has_changed && data[x].is_bot()) {
      is_at_bot.join_top();
    }
    return has_changed;
  }

  /** This `embed` method follows PCCP's model, but the variable `x` must already be initialized in the store. */
  CUDA bool embed(AVar x, const universe_type& dom) {
    assert(x.aty() == aty());
    return embed(x.vid(), dom);
  }

  /** This deduce method can grow the store if required, and therefore does not satisfy the PCCP model.
   * @sequential @order-preserving @increasing
   */
  template <class Alloc2>
  CUDA bool deduce(const tell_type<Alloc2>& t) {
    if(t.size() == 0) {
      return false;
    }
    if(t[0].avar == AVar{}) {
      return is_at_bot.join(local::B(true));
    }
    int largest_vid = 0;
    for(int i = 0; i < t.size(); ++i) {
      largest_vid = battery::max(largest_vid, t[i].avar.vid());
    }
    if(largest_vid >= data.size()) {
      data.resize(largest_vid+1);
    }
    bool has_changed = false;
    for(int i = 0; i < t.size(); ++i) {
      has_changed |= embed(t[i].avar, t[i].dom);
    }
    return has_changed;
  }

  /** Precondition: `other` must be smaller than or equal in size to the current store. */
  template <class U2, class Alloc2>
  CUDA bool meet(const VStore<U2, Alloc2>& other) {
    bool has_changed = is_at_bot.join(other.is_at_bot);
    int min_size = battery::min(vars(), other.vars());
    for(int i = 0; i < min_size; ++i) {
      has_changed |= data[i].meet(other[i]);
    }
    for(int i = min_size; i < other.vars(); ++i) {
      assert(other[i].is_top()); // the size of the current store cannot be modified.
    }
    return has_changed;
  }

  CUDA void join_top() {
    is_at_bot.meet_bot();
    for(int i = 0; i < data.size(); ++i) {
      data[i].join_top();
    }
  }

  /** Precondition: `other` must be smaller than or equal in size to the current store. */
  template <class U2, class Alloc2>
  CUDA bool join(const VStore<U2, Alloc2>& other) {
    if(other.is_bot()) {
      return false;
    }
    int min_size = battery::min(vars(), other.vars());
    bool has_changed = is_at_bot.meet(other.is_at_bot);
    for(int i = 0; i < min_size; ++i) {
      has_changed |= data[i].join(other[i]);
    }
    for(int i = min_size; i < vars(); ++i) {
      has_changed |= data[i].join(U::top());
    }
    for(int i = min_size; i < other.vars(); ++i) {
      assert(other[i].is_top());
    }
    return has_changed;
  }

  /** \return `true` when we can deduce the content of `t` from the current domain.
   * For instance, if we have in the store `x = [0..10]`, we can deduce `x = [-1..11]` but we cannot deduce `x = [5..8]`.
   * @parallel @order-preserving @decreasing */
  template <class Alloc2>
  CUDA local::B ask(const ask_type<Alloc2>& t) const {
    for(int i = 0; i < t.size(); ++i) {
      if(!(data[t[i].avar.vid()] <= t[i].dom)) {
        return false;
      }
    }
    return true;
  }
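
  /* A sketch of `ask` entailment, matching the example above (assuming a
     hypothetical interval universe `Itv` and a store `s` whose variable 0 has
     domain [0..10]):

       ask_type<battery::standard_allocator> q;
       q.push_back(var_dom<battery::standard_allocator>(AVar(s.aty(), 0), Itv(-1, 11)));
       assert(s.ask(q));  // [0..10] <= [-1..11] holds, so the query is entailed.
  */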

  CUDA int num_deductions() const { return 0; }
  CUDA local::B deduce(int) const { assert(false); return false; }

  /** An abstract element is extractable when it is not equal to bot.
   * If the strategy is `atoms`, we additionally check that all domains are singletons.
   */
  template<class ExtractionStrategy = NonAtomicExtraction>
  CUDA bool is_extractable(const ExtractionStrategy& strategy = ExtractionStrategy()) const {
    if(is_bot()) {
      return false;
    }
    if constexpr(ExtractionStrategy::atoms) {
      for(int i = 0; i < data.size(); ++i) {
        if(data[i].lb().value() != data[i].ub().value()) {
          return false;
        }
      }
    }
    return true;
  }

#ifdef __CUDACC__
  template<class ExtractionStrategy = NonAtomicExtraction>
  __device__ bool is_extractable(auto& group, const ExtractionStrategy& strategy = ExtractionStrategy()) const {
    if(is_bot()) {
      return false;
    }
    if constexpr(ExtractionStrategy::atoms) {
      __shared__ bool res;
      if(group.thread_rank() == 0) {
        res = true;
      }
      group.sync();
      for(int i = group.thread_rank(); i < data.size(); i += group.num_threads()) {
        if(data[i].lb().value() != data[i].ub().value()) {
          res = false;
        }
      }
      group.sync();
      return res;
    }
    else {
      return true;
    }
  }
#endif

  /** Whenever `this` is different from `bot`, we extract its data into `ua`.
   * \pre `is_extractable()` must be `true`.
   * For now, we suppose VStore is only used to store under-approximations; I'm not sure yet how we would interact with over-approximations. */
  template<class U2, class Alloc2>
  CUDA void extract(VStore<U2, Alloc2>& ua) const {
    if((void*)&ua != (void*)this) {
      ua.data = data;
      ua.is_at_bot.meet_bot();
    }
  }
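
  /* A typical extraction sketch (assuming a solution store `sol` of the same
     shape as `s`):

       if(s.is_extractable(AtomicExtraction{})) {  // all domains are singletons.
         s.extract(sol);                           // copy the assignment into `sol`.
       }
  */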

private:
  template<class U2, class Env, class Allocator2>
  CUDA TFormula<typename Env::allocator_type> deinterpret(AVar avar, const U2& dom, const Env& env, const Allocator2& allocator) const {
    auto f = dom.deinterpret(avar, env, allocator);
    f.type_as(aty());
    map_avar_to_lvar(f, env);
    return std::move(f);
  }

public:
  template<class Env, class Allocator2 = typename Env::allocator_type>
  CUDA NI TFormula<Allocator2> deinterpret(const Env& env, const Allocator2& allocator = Allocator2()) const {
    using F = TFormula<Allocator2>;
    if(data.size() == 0) {
      return is_bot() ? F::make_false() : F::make_true();
    }
    typename F::Sequence seq{allocator};
    for(int i = 0; i < data.size(); ++i) {
      AVar v(aty(), i);
      seq.push_back(F::make_exists(aty(), env.name_of(v), env.sort_of(v)));
      seq.push_back(deinterpret(AVar(aty(), i), data[i], env, allocator));
    }
    return F::make_nary(AND, std::move(seq), aty());
  }

  template<class I, class Env, class Allocator2 = typename Env::allocator_type>
  CUDA NI TFormula<Allocator2> deinterpret(const I& intermediate, const Env& env, const Allocator2& allocator = Allocator2()) const {
    using F = TFormula<Allocator2>;
    if(intermediate.size() == 0) {
      return F::make_true();
    }
    else if(intermediate.size() == 1) {
      return deinterpret(intermediate[0].avar, intermediate[0].dom, env, allocator);
    }
    else {
      typename F::Sequence seq{allocator};
      for(int i = 0; i < intermediate.size(); ++i) {
        seq.push_back(deinterpret(intermediate[i].avar, intermediate[i].dom, env, allocator));
      }
      return F::make_nary(AND, std::move(seq), aty());
    }
  }

  CUDA void print() const {
    if(is_bot()) {
      printf("\u22A5 | ");
    }
    printf("<");
    for(int i = 0; i < vars(); ++i) {
      data[i].print();
      printf("%s", (i+1 == vars() ? "" : ", "));
    }
    printf(">\n");
  }
};

// Lattice operations.
// Note that we do not consider the logical names.
// These operations only consider the indices of the elements.

template<class L, class K, class Alloc>
CUDA auto fmeet(const VStore<L, Alloc>& a, const VStore<K, Alloc>& b)
{
  using U = decltype(fmeet(a[0], b[0]));
  if(a.is_bot() || b.is_bot()) {
    return VStore<U, Alloc>::bot(UNTYPED, a.get_allocator());
  }
  int max_size = battery::max(a.vars(), b.vars());
  int min_size = battery::min(a.vars(), b.vars());
  VStore<U, Alloc> res(UNTYPED, max_size, a.get_allocator());
  for(int i = 0; i < min_size; ++i) {
    res.embed(i, fmeet(a[i], b[i]));
  }
  for(int i = min_size; i < a.vars(); ++i) {
    res.embed(i, a[i]);
  }
  for(int i = min_size; i < b.vars(); ++i) {
    res.embed(i, b[i]);
  }
  return res;
}

template<class L, class K, class Alloc>
CUDA auto fjoin(const VStore<L, Alloc>& a, const VStore<K, Alloc>& b)
{
  using U = decltype(fjoin(a[0], b[0]));
  if(a.is_bot()) {
    if(b.is_bot()) {
      return VStore<U, Alloc>::bot(UNTYPED, a.get_allocator());
    }
    else {
      return VStore<U, Alloc>(b);
    }
  }
  else if(b.is_bot()) {
    return VStore<U, Alloc>(a);
  }
  else {
    int min_size = battery::min(a.vars(), b.vars());
    VStore<U, Alloc> res(UNTYPED, min_size, a.get_allocator());
    for(int i = 0; i < min_size; ++i) {
      res.embed(i, fjoin(a[i], b[i]));
    }
    return res;
  }
}
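
/* A small sketch of the index-wise lattice operations (assuming a hypothetical
   interval universe `Itv`):

     VStore<Itv, battery::standard_allocator> a(UNTYPED, 2), b(UNTYPED, 1);
     a.embed(0, Itv(0, 10)); a.embed(1, Itv(2, 3));
     b.embed(0, Itv(5, 20));
     auto m = fmeet(a, b);  // <[5..10], [2..3]>: pairwise meet, extra variables kept.
     auto j = fjoin(a, b);  // <[0..20]>: pairwise join, truncated to the common size
                            // (missing variables are implicitly top, and top tails
                            // are equivalent to absent variables).
*/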

template<class L, class K, class Alloc1, class Alloc2>
CUDA bool operator<=(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
{
  if(b.is_top()) {
    return true;
  }
  else {
    int min_size = battery::min(a.vars(), b.vars());
    for(int i = 0; i < min_size; ++i) {
      if(!(a[i] <= b[i])) {
        return false;
      }
    }
    for(int i = min_size; i < b.vars(); ++i) {
      if(!b[i].is_top()) {
        return false;
      }
    }
  }
  return true;
}

template<class L, class K, class Alloc1, class Alloc2>
CUDA bool operator<(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
{
  if(a.is_bot()) {
    return !b.is_bot();
  }
  else {
    int min_size = battery::min(a.vars(), b.vars());
    bool strict = false;
    for(int i = 0; i < a.vars(); ++i) {
      if(i < b.vars()) {
        if(a[i] < b[i]) {
          strict = true;
        }
        else if(!(a[i] <= b[i])) {
          return false;
        }
      }
      else if(!a[i].is_top()) {
        strict = true;
        break;
      }
    }
    for(int i = min_size; i < b.vars(); ++i) {
      if(!b[i].is_top()) {
        return false;
      }
    }
    return strict;
  }
}

template<class L, class K, class Alloc1, class Alloc2>
CUDA bool operator>=(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
{
  return b <= a;
}

template<class L, class K, class Alloc1, class Alloc2>
CUDA bool operator>(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
{
  return b < a;
}

template<class L, class K, class Alloc1, class Alloc2>
CUDA bool operator==(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
{
  if(a.is_bot()) {
    return b.is_bot();
  }
  else if(b.is_bot()) {
    return false;
  }
  else {
    int min_size = battery::min(a.vars(), b.vars());
    for(int i = 0; i < min_size; ++i) {
      if(a[i] != b[i]) {
        return false;
      }
    }
    for(int i = min_size; i < a.vars(); ++i) {
      if(!a[i].is_top()) {
        return false;
      }
    }
    for(int i = min_size; i < b.vars(); ++i) {
      if(!b[i].is_top()) {
        return false;
      }
    }
  }
  return true;
}

template<class L, class K, class Alloc1, class Alloc2>
CUDA bool operator!=(const VStore<L, Alloc1>& a, const VStore<K, Alloc2>& b)
{
  return !(a == b);
}

template<class L, class Alloc>
std::ostream& operator<<(std::ostream &s, const VStore<L, Alloc> &vstore) {
  if(vstore.is_bot()) {
    s << "\u22A5: ";
  }
  else {
    s << "<";
    for(int i = 0; i < vstore.vars(); ++i) {
      s << vstore[i] << (i+1 == vstore.vars() ? "" : ", ");
    }
    s << ">";
  }
  return s;
}

} // namespace lala

#endif