Lattice Land Powerdomains Library
table.hpp
// Copyright 2023 Pierre Talbot

#ifndef LALA_POWER_TABLES_HPP
#define LALA_POWER_TABLES_HPP

#include "battery/vector.hpp"
#include "battery/shared_ptr.hpp"
#include "battery/dynamic_bitset.hpp"
#include "lala/logic/logic.hpp"
#include "lala/universes/primitive_upset.hpp"
#include "lala/abstract_deps.hpp"

namespace lala {

template <class A, class U, class Alloc> class Table;

namespace impl {
  template <class>
  struct is_table_like {
    static constexpr bool value = false;
  };

  template <class A, class U, class Alloc>
  struct is_table_like<Table<A, U, Alloc>> {
    static constexpr bool value = true;
  };
}

/** The table abstract domain represents predicates in extension, by listing all their solutions explicitly.
 * It is inspired by the table global constraint, and generalizes it by lifting each element of the table to a lattice element.
 * We expect `U` to be at most as expressive as `A::universe_type`, because the meet is computed in `A::universe_type` and not in `U`.
 */
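// For instance (a sketch of the encoding performed by `interpret` below): the formula
// (x = 0 /\ y = 1) \/ (x = 1 /\ y = 2) is represented by a table with header (x, y) and
// the two rows (0, 1) and (1, 2), where each cell is lifted to an element of `U`.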
template <class A, class U = typename A::universe_type, class Allocator = typename A::allocator_type>
class Table {
public:
  using allocator_type = Allocator;
  using sub_allocator_type = typename A::allocator_type;
  using universe_type = U;
  using local_universe = typename universe_type::local_type;
  using sub_universe_type = typename A::universe_type;
  using sub_local_universe = typename sub_universe_type::local_type;
  using memory_type = typename universe_type::memory_type;
  using sub_type = A;
  using sub_ptr = abstract_ptr<sub_type>;
  using this_type = Table<A, U, Allocator>;

  constexpr static const bool is_abstract_universe = false;
  constexpr static const bool sequential = sub_type::sequential;
  constexpr static const bool is_totally_ordered = false;
  constexpr static const bool preserve_bot = sub_type::preserve_bot;
  constexpr static const bool preserve_top = sub_type::preserve_top;
  // The next properties should be checked more carefully; relying on the sub-domain might be unnecessarily restrictive.
  constexpr static const bool preserve_join = sub_type::preserve_join;
  constexpr static const bool preserve_meet = sub_type::preserve_meet;
  constexpr static const bool injective_concretization = sub_type::injective_concretization;
  constexpr static const bool preserve_concrete_covers = sub_type::preserve_concrete_covers;
  constexpr static const char* name = "Table";

  using table_type = battery::vector<universe_type, allocator_type>;
  using bitset_type = battery::dynamic_bitset<memory_type, allocator_type>;

private:
  AType atype;
  AType store_aty;
  sub_ptr sub;

  // For each instance `i` of the table, we have its set of variables `headers[i]`.
  battery::vector<battery::vector<AVar, allocator_type>, allocator_type> headers;
  table_type tell_table;
  table_type ask_table;
  battery::vector<bitset_type, allocator_type> eliminated_rows;

  // We keep a bitset representation of each variable in the table.
  // We perform a reduced product between this representation and the underlying domain.
  battery::vector<bitset_type, allocator_type> bitset_store;

public:
  template <class Alloc>
  struct tell_type {
    using allocator_type = Alloc;

    typename A::template tell_type<Alloc> sub;

    battery::vector<battery::vector<AVar, Alloc>, Alloc> headers;

    battery::vector<universe_type, Alloc> tell_table;
    battery::vector<universe_type, Alloc> ask_table;

    CUDA tell_type(const Alloc& alloc = Alloc{})
      : sub(alloc)
      , headers(alloc)
      , tell_table(alloc)
      , ask_table(alloc)
    {}
    tell_type(const tell_type&) = default;
    tell_type(tell_type&&) = default;
    tell_type& operator=(tell_type&&) = default;
    tell_type& operator=(const tell_type&) = default;

    template <class TableTellType>
    CUDA NI tell_type(const TableTellType& other, const Alloc& alloc = Alloc{})
      : sub(other.sub, alloc)
      , headers(other.headers, alloc)
      , tell_table(other.tell_table, alloc)
      , ask_table(other.ask_table, alloc)
    {}

    CUDA allocator_type get_allocator() const {
      return headers.get_allocator();
    }

    template <class Alloc2>
    friend class tell_type;
  };

  template <class Alloc>
  struct ask_type {
    using allocator_type = Alloc;

    typename A::template ask_type<Alloc> sub;
    battery::vector<battery::vector<AVar, Alloc>, Alloc> headers;
    battery::vector<universe_type, Alloc> ask_table;

    CUDA ask_type(const Alloc& alloc = Alloc{})
      : sub(alloc)
      , headers(alloc)
      , ask_table(alloc)
    {}
    ask_type(const ask_type&) = default;
    ask_type(ask_type&&) = default;
    ask_type& operator=(ask_type&&) = default;
    ask_type& operator=(const ask_type&) = default;

    template <class TableAskType>
    CUDA NI ask_type(const TableAskType& other, const Alloc& alloc = Alloc{})
      : sub(other.sub, alloc)
      , headers(other.headers, alloc)
      , ask_table(other.ask_table, alloc)
    {}

    CUDA allocator_type get_allocator() const {
      return headers.get_allocator();
    }

    template <class Alloc2>
    friend class ask_type;
  };

  template <class A2, class U2, class Alloc2>
  friend class Table;

public:
  CUDA Table(AType uid, AType store_aty, sub_ptr sub, const allocator_type& alloc = allocator_type())
    : atype(uid)
    , store_aty(store_aty)
    , sub(std::move(sub))
    , headers(alloc)
    , tell_table(alloc)
    , ask_table(alloc)
    , eliminated_rows(alloc)
  {}

  CUDA Table(AType uid, sub_ptr sub, const allocator_type& alloc = allocator_type())
    : Table(uid, sub->aty(), sub, alloc)
  {}

  template <class A2, class U2, class Alloc2, class... Allocators>
  CUDA NI Table(const Table<A2, U2, Alloc2>& other, AbstractDeps<Allocators...>& deps)
    : atype(other.atype)
    , store_aty(other.store_aty)
    , sub(deps.template clone<sub_type>(other.sub))
    , headers(other.headers, deps.template get_allocator<allocator_type>())
    , tell_table(other.tell_table, deps.template get_allocator<allocator_type>())
    , ask_table(other.ask_table, deps.template get_allocator<allocator_type>())
    , eliminated_rows(other.eliminated_rows, deps.template get_allocator<allocator_type>())
  {}

  CUDA AType aty() const {
    return atype;
  }

  CUDA allocator_type get_allocator() const {
    return headers.get_allocator();
  }

  CUDA sub_ptr subdomain() const {
    return sub;
  }

  CUDA local::BDec is_bot() const {
    return tell_table.size() == 0 && sub->is_bot();
  }

  CUDA local::BInc is_top() const {
    // A table is top as soon as all of its rows have been eliminated.
    for(int i = 0; i < eliminated_rows.size(); ++i) {
      if(eliminated_rows[i].count() == num_rows()) {
        return true;
      }
    }
    return sub->is_top();
  }

  CUDA static this_type bot(AType atype = UNTYPED,
    AType atype_sub = UNTYPED,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    return Table{atype, battery::allocate_shared<sub_type>(alloc, sub_type::bot(atype_sub, sub_alloc)), alloc};
  }

  /** A special symbolic element representing top. */
  CUDA static this_type top(AType atype = UNTYPED,
    AType atype_sub = UNTYPED,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    return Table{atype, battery::allocate_shared<sub_type>(sub_alloc, sub_type::top(atype_sub, sub_alloc)), alloc};
  }

  template <class Env>
  CUDA static this_type bot(Env& env,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    AType atype_sub = env.extends_abstract_dom();
    AType atype = env.extends_abstract_dom();
    return bot(atype, atype_sub, alloc, sub_alloc);
  }

  template <class Env>
  CUDA static this_type top(Env& env,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    AType atype_sub = env.extends_abstract_dom();
    AType atype = env.extends_abstract_dom();
    return top(atype, atype_sub, alloc, sub_alloc);
  }

  template <class Alloc2>
  struct snapshot_type {
    using sub_snap_type = sub_type::template snapshot_type<Alloc2>;
    sub_snap_type sub_snap;
    size_t num_tables;

    snapshot_type(const snapshot_type<Alloc2>&) = default;
    snapshot_type(snapshot_type<Alloc2>&&) = default;
    snapshot_type<Alloc2>& operator=(snapshot_type<Alloc2>&&) = default;
    snapshot_type<Alloc2>& operator=(const snapshot_type<Alloc2>&) = default;

    template <class SnapshotType>
    CUDA snapshot_type(const SnapshotType& other, const Alloc2& alloc = Alloc2())
      : sub_snap(other.sub_snap, alloc)
      , num_tables(other.num_tables)
    {}

    CUDA snapshot_type(sub_snap_type&& sub_snap, size_t num_tables)
      : sub_snap(std::move(sub_snap))
      , num_tables(num_tables)
    {}
  };

  template <class Alloc2 = allocator_type>
  CUDA snapshot_type<Alloc2> snapshot(const Alloc2& alloc = Alloc2()) const {
    return snapshot_type<Alloc2>(sub->snapshot(alloc), headers.size());
  }

  template <class Alloc2>
  CUDA void restore(const snapshot_type<Alloc2>& snap) {
    sub->restore(snap.sub_snap);
    headers.resize(snap.num_tables);
    if(snap.num_tables == 0) {
      tell_table.resize(0);
      ask_table.resize(0);
    }
    eliminated_rows.resize(snap.num_tables);
    for(int i = 0; i < eliminated_rows.size(); ++i) {
      eliminated_rows[i].reset();
    }
  }

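  // The following three helpers normalize a formula by flattening nested conjunctions
  // and disjunctions (without distributing one over the other), so that `interpret` can
  // view the result as a disjunction of conjunctions: one disjunct per row, one atom per cell.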
  template <class F>
  CUDA void flatten_and(const F& f, typename F::Sequence& conjuncts) const {
    if(f.is(F::Seq) && f.sig() == AND) {
      for(int i = 0; i < f.seq().size(); ++i) {
        flatten_and(f.seq(i), conjuncts);
      }
    }
    else {
      conjuncts.push_back(f);
    }
  }

  template <class F>
  CUDA void flatten_or(const F& f, typename F::Sequence& disjuncts) const {
    if(f.is(F::Seq) && f.sig() == OR) {
      for(int i = 0; i < f.seq().size(); ++i) {
        flatten_or(f.seq(i), disjuncts);
      }
    }
    else {
      typename F::Sequence conjuncts{disjuncts.get_allocator()};
      flatten_and(f, conjuncts);
      if(conjuncts.size() > 1) {
        disjuncts.push_back(F::make_nary(AND, std::move(conjuncts)));
      }
      else {
        disjuncts.push_back(std::move(conjuncts[0]));
      }
    }
  }

  template <class F>
  CUDA F flatten(const F& f, const typename F::allocator_type& alloc) const {
    typename F::Sequence disjuncts{alloc};
    flatten_or(f, disjuncts);
    if(disjuncts.size() > 1) {
      return F::make_nary(OR, std::move(disjuncts));
    }
    else {
      return std::move(disjuncts[0]);
    }
  }

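  // Interpret the unary formula `f` as a cell of the row currently being built (the last
  // row of `tell_table2`/`ask_table2`). If `f` constrains a variable not yet in `header`,
  // a new column initialized to `bot` is appended to every row.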
  template <IKind kind, bool diagnose = false, class F, class Env, class Alloc>
  CUDA NI bool interpret_atom(
    battery::vector<AVar, Alloc>& header,
    battery::vector<battery::vector<local_universe, Alloc>, Alloc>& tell_table2,
    battery::vector<battery::vector<local_universe, Alloc>, Alloc>& ask_table2,
    const F& f, Env& env, IDiagnostics& diagnostics) const
  {
    if(num_vars(f) != 1) {
      RETURN_INTERPRETATION_ERROR("Only unary formulas are supported in the cells of the table.");
    }
    else {
      auto x_opt = var_in(f, env);
      if(!x_opt.has_value() || !x_opt->get().avar_of(store_aty).has_value()) {
        RETURN_INTERPRETATION_ERROR("Undeclared variable.");
      }
      AVar x = x_opt->get().avar_of(store_aty).value();
      int idx = 0;
      for(; idx < header.size() && header[idx] != x; ++idx) {}
      // If it is a new variable not present in the previous rows, we add it to each row with a bottom value.
      if(idx == header.size()) {
        header.push_back(x);
        for(int i = 0; i < tell_table2.size(); ++i) {
          if constexpr(kind == IKind::TELL) {
            tell_table2[i].push_back(local_universe::bot());
          }
          ask_table2[i].push_back(local_universe::bot());
        }
      }
      local_universe ask_u{local_universe::bot()};
      if(ginterpret_in<IKind::ASK, diagnose>(f, env, ask_u, diagnostics)) {
        ask_table2.back()[idx].tell(ask_u);
        if constexpr(kind == IKind::TELL) {
          local_universe tell_u{local_universe::bot()};
          if(ginterpret_in<IKind::TELL, diagnose>(f, env, tell_u, diagnostics)) {
            tell_table2.back()[idx].tell(tell_u);
          }
          else {
            return false;
          }
        }
      }
      else {
        return false;
      }
    }
    return true;
  }

  template <class Alloc1, class Alloc2>
  CUDA battery::vector<local_universe, Alloc2> flatten_table(
    const battery::vector<battery::vector<local_universe, Alloc1>, Alloc1>& table, const Alloc2& alloc) const
  {
    if(table.size() == 0) { return battery::vector<local_universe, Alloc2>(alloc); }
    battery::vector<local_universe, Alloc2> res(alloc);
    res.reserve(table.size() * table[0].size());
    for(int i = 0; i < table.size(); ++i) {
      for(int j = 0; j < table[i].size(); ++j) {
        res.push_back(table[i][j]);
      }
    }
    return std::move(res);
  }

public:
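  // Interpretation strategy: the formula is first flattened; when it is a disjunction,
  // each disjunct becomes a row and each unary atom a cell. If some atom cannot be
  // represented as a cell, we fall back on interpreting the whole formula in the
  // sub-domain. Formulas over a single variable are also delegated to the sub-domain
  // (presumably because a one-column table adds nothing over the sub-domain itself).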
  template <IKind kind, bool diagnose = false, class F, class Env, class I>
  CUDA NI bool interpret(const F& f2, Env& env, I& intermediate, IDiagnostics& diagnostics) const {
    F f = flatten(f2, env.get_allocator());
    using Alloc = typename I::allocator_type;
    if(f.is(F::Seq) && f.sig() == OR) {
      battery::vector<AVar, Alloc> header(intermediate.get_allocator());
      battery::vector<battery::vector<local_universe, Alloc>, Alloc> tell_table2(intermediate.get_allocator());
      battery::vector<battery::vector<local_universe, Alloc>, Alloc> ask_table2(intermediate.get_allocator());
      for(int i = 0; i < f.seq().size(); ++i) {
        // Add a row to the table.
        tell_table2.push_back(battery::vector<local_universe, Alloc>(header.size(), local_universe::bot(), intermediate.get_allocator()));
        ask_table2.push_back(battery::vector<local_universe, Alloc>(header.size(), local_universe::bot(), intermediate.get_allocator()));
        if(f.seq(i).is(F::Seq) && f.seq(i).sig() == AND) {
          const auto& row = f.seq(i).seq();
          for(int j = 0; j < row.size(); ++j) {
            size_t error_ctx = diagnostics.num_suberrors();
            if(!interpret_atom<kind, diagnose>(header, tell_table2, ask_table2, row[j], env, diagnostics)) {
              if(!sub->template interpret<kind, diagnose>(f2, env, intermediate.sub, diagnostics)) {
                return false;
              }
              diagnostics.cut(error_ctx);
              return true;
            }
          }
        }
        else {
          size_t error_ctx = diagnostics.num_suberrors();
          if(!interpret_atom<kind, diagnose>(header, tell_table2, ask_table2, f.seq(i), env, diagnostics)) {
            if(!sub->template interpret<kind, diagnose>(f2, env, intermediate.sub, diagnostics)) {
              return false;
            }
            diagnostics.cut(error_ctx);
            return true;
          }
        }
      }
      // When only one variable is present, we leave its interpretation to the sub-domain.
      if(header.size() > 1) {
        auto ask_table2_flatten = flatten_table(ask_table2, ask_table2.get_allocator());
        if((intermediate.ask_table.size() == 0 || intermediate.ask_table == ask_table2_flatten) &&
           (ask_table.size() == 0 || ask_table == ask_table2_flatten))
        {
          intermediate.headers.push_back(std::move(header));
          if(intermediate.ask_table.size() == 0 && ask_table.size() == 0) {
            if constexpr(kind == IKind::TELL) {
              intermediate.tell_table = flatten_table(tell_table2, tell_table2.get_allocator());
            }
            intermediate.ask_table = std::move(ask_table2_flatten);
          }
          return true;
        }
        else {
          RETURN_INTERPRETATION_ERROR("This abstract domain can only represent multiple tables over the same matrix of values, but a different matrix was detected.");
        }
      }
    }
    return sub->template interpret<kind, diagnose>(f, env, intermediate.sub, diagnostics);
  }

  template <bool diagnose = false, class F, class Env, class Alloc>
  CUDA NI bool interpret_ask(const F& f, const Env& env, ask_type<Alloc>& ask, IDiagnostics& diagnostics) const {
    return interpret<IKind::ASK, diagnose>(f, const_cast<Env&>(env), ask, diagnostics);
  }

  template <bool diagnose = false, class F, class Env, class Alloc>
  CUDA NI bool interpret_tell(const F& f, Env& env, tell_type<Alloc>& tell, IDiagnostics& diagnostics) const {
    return interpret<IKind::TELL, diagnose>(f, env, tell, diagnostics);
  }

  CUDA const sub_universe_type& operator[](int x) const {
    return (*sub)[x];
  }

  CUDA size_t vars() const {
    return sub->vars();
  }

  CUDA size_t num_tables() const {
    return headers.size();
  }

private:
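  // Convert an element of `U` into the sub-universe `A::universe_type`. When the two types
  // differ, we round-trip through the logical representation (deinterpret, then interpret
  // in the sub-universe), which is expected to succeed since `U` is at most as expressive.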
  template <IKind kind>
  CUDA sub_local_universe convert(const local_universe& x) const {
    if constexpr(std::is_same_v<universe_type, sub_universe_type>) {
      return x;
    }
    else {
      VarEnv<battery::standard_allocator> env;
      IDiagnostics diagnostics;
      sub_local_universe v{sub_local_universe::bot()};
      bool succeed = ginterpret_in<kind>(x.deinterpret(AVar{}, env), env, v, diagnostics);
      assert(succeed);
      return v;
    }
  }

  CUDA size_t num_columns() const {
    return headers[0].size();
  }

  CUDA size_t num_rows() const {
    return tell_table.size() / num_columns();
  }

public:
  template <class Alloc, class Mem>
  CUDA this_type& tell(const tell_type<Alloc>& t, BInc<Mem>& has_changed) {
    if(t.headers.size() > 0) {
      has_changed.tell_top();
    }
    sub->tell(t.sub, has_changed);
    // If there is a table in the tell, we add it to the current abstract element.
    if(t.tell_table.size() > 0) {
      // Since this abstract element only handles one matrix of elements at a time, the current table must be empty.
      assert(tell_table.size() == 0);
      tell_table = t.tell_table;
      ask_table = t.ask_table;
    }
    // For each new table (sharing the matrix of elements).
    for(int i = 0; i < t.headers.size(); ++i) {
      // Each table must have the same number of columns.
      headers.push_back(battery::vector<AVar, allocator_type>(t.headers[i], get_allocator()));
      eliminated_rows.push_back(bitset_type(num_rows(), get_allocator()));
    }
    return *this;
  }

  template <class Alloc>
  CUDA this_type& tell(const tell_type<Alloc>& t) {
    local::BInc has_changed;
    return tell(t, has_changed);
  }

  CUDA this_type& tell(AVar x, const sub_universe_type& dom) {
    sub->tell(x, dom);
    return *this;
  }

  template <class Mem>
  CUDA this_type& tell(AVar x, const sub_universe_type& dom, BInc<Mem>& has_changed) {
    sub->tell(x, dom, has_changed);
    return *this;
  }

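  // Index of cell (i, j) in the flattened row-major matrix: row `i` occupies the
  // contiguous range [i * num_columns(), (i + 1) * num_columns()).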
  CUDA size_t to1D(int i, int j) const { return i * num_columns() + j; }

private:
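  // Entailment check over all tables: a table is entailed when at least one of its rows
  // is entailed, and a row is entailed when, for every column, the current domain of the
  // column's variable is above the cell's ask value in the lattice order.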
  template <class Alloc>
  CUDA local::BInc ask(const battery::vector<battery::vector<AVar, Alloc>, Alloc>& headers) const
  {
    for(int i = 0; i < headers.size(); ++i) {
      bool row_entailed = false;
      for(int j = 0; j < num_rows(); ++j) {
        row_entailed = true;
        for(int k = 0; k < num_columns(); ++k) {
          if(!(sub->project(headers[i][k]) >= convert<IKind::ASK>(ask_table[to1D(j,k)]))) {
            row_entailed = false;
            break;
          }
        }
        if(row_entailed) {
          break;
        }
      }
      if(!row_entailed) { return false; }
    }
    return true;
  }

public:
  template <class Alloc>
  CUDA local::BInc ask(const ask_type<Alloc>& a) const {
    return ask(a.headers) && sub->ask(a.sub);
  }

private:
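  // Propagate column `col` of table `table_num`: a row is eliminated when the join of its
  // cell with the current domain of the column's variable is `top` (the row is inconsistent
  // with the domain); the variable is then told `u`, which gathers (by `dtell`, the dual
  // of `tell`) the cells of all surviving rows.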
  template <class Mem>
  CUDA void refine(size_t table_num, size_t col, BInc<Mem>& has_changed) {
    sub_local_universe u{sub_local_universe::top()};
    for(int j = 0; j < num_rows(); ++j) {
      auto r = convert<IKind::TELL>(tell_table[to1D(j,col)]);
      if(!eliminated_rows[table_num].test(j)) {
        if(join(r, sub->project(headers[table_num][col])).is_top()) {
          eliminated_rows[table_num].set(j);
          has_changed.tell_top();
        }
        else {
          u.dtell(r);
        }
      }
    }
    sub->tell(headers[table_num][col], u, has_changed);
  }

public:
  CUDA size_t num_refinements() const {
    // Guard the table-free case: `num_columns()` reads `headers[0]`.
    return
      sub->num_refinements() +
      (headers.size() == 0 ? 0 : headers.size() * num_columns());
  }

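  // Refinement indices are shared with the sub-domain: indices below `sub->num_refinements()`
  // refine the sub-domain; the remaining indices are mapped to (table, column) pairs,
  // interleaved across tables (`i % headers.size()` is the table, `i / headers.size()` the column).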
  template <class Mem>
  CUDA void refine(size_t i, BInc<Mem>& has_changed) {
    assert(i < num_refinements());
    if(i < sub->num_refinements()) {
      sub->refine(i, has_changed);
    }
    else {
      i -= sub->num_refinements();
      refine(i % headers.size(), i / headers.size(), has_changed);
    }
  }

  template <class ExtractionStrategy = NonAtomicExtraction>
  CUDA bool is_extractable(const ExtractionStrategy& strategy = ExtractionStrategy()) const {
    // Check that every table still has at least one entailed row.
    return ask(headers) && sub->is_extractable(strategy);
  }

  /** Extract an under-approximation if the last node popped \f$ a \f$ is an under-approximation.
   * If `B` is a search tree, the under-approximation consists of a search tree \f$ \{a\} \f$ with a single node; in that case, `ua` must be different from `top`. */
  template <class B>
  CUDA void extract(B& ua) const {
    if constexpr(impl::is_table_like<B>::value) {
      sub->extract(*ua.sub);
    }
    else {
      sub->extract(ua);
    }
  }

  CUDA sub_universe_type project(AVar x) const {
    return sub->project(x);
  }

  template <class Env>
  CUDA NI TFormula<typename Env::allocator_type> deinterpret(const Env& env) const {
    using F = TFormula<typename Env::allocator_type>;
    F sub_f = sub->deinterpret(env);
    typename F::Sequence seq{env.get_allocator()};
    if(sub_f.is(F::Seq) && sub_f.sig() == AND) {
      seq = std::move(sub_f.seq());
    }
    else {
      seq.push_back(std::move(sub_f));
    }
    for(int i = 0; i < headers.size(); ++i) {
      typename F::Sequence disjuncts{env.get_allocator()};
      for(int j = 0; j < num_rows(); ++j) {
        if(!eliminated_rows[i].test(j)) {
          typename F::Sequence conjuncts{env.get_allocator()};
          for(int k = 0; k < num_columns(); ++k) {
            if(!(sub->project(headers[i][k]) >= convert<IKind::ASK>(ask_table[to1D(j,k)]))) {
              conjuncts.push_back(tell_table[to1D(j,k)].deinterpret(headers[i][k], env));
            }
          }
          disjuncts.push_back(F::make_nary(AND, std::move(conjuncts), aty()));
        }
      }
      seq.push_back(F::make_nary(OR, std::move(disjuncts), aty()));
    }
    return F::make_nary(AND, std::move(seq));
  }
};
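
// Usage sketch (the sub-domain and variable names below are hypothetical placeholders):
//   VarEnv<battery::standard_allocator> env;
//   auto table = Table<VStore<...>>::bot(env);
//   // `interpret_tell` turns an OR-of-ANDs formula into a `tell_type`, `tell` adds it to
//   // the abstract element, and repeated calls to `refine` propagate it (eliminating rows
//   // and pruning variable domains).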

} // namespace lala

#endif