Lattice Land Powerdomains Library
search_tree.hpp
// Copyright 2022 Pierre Talbot

#ifndef LALA_POWER_SEARCH_TREE_HPP
#define LALA_POWER_SEARCH_TREE_HPP

#include "battery/vector.hpp"
#include "battery/shared_ptr.hpp"
#include "lala/logic/logic.hpp"
#include "lala/universes/arith_bound.hpp"
#include "lala/abstract_deps.hpp"
#include "lala/vstore.hpp"

#include "split_strategy.hpp"

namespace lala {
template <class A, class S, class Allocator> class SearchTree;
namespace impl {
  template <class>
  struct is_search_tree_like {
    static constexpr bool value = false;
  };
  template<class A, class S, class Alloc>
  struct is_search_tree_like<SearchTree<A, S, Alloc>> {
    static constexpr bool value = true;
  };
}

template <class A, class Split, class Allocator = typename A::allocator_type>
class SearchTree {
public:
  using allocator_type = Allocator;
  using sub_allocator_type = typename A::allocator_type;
  using split_type = Split;
  using branch_type = typename split_type::branch_type;
  using universe_type = typename A::universe_type;
  using local_universe = typename universe_type::local_type;
  using sub_type = A;
  using sub_ptr = abstract_ptr<sub_type>;
  using split_ptr = abstract_ptr<split_type>;

  constexpr static const bool is_abstract_universe = false;
  constexpr static const bool sequential = sub_type::sequential;
  constexpr static const bool is_totally_ordered = false;
  constexpr static const bool preserve_bot = true;
  constexpr static const bool preserve_top = true;
  // The next properties should be checked more carefully; relying on the sub-domain might be unnecessarily restrictive.
  constexpr static const bool preserve_join = sub_type::preserve_join;
  constexpr static const bool preserve_meet = sub_type::preserve_meet;
  constexpr static const bool injective_concretization = sub_type::injective_concretization;
  constexpr static const bool preserve_concrete_covers = sub_type::preserve_concrete_covers;
  constexpr static const char* name = "SearchTree";

  template <class Alloc>
  struct tell_type {
    typename A::template tell_type<Alloc> sub_tell;
    typename split_type::template tell_type<Alloc> split_tell;
    CUDA tell_type(const Alloc& alloc = Alloc{}): sub_tell(alloc), split_tell(alloc) {}
    tell_type(const tell_type&) = default;
    tell_type(tell_type&&) = default;
    tell_type& operator=(tell_type&&) = default;
    tell_type& operator=(const tell_type&) = default;

    template <class SearchTreeTellType>
    CUDA NI tell_type(const SearchTreeTellType& other, const Alloc& alloc = Alloc{})
      : sub_tell(other.sub_tell, alloc), split_tell(other.split_tell, alloc)
    {}

    using allocator_type = Alloc;
    CUDA allocator_type get_allocator() const {
      return sub_tell.get_allocator();
    }

    template <class Alloc2>
    friend class tell_type;
  };

  template<class Alloc>
  using ask_type = typename A::template ask_type<Alloc>;

  template <class A2, class S2, class Alloc2>
  friend class SearchTree;

private:
  AType atype;
  // `a` reflects the current node of the search tree being refined and expanded.
  // If the search tree is `bot` (i.e., empty), then `a` is equal to `nullptr`.
  sub_ptr a;
  split_ptr split;
  battery::vector<branch_type, allocator_type> stack;
  using sub_snapshot_type = sub_type::template snapshot_type<allocator_type>;
  using split_snapshot_type = split_type::template snapshot_type<allocator_type>;
  using root_type = battery::tuple<sub_snapshot_type, split_snapshot_type>;
  root_type root;

  // Tell formulas (and strategies) to be added to root on backtracking.
  struct root_tell_type {
    battery::vector<typename A::template tell_type<allocator_type>, allocator_type> sub_tells;
    battery::vector<typename split_type::template tell_type<allocator_type>, allocator_type> split_tells;
    CUDA root_tell_type(const allocator_type& alloc): sub_tells(alloc), split_tells(alloc) {}
    template <class RootTellType>
    CUDA root_tell_type(const RootTellType& other, const allocator_type& alloc)
      : sub_tells(other.sub_tells, alloc), split_tells(other.split_tells, alloc) {}
  };
  root_tell_type root_tell;
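
  // State invariants (informal summary, inferred from the operations below):
  //   `a == nullptr`          iff the search tree is `bot` (empty, fully explored);
  //   `stack.empty() && a`    iff the tree is the singleton root node;
  //   otherwise `stack` records the branching decisions leading from root to `a`.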

public:
  CUDA SearchTree(AType uid, sub_ptr a, split_ptr split, const allocator_type& alloc = allocator_type())
    : atype(uid)
    , a(std::move(a))
    , split(std::move(split))
    , stack(alloc)
    , root(battery::make_tuple(this->a->snapshot(alloc), this->split->snapshot(alloc)))
    , root_tell(alloc)
  {}

  template<class A2, class S2, class Alloc2, class... Allocators>
  CUDA NI SearchTree(const SearchTree<A2, S2, Alloc2>& other, AbstractDeps<Allocators...>& deps)
    : atype(other.atype)
    , a(deps.template clone<sub_type>(other.a))
    , split(deps.template clone<split_type>(other.split))
    , stack(other.stack, deps.template get_allocator<allocator_type>())
    , root(
        sub_snapshot_type(battery::get<0>(other.root), deps.template get_allocator<allocator_type>()),
        split_snapshot_type(battery::get<1>(other.root), deps.template get_allocator<allocator_type>()))
    , root_tell(other.root_tell, deps.template get_allocator<allocator_type>())
  {}

  CUDA AType aty() const {
    return atype;
  }

  CUDA allocator_type get_allocator() const {
    return stack.get_allocator();
  }

  CUDA local::B is_singleton() const {
    return stack.empty() && bool(a);
  }

  CUDA local::B is_top() const {
    // The short-circuiting of && is needed here because `a` may be a null pointer.
    return is_singleton() && a->is_top();
  }

  CUDA local::B is_bot() const {
    return !bool(a);
  }

  template <class Alloc2>
  struct snapshot_type {
    using sub_snap_type = sub_type::template snapshot_type<Alloc2>;
    using split_snap_type = split_type::template snapshot_type<Alloc2>;
    sub_snap_type sub_snap;
    split_snap_type split_snap;
    sub_ptr sub;

    snapshot_type(const snapshot_type<Alloc2>&) = default;
    snapshot_type(snapshot_type<Alloc2>&&) = default;
    snapshot_type<Alloc2>& operator=(snapshot_type<Alloc2>&&) = default;
    snapshot_type<Alloc2>& operator=(const snapshot_type<Alloc2>&) = default;

    template <class SnapshotType>
    CUDA snapshot_type(const SnapshotType& other, const Alloc2& alloc = Alloc2())
      : sub_snap(other.sub_snap, alloc)
      , split_snap(other.split_snap, alloc)
      , sub(other.sub)
    {}

    CUDA snapshot_type(sub_snap_type&& sub_snap, split_snap_type&& split_snap, sub_ptr sub)
      : sub_snap(std::move(sub_snap))
      , split_snap(std::move(split_snap))
      , sub(sub)
    {}
  };

  template <class Alloc2 = allocator_type>
  CUDA snapshot_type<Alloc2> snapshot(const Alloc2& alloc = Alloc2()) const {
    assert(is_singleton());
    return snapshot_type<Alloc2>(a->snapshot(alloc), split->snapshot(alloc), a);
  }
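
  // Note that external snapshots can only be taken when the tree is a singleton
  // (see the assert above); internally, `root` plays the same role for backtracking.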

  template <class Alloc2>
  CUDA void restore(const snapshot_type<Alloc2>& snap) {
    a = snap.sub;
    a->restore(snap.sub_snap);
    split->restore(snap.split_snap);
    stack.clear();
    root = battery::make_tuple(
      a->snapshot(get_allocator()),
      split->snapshot(get_allocator()));
    root_tell = root_tell_type(get_allocator());
  }

public:
  template <bool diagnose = false, class F, class Env, class Alloc2>
  CUDA NI bool interpret_tell(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    assert(!is_bot());
    if(f.is(F::ESeq) && f.esig() == "search") {
      return split->template interpret_tell<diagnose>(f, env, tell.split_tell, diagnostics);
    }
    else {
      return a->template interpret_tell<diagnose>(f, env, tell.sub_tell, diagnostics);
    }
  }
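
  // Formulas rooted in a `search(...)` expression configure the split strategy;
  // every other formula is delegated to the sub-domain.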

  template <bool diagnose = false, class F, class Env, class Alloc2>
  CUDA NI bool interpret_ask(const F& f, Env& env, ask_type<Alloc2>& ask, IDiagnostics& diagnostics) const {
    assert(!is_bot());
    return a->template interpret_ask<diagnose>(f, env, ask, diagnostics);
  }

  template <IKind kind, bool diagnose = false, class F, class Env, class I>
  CUDA NI bool interpret(const F& f, Env& env, I& intermediate, IDiagnostics& diagnostics) const {
    if constexpr(kind == IKind::TELL) {
      return interpret_tell<diagnose>(f, env, intermediate, diagnostics);
    }
    else {
      return interpret_ask<diagnose>(f, env, intermediate, diagnostics);
    }
  }

private:
  template <class Alloc>
  CUDA local::B deduce_current(const tell_type<Alloc>& t) {
    local::B has_changed = a->deduce(t.sub_tell);
    has_changed |= split->deduce(t.split_tell);
    return has_changed;
  }

public:
  template <class Alloc>
  CUDA local::B deduce(const tell_type<Alloc>& t) {
    if(!is_bot()) {
      if(!is_singleton()) {
        // We will add `t` to root when we backtrack (see `pop`) and have a chance to modify the root node.
        root_tell.sub_tells.push_back(t.sub_tell);
        root_tell.split_tells.push_back(t.split_tell);
      }
      // Nevertheless, the rest of the subtree to be explored is still updated with `t`.
      return deduce_current(t);
    }
    return false;
  }
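
  // Example (illustrative): in branch-and-bound (see bab.hpp), each new bound on the
  // objective is deduced in the current subtree and also recorded in `root_tell`, so
  // after backtracking it constrains the remaining nodes as well.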

  /** The deductions on `a` and `split` are not performed here; if needed, they should be done before calling this method.
   * This deduction operator performs one iteration of \f$ \mathit{pop} \circ \mathit{push} \circ \mathit{split} \f$.
   * In short, it moves `a` to the next node of the search tree.
   * Observed from outside this domain, `a` can backtrack, and therefore does not always evolve extensively and monotonically.
   * Nevertheless, the deduction operator of the search tree abstract domain is extensive and monotonic (provided `split` is) over the search tree. */
  CUDA bool deduce() {
    return pop(push(split->split()));
  }
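
  // For instance (sketch): if `split` produces the branch x <= 2 \/ x > 2, `push`
  // stores it and `pop` commits to the left child x <= 2. If `split` returns an
  // empty branch, the node is pruned: `pop` backtracks and commits to the next
  // unexplored right sibling.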

  template <class ExtractionStrategy = NonAtomicExtraction>
  CUDA bool is_extractable(const ExtractionStrategy& strategy = ExtractionStrategy()) const {
    return !is_bot() && a->is_extractable(strategy);
  }

  /** Extract an under-approximation if the last node popped \f$ a \f$ is an under-approximation.
   * If `B` is a search tree, the under-approximation consists of a search tree \f$ \{a\} \f$ with a single node; in that case, `ua` must be different from `bot`. */
  template <class B>
  CUDA void extract(B& ua) const {
    if constexpr(impl::is_search_tree_like<B>::value) {
      assert(bool(ua.a));
      a->extract(*ua.a);
      ua.stack.clear();
      ua.root_tell.sub_tells.clear();
      ua.root_tell.split_tells.clear();
    }
    else {
      a->extract(ua);
    }
  }
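
  // For example (illustrative): with a `SearchTree<VStore<...>, Split>`, `ua` can be
  // another search tree (extraction yields the singleton tree {a}) or a plain
  // sub-domain element such as a `VStore`, filled with the current node.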

  /** If the search tree is empty (\f$ \bot \f$), we return \f$ \bot_U \f$.
   * If the search tree consists of a single node \f$ \{a\} \f$, we return the projection of `x` in that node.
   * Projection in a search tree with multiple nodes is currently not supported (assert false). */
  CUDA local_universe project(AVar x) const {
    if(is_bot()) {
      return local_universe::bot();
    }
    else {
      if(is_singleton()) {
        return a->project(x);
      }
      else {
        assert(false);
        return local_universe::top();
        /** The problem with the method below is that we would need to modify `a`, so `project` would no longer be `const`.
         * Modifying `a` for a projection might be problematic if `a` is currently being refined...
         * Perhaps we need to copy `a` (inefficient), or to request a projection directly in the snapshot. */
        // a->restore(root);
        // local_universe u = a->project(x);
        // BInc has_changed = BInc::bot();
        // replay(has_changed);
        // return u;
      }
    }
  }

  template <class Univ>
  CUDA void project(AVar x, Univ& r) const {
    if(is_bot()) {
      return r.meet_bot();
    }
    else {
      if(is_singleton()) {
        a->project(x, r);
      }
      else {
        assert(false);
      }
    }
  }

  /** \return the current depth of the search tree. The root node has a depth of 0. */
  CUDA size_t depth() const {
    return stack.size();
  }

private:
  /** \return `true` if the current node is pruned, and `false` if a new branch was pushed. */
  CUDA bool push(branch_type&& branch) {
    if(branch.size() > 0) {
      if(is_singleton()) {
        root = battery::make_tuple(
          a->snapshot(get_allocator()),
          split->snapshot(get_allocator()));
      }
      stack.push_back(std::move(branch));
      return false;
    }
    return true;
  }
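
  // An empty branch means `split` could not decompose the current node (e.g., it is
  // already fully assigned or failed), so the node is a leaf and is reported as pruned.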

  /** If the current node was pruned, we need to backtrack; otherwise we just consider the next node along the branch. */
  CUDA bool pop(bool pruned) {
    if(!pruned) {
      return commit_left();
    }
    else {
      bool has_changed = backtrack();
      has_changed |= commit_right();
      return has_changed;
    }
  }

  /** Given the current node, commit to the node on the left.
   * (If we were at the root, a snapshot of it was already saved by `push` before this commit.) */
  CUDA bool commit_left() {
    assert(bool(a));
    return a->deduce(stack.back().next());
  }

  /** We explore the next available node of the search tree (we just backtracked, so it cannot be a left node). */
  CUDA bool commit_right() {
    if(!stack.empty()) {
      assert(bool(a));
      stack.back().next();
      return replay();
    }
    return false;
  }

  /** Goes from the current node to root. */
  CUDA bool backtrack() {
    while(!stack.empty() && !stack.back().has_next()) {
      stack.pop_back();
    }
    if(!stack.empty()) {
      a->restore(battery::get<0>(root));
      split->restore(battery::get<1>(root));
      return deduce_root();
    }
    else if(a) {
      a = nullptr;
      return true;
    }
    return false;
  }
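
  // Backtracking restores the root snapshot instead of undoing deductions one by one;
  // `commit_right` then re-deduces the path to the next node via `replay`.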

  /** We do not always have access to the root node, so formulas added to the search tree are kept in `root_tell`.
   * During backtracking, root is available through `a`, and we add to root the formulas stored so far, so that they automatically become available to the remaining nodes of the search tree. */
  CUDA bool deduce_root() {
    bool has_changed = false;
    if(root_tell.sub_tells.size() > 0 || root_tell.split_tells.size() > 0) {
      for(size_t i = 0; i < root_tell.sub_tells.size(); ++i) {
        has_changed |= a->deduce(root_tell.sub_tells[i]);
      }
      for(size_t i = 0; i < root_tell.split_tells.size(); ++i) {
        has_changed |= split->deduce(root_tell.split_tells[i]);
      }
      root_tell.sub_tells.clear();
      root_tell.split_tells.clear();
      // A new snapshot is necessary since we modified `a` and `split`.
      root = battery::make_tuple(
        a->snapshot(get_allocator()),
        split->snapshot(get_allocator()));
    }
    return has_changed;
  }

  /** Goes from `root` to the new node to be explored. */
  CUDA bool replay() {
    bool has_changed = false;
    for(size_t i = 0; i < stack.size(); ++i) {
      has_changed |= a->deduce(stack[i].current());
    }
    return has_changed;
  }
};

}

#endif
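
How this domain is typically driven (a minimal sketch; `dfs`, `propagate`, and `on_solution` are illustrative names and not part of this header):

template <class Store, class Split, class Propagate, class OnSolution>
void dfs(lala::SearchTree<Store, Split>& st, Store& store,
  Propagate&& propagate, OnSolution&& on_solution)
{
  // `store` is the sub-domain passed to the SearchTree constructor; the caller
  // keeps its own pointer to it, so it can propagate on the current node directly.
  bool changed = true;
  while(changed && !st.is_bot()) {
    propagate(store);        // hypothetical deduction fixpoint on the sub-domain
    if(st.is_extractable()) {
      on_solution(store);    // the current node is an under-approximation (solution)
    }
    changed = st.deduce();   // one step of pop ∘ push ∘ split: move to the next node
  }
}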