Lattice land propagators completion library
Loading...
Searching...
No Matches
pc.hpp
Go to the documentation of this file.
1// Copyright 2021 Pierre Talbot
2
3#ifndef LALA_PC_IPC_HPP
4#define LALA_PC_IPC_HPP
5
6#include "battery/vector.hpp"
7#include "battery/unique_ptr.hpp"
8#include "battery/shared_ptr.hpp"
9#include "battery/root_ptr.hpp"
10#include "battery/allocator.hpp"
11#include "battery/algorithm.hpp"
12
13#include "lala/logic/logic.hpp"
14#include "lala/universes/arith_bound.hpp"
15#include "lala/abstract_deps.hpp"
16#include "lala/vstore.hpp"
17
18#include "terms.hpp"
19#include "formula.hpp"
20
21namespace lala {
22template <class A, class Alloc> class PC;
23namespace impl {
  /** Type trait: `is_pc_like<T>::value` is `true` iff `T` is an instantiation of `PC`.
   * Used by `PC::extract` to decide whether to extract into the sub-domain of the target. */
  template <class>
  struct is_pc_like {
    static constexpr bool value = false;
  };
  /** Specialization recognizing any instantiation `PC<A, Alloc>`. */
  template<class A, class Alloc>
  struct is_pc_like<PC<A, Alloc>> {
    static constexpr bool value = true;
  };
32}
33
34/** PC is an abstract transformer built on top of an abstract domain `A`.
35 It is expected that `A` has a projection function `u = project(x)`.
36 We also expect a `tell(x, u, has_changed)` function to join the abstract universe `u` in the domain of the variable `x`.
37 An example of abstract domain satisfying these requirements is `VStore<Interval<ZInc>>`. */
38template <class A, class Allocator = typename A::allocator_type>
39class PC {
40public:
41 using sub_type = A;
42 using universe_type = typename A::universe_type;
43 using local_universe_type = typename universe_type::local_type;
44 using allocator_type = Allocator;
45 using sub_allocator_type = typename A::allocator_type;
47
48 template <class Alloc>
50 {
52 size_t num_props;
54
57 , sub_snap(std::move(sub_snap))
58 {}
59
64
65 template <class SnapshotType>
66 CUDA snapshot_type(const SnapshotType& other, const Alloc& alloc = Alloc{})
67 : num_props(other.num_props)
68 , sub_snap(other.sub_snap, alloc)
69 {}
70 };
71
72 using sub_ptr = abstract_ptr<sub_type>;
73
74 constexpr static const bool is_abstract_universe = false;
75 constexpr static const bool sequential = sub_type::sequential;
76 constexpr static const bool is_totally_ordered = false;
77 constexpr static const bool preserve_bot = true;
78 // The next properties should be checked more seriously, relying on the sub-domain might be uneccessarily restrictive.
79 constexpr static const bool preserve_top = sub_type::preserve_top;
80 constexpr static const bool preserve_join = sub_type::preserve_join;
81 constexpr static const bool preserve_meet = sub_type::preserve_meet;
82 constexpr static const bool injective_concretization = sub_type::injective_concretization;
83 constexpr static const bool preserve_concrete_covers = sub_type::preserve_concrete_covers;
84 constexpr static const char* name = "PC";
85
86 template <class A2, class Alloc2>
87 friend class PC;
88
89private:
92 template<class Alloc> using term_ptr = battery::unique_ptr<pc::Term<A, Alloc>, Alloc>;
93
94 AType atype;
95 sub_ptr sub;
96 using props_type = battery::vector<formula_type, allocator_type>;
97 battery::root_ptr<props_type, allocator_type> props;
98
99public:
100 template <class Alloc>
101 using formula_seq = battery::vector<pc::Formula<A, Alloc>, Alloc>;
102
103 template <class Alloc>
104 using term_seq = battery::vector<pc::Term<A, Alloc>, Alloc>;
105
106 template <class Alloc, class SubType>
108 SubType sub_value;
110
114
115 CUDA interpreted_type(const SubType& sub_value, const Alloc& alloc = Alloc{})
116 : sub_value(sub_value), props(alloc)
117 {}
118
119 CUDA interpreted_type(const Alloc& alloc = Alloc{})
120 : sub_value(alloc), props(alloc) {}
121
122 template <class InterpretedType>
123 CUDA interpreted_type(const InterpretedType& other, const Alloc& alloc = Alloc{})
124 : sub_value(other.sub_value, alloc)
125 , props(other.props, alloc)
126 {}
127
128 template <class Alloc2, class SubType2>
129 friend struct interpreted_type;
130 };
131
132 template <class Alloc>
134
135 template <class Alloc>
137
  /** Builds a PC element of abstract type `atype` on top of the sub-domain `sub`,
   * with an initially empty set of propagators allocated with `alloc`. */
  CUDA PC(AType atype, sub_ptr sub, const allocator_type& alloc = allocator_type{})
    : atype(atype), sub(std::move(sub))
    , props(battery::allocate_root<props_type, allocator_type>(alloc, alloc)) {}
141
142 CUDA PC(PC&& other)
143 : atype(other.atype)
144 , props(std::move(other.props))
145 , sub(std::move(other.sub))
146 {}
147
148private:
149 // When activated (`deps.is_shared_copy()`), we avoid copying the propagators and share them with the ones of the root `other`.
150 // This allows to save up memory and to avoid contention on L2 cache among blocks.
151 template<class A2, class Alloc2, class... Allocators>
152 CUDA static battery::root_ptr<props_type, allocator_type> init_props(const PC<A2, Alloc2>& other, AbstractDeps<Allocators...>& deps) {
153 auto alloc = deps.template get_allocator<allocator_type>();
154 if constexpr(std::is_same_v<allocator_type, Alloc2>) {
155 if(deps.is_shared_copy()) {
156 return other.props;
157 }
158 }
159 auto r = battery::allocate_root<props_type, allocator_type>(alloc, *(other.props), alloc);
160 return std::move(r);
161 }
162
163public:
  /** Copy constructor parametrized by abstract dependencies.
   * The sub-domain is cloned through `deps`, and the propagators are either shared
   * with `other` or deep-copied, depending on `deps.is_shared_copy()` (see `init_props`). */
  template<class A2, class Alloc2, class... Allocators>
  CUDA PC(const PC<A2, Alloc2>& other, AbstractDeps<Allocators...>& deps)
    : atype(other.atype)
    , sub(deps.template clone<A>(other.sub))
    , props(init_props(other, deps))
  {}
170
172 return props.get_allocator();
173 }
174
  /** Returns the abstract type identifier of this abstract element. */
  CUDA AType aty() const {
    return atype;
  }
178
  /** Creates a bottom element: no propagator, on top of a bottom sub-domain. */
  CUDA static this_type bot(AType atype = UNTYPED,
    AType atype_sub = UNTYPED,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    return PC{atype, battery::allocate_shared<sub_type>(sub_alloc, sub_type::bot(atype_sub, sub_alloc)), alloc};
  }
186
  /** A special symbolic element representing top.
   * No propagator is created; the sub-domain is initialized to its own top element. */
  CUDA static this_type top(AType atype = UNTYPED,
    AType atype_sub = UNTYPED,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    return PC{atype, battery::allocate_shared<sub_type>(sub_alloc, sub_type::top(atype_sub, sub_alloc)), alloc};
  }
195
  /** Creates a bottom element whose abstract types are derived from the environment `env`.
   * NOTE(review): `env.extends_abstract_dom()` is called twice, presumably so that the
   * sub-domain and this element receive two distinct abstract types — confirm against `Env`. */
  template <class Env>
  CUDA static this_type bot(Env& env,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    AType atype_sub = env.extends_abstract_dom();
    AType atype = env.extends_abstract_dom();
    return bot(atype, atype_sub, alloc, sub_alloc);
  }
205
  /** Creates a top element whose abstract types are derived from the environment `env`.
   * NOTE(review): like `bot(Env&)`, two calls to `extends_abstract_dom()` presumably
   * reserve two distinct abstract types — confirm against `Env`. */
  template <class Env>
  CUDA static this_type top(Env& env,
    const allocator_type& alloc = allocator_type(),
    const sub_allocator_type& sub_alloc = sub_allocator_type())
  {
    AType atype_sub = env.extends_abstract_dom();
    AType atype = env.extends_abstract_dom();
    return top(atype, atype_sub, alloc, sub_alloc);
  }
215
216private:
217 template <bool diagnose, class F, class Alloc>
218 CUDA bool interpret_unary(const F& f, term_seq<Alloc>& intermediate, IDiagnostics& diagnostics, term_ptr<Alloc>&& x) const {
219 using T = pc::Term<A, Alloc>;
220 Alloc alloc = x.get_allocator();
221 switch(f.sig()) {
222 case NEG: intermediate.push_back(T::make_neg(std::move(x))); break;
223 case ABS: intermediate.push_back(T::make_abs(std::move(x))); break;
224 default: RETURN_INTERPRETATION_ERROR("Unsupported unary symbol in a term.");
225 }
226 return true;
227 }
228
  /** Interprets a binary arithmetic operation over two already-interpreted operands
   * `x` and `y`, pushing the resulting term onto `intermediate`.
   * Supported symbols: +, -, *, the four division flavors (truncated, floor, ceil,
   * Euclidean), min and max.
   * @return `true` on success, `false` (with a diagnostic) otherwise. */
  template <bool diagnose, class F, class Alloc>
  CUDA bool interpret_binary(const F& f, term_seq<Alloc>& intermediate, IDiagnostics& diagnostics, term_ptr<Alloc>&& x, term_ptr<Alloc>&& y) const
  {
    using T = pc::Term<A, Alloc>;
    switch(f.sig()) {
      case ADD: intermediate.push_back(T::make_add(std::move(x), std::move(y))); break;
      case SUB: intermediate.push_back(T::make_sub(std::move(x), std::move(y))); break;
      case MUL: intermediate.push_back(T::make_mul(std::move(x), std::move(y))); break;
      case TDIV: intermediate.push_back(T::make_tdiv(std::move(x), std::move(y))); break;
      case FDIV: intermediate.push_back(T::make_fdiv(std::move(x), std::move(y))); break;
      case CDIV: intermediate.push_back(T::make_cdiv(std::move(x), std::move(y))); break;
      case EDIV: intermediate.push_back(T::make_ediv(std::move(x), std::move(y))); break;
      case MIN: intermediate.push_back(T::make_min(std::move(x), std::move(y))); break;
      case MAX: intermediate.push_back(T::make_max(std::move(x), std::move(y))); break;
      default: RETURN_INTERPRETATION_ERROR("Unsupported binary symbol in a term.");
    }
    return true;
  }
247
  /** Interprets an n-ary operation (n > 2) over already-interpreted `operands`,
   * pushing the resulting term onto `intermediate`.
   * Only n-ary sum and n-ary product are supported.
   * @return `true` on success, `false` (with a diagnostic) otherwise. */
  template <bool diagnose, class F, class Alloc>
  CUDA bool interpret_nary(const F& f, term_seq<Alloc>& intermediate, IDiagnostics& diagnostics, term_seq<Alloc>&& operands) const
  {
    using T = pc::Term<A, Alloc>;
    switch(f.sig()) {
      case ADD: intermediate.push_back(T::make_naryadd(std::move(operands))); break;
      case MUL: intermediate.push_back(T::make_narymul(std::move(operands))); break;
      default: RETURN_INTERPRETATION_ERROR("Unsupported nary symbol in a term.");
    }
    return true;
  }
259
  /** Interprets the n-ary formula `f` as a term: each operand is first interpreted as a
   * term; if that fails, it is interpreted as a formula and wrapped into a term.
   * The operands are then combined via `interpret_unary`, `interpret_binary` or
   * `interpret_nary` depending on their number.
   * @return `true` on success, `false` (with a diagnostic) otherwise. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_sequence(const F& f, Env& env, term_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const
  {
    using T = pc::Term<A, Alloc>;
    Alloc alloc = intermediate.get_allocator();
    term_seq<Alloc> subterms = term_seq<Alloc>(alloc);
    formula_seq<Alloc> subformulas = formula_seq<Alloc>(alloc);
    for(int i = 0; i < f.seq().size(); ++i) {
      // We first try to interpret the formula `f.seq(i)` as a term, if that fails, try as a formula and wrap it in a term.
      if(!interpret_term<kind, diagnose>(f.seq(i), env, subterms, diagnostics)) {
        // `neg_context = true`: the wrapped formula may have to be queried for disentailment.
        if(!interpret_formula<kind, diagnose>(f.seq(i), env, subformulas, diagnostics, true)) {
          return false;
        }
        else {
          // On success, `interpret_formula` grew `subformulas` by exactly one element.
          subterms.push_back(T::make_formula(
            battery::allocate_unique<pc::Formula<A, Alloc>>(alloc, std::move(subformulas.back()))));
        }
      }
    }
    // Dispatch on arity: unary, binary, or n-ary combination of the interpreted operands.
    if(subterms.size() == 1) {
      return interpret_unary<diagnose>(f,
        intermediate,
        diagnostics,
        battery::allocate_unique<T>(alloc, std::move(subterms[0])));
    }
    else if(subterms.size() == 2) {
      return interpret_binary<diagnose>(f,
        intermediate,
        diagnostics,
        battery::allocate_unique<T>(alloc, std::move(subterms[0])),
        battery::allocate_unique<T>(alloc, std::move(subterms[1])));
    }
    else {
      return interpret_nary<diagnose>(f,
        intermediate,
        diagnostics,
        std::move(subterms));
    }
  }
299
300 template <IKind kind, bool diagnose, class F, class Env, class Alloc>
301 CUDA bool interpret_term(const F& f, Env& env, term_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
302 using T = pc::Term<A, Alloc>;
303 using F2 = TFormula<Alloc>;
304 if(f.is_variable()) {
305 AVar avar;
306 if(env.template interpret<diagnose>(f, avar, diagnostics)) {
307 intermediate.push_back(T::make_var(avar));
308 return true;
309 }
310 else {
311 return false;
312 }
313 }
314 else if(f.is_constant()) {
315 auto constant = F2::make_binary(F2::make_avar(AVar{}), EQ, f, UNTYPED, intermediate.get_allocator());
317 if(local_universe_type::template interpret<kind, diagnose>(constant, env, k, diagnostics)) {
318 intermediate.push_back(T::make_constant(std::move(k)));
319 return true;
320 }
321 else {
322 RETURN_INTERPRETATION_ERROR("Constant in a term could not be interpreted in the underlying abstract universe.");
323 }
324 }
325 else if(f.is(F::Seq)) {
326 return interpret_sequence<kind, diagnose>(f, env, intermediate, diagnostics);
327 }
328 else {
329 RETURN_INTERPRETATION_ERROR("The shape of the formula is not supported in PC, and could not be interpreted as a term.");
330 }
331 }
332
  /** Interprets the logical negation of `f` by first computing `not f` syntactically
   * (via `negate`) and then interpreting the negated formula.
   * Fails when the negation cannot be computed. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_negation(const F& f, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics, bool neg_context) const {
    auto nf = negate(f);
    if(nf.has_value()) {
      return interpret_formula<kind, diagnose>(*nf, env, intermediate, diagnostics, neg_context);
    }
    else {
      RETURN_INTERPRETATION_ERROR("We must query this formula for disentailement, but we could not compute its negation.");
    }
  }
343
  /** Interprets both subformulas `f` and `g`, then combines them with the factory
   * `create` (e.g. conjunction, disjunction, ...) into a single formula pushed onto
   * `intermediate`.
   * On success, `operands` holds exactly the two interpreted subformulas (each
   * successful interpretation grows it by one). */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc, class Create>
  CUDA bool interpret_binary_logical_connector(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics, bool neg_context, Create&& create) const {
    using PF = pc::Formula<A, Alloc>;
    Alloc alloc = intermediate.get_allocator();
    formula_seq<Alloc> operands = formula_seq<Alloc>(alloc);
    if( interpret_formula<kind, diagnose>(f, env, operands, diagnostics, neg_context)
     && interpret_formula<kind, diagnose>(g, env, operands, diagnostics, neg_context))
    {
      intermediate.push_back(create(
        battery::allocate_unique<PF>(alloc, std::move(operands[0])),
        battery::allocate_unique<PF>(alloc, std::move(operands[1]))));
      return true;
    }
    else {
      return false;
    }
  }
361
  /** Interprets `f /\ g`; the negation context is propagated to both operands. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_conjunction(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics, bool neg_context) const {
    using PF = pc::Formula<A, Alloc>;
    return interpret_binary_logical_connector<kind, diagnose>(f, g, env, intermediate, diagnostics, neg_context,
      [](auto&& l, auto&& k) { return PF::make_conj(std::move(l), std::move(k)); });
  }
368
  /** Interprets `f \/ g`; the operands are interpreted in a negation context
   * (disentailment of a disjunct may have to be checked). */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_disjunction(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
    using PF = pc::Formula<A, Alloc>;
    return interpret_binary_logical_connector<kind, diagnose>(f, g, env, intermediate, diagnostics, true,
      [](auto&& l, auto&& k) { return PF::make_disj(std::move(l), std::move(k)); });
  }
375
  /** Interprets `f <=> g`; the operands are interpreted in a negation context. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_biconditional(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
    using PF = pc::Formula<A, Alloc>;
    return interpret_binary_logical_connector<kind, diagnose>(f, g, env, intermediate, diagnostics, true,
      [](auto&& l, auto&& k) { return PF::make_bicond(std::move(l), std::move(k)); });
  }
382
  /** Interprets `f => g`; the operands are interpreted in a negation context. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_implication(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
    using PF = pc::Formula<A, Alloc>;
    return interpret_binary_logical_connector<kind, diagnose>(f, g, env, intermediate, diagnostics, true,
      [](auto&& l, auto&& k) { return PF::make_imply(std::move(l), std::move(k)); });
  }
389
  /** Interprets `f xor g`; the operands are interpreted in a negation context. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_xor(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
    using PF = pc::Formula<A, Alloc>;
    return interpret_binary_logical_connector<kind, diagnose>(f, g, env, intermediate, diagnostics, true,
      [](auto&& l, auto&& k) { return PF::make_xor(std::move(l), std::move(k)); });
  }
396
397 template <IKind kind, bool diagnose, class F, class Env, class Alloc, class Create>
398 CUDA bool interpret_binary_predicate(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics, Create&& create) const
399 {
400 using PF = pc::Formula<A, Alloc>;
401 using T = pc::Term<A, Alloc>;
402 Alloc alloc = intermediate.get_allocator();
403 term_seq<Alloc> operands = term_seq<Alloc>(alloc);
404 if( interpret_term<kind, diagnose>(f, env, operands, diagnostics)
405 && interpret_term<kind, diagnose>(g, env, operands, diagnostics))
406 {
407 intermediate.push_back(create(std::move(operands[0]), std::move(operands[1])));
408 return true;
409 }
410 else {
411 return false;
412 }
413 }
414
  /** Interprets the equality predicate `f = g` over terms. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_equality(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
    using PF = pc::Formula<A, Alloc>;
    return interpret_binary_predicate<kind, diagnose>(f, g, env, intermediate, diagnostics,
      [](auto&& l, auto&& k) { return PF::make_eq(std::move(l), std::move(k)); });
  }
421
  /** Interprets the disequality predicate `f != g` over terms. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_disequality(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
    using PF = pc::Formula<A, Alloc>;
    return interpret_binary_predicate<kind, diagnose>(f, g, env, intermediate, diagnostics,
      [](auto&& l, auto&& k) { return PF::make_neq(std::move(l), std::move(k)); });
  }
428
  /** Interprets `f <= g` over terms. `f >= g` is handled by the caller by swapping operands. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_inequalityLEQ(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
    using PF = pc::Formula<A, Alloc>;
    return interpret_binary_predicate<kind, diagnose>(f, g, env, intermediate, diagnostics,
      [](auto&& l, auto&& k) { return PF::make_leq(std::move(l), std::move(k)); });
  }
435
  /** Interprets `f > g` over terms. `f < g` is handled by the caller by swapping operands. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_inequalityGT(const F& f, const F& g, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
    using PF = pc::Formula<A, Alloc>;
    return interpret_binary_predicate<kind, diagnose>(f, g, env, intermediate, diagnostics,
      [](auto&& l, auto&& k) { return PF::make_gt(std::move(l), std::move(k)); });
  }
442
  /** Interprets a Boolean variable literal: `f` must be a variable resolvable in `env`.
   * @tparam neg `true` to create the negative literal `not x`, `false` for the positive literal `x`.
   * @return `true` on success, `false` if the variable could not be resolved. */
  template <bool neg, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_literal(const F& f, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
    using PF = pc::Formula<A, Alloc>;
    AVar avar{};
    if(env.template interpret<diagnose>(f, avar, diagnostics)) {
      if constexpr(neg) {
        intermediate.push_back(PF::make_nvarlit(avar));
      }
      else {
        intermediate.push_back(PF::make_pvarlit(avar));
      }
      return true;
    }
    return false;
  }
458
459 /** Given an interval occuring in a set (LogicSet), we decompose it as a formula. */
460 template <class F, class Alloc>
461 CUDA F itv_to_formula(AType ty, const F& f, const battery::tuple<F, F>& itv, const Alloc& alloc) const {
462 using F2 = TFormula<Alloc>;
463 if(battery::get<0>(itv) == battery::get<1>(itv)) {
464 return F2::make_binary(f, EQ, battery::get<0>(itv), ty, alloc);
465 }
466 else {
467 return
468 F2::make_binary(
469 F2::make_binary(f, GEQ, battery::get<0>(itv), ty, alloc),
470 AND,
471 F2::make_binary(f, LEQ, battery::get<1>(itv), ty, alloc),
472 ty,
473 alloc);
474 }
475 }
476
  /** Interprets a membership constraint `x in S` by decomposing the set `S`
   * (a union of intervals) into a disjunction of interval constraints
   * (see `itv_to_formula`). A single-interval set avoids the disjunction. */
  template <IKind kind, bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_in_decomposition(const F& f, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics, bool neg_context = false) const {
    assert(f.seq(1).is(F::S));
    using F2 = TFormula<Alloc>;
    Alloc alloc = intermediate.get_allocator();
    // Decompose IN into disjunction.
    const auto& set = f.seq(1).s();
    if(set.size() == 1) {
      return interpret_formula<kind, diagnose>(
        itv_to_formula(f.type(), f.seq(0), set[0], alloc),
        env, intermediate, diagnostics, neg_context);
    }
    else {
      typename F2::Sequence disjunction = typename F2::Sequence(alloc);
      disjunction.reserve(set.size());
      for(size_t i = 0; i < set.size(); ++i) {
        disjunction.push_back(itv_to_formula(f.type(), f.seq(0), set[i], alloc));
      }
      return interpret_formula<kind, diagnose>(
        F2::make_nary(OR, std::move(disjunction), f.type()),
        env, intermediate, diagnostics, neg_context);
    }
  }
500
501 template <class F>
502 CUDA F binarize(const F& f, size_t i) const {
503 assert(f.is(F::Seq) && f.seq().size() >= 2);
504 if(i + 2 == f.seq().size()) {
505 return F::make_binary(f.seq(i), f.sig(), f.seq(i+1), f.type(), f.seq().get_allocator(), false);
506 }
507 else {
508 return F::make_binary(f.seq(i), f.sig(), binarize(f, i+1), f.type(), f.seq().get_allocator(), false);
509 }
510 }
511
  /** Interprets `f` entirely in the sub-domain, as an "abstract element" formula.
   * Both `f` and its negation must be interpretable, each under both TELL and ASK
   * kinds, so the resulting formula can be told, asked, and queried for disentailment. */
  template <bool diagnose, class F, class Env, class Alloc>
  CUDA bool interpret_abstract_element(const F& f, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics) const {
    auto nf = negate(f);
    if(!nf.has_value()) {
      RETURN_INTERPRETATION_ERROR("We must query this formula for disentailement, but we could not compute its negation.");
    }
    else {
      typename A::template tell_type<Alloc> tellv(intermediate.get_allocator());
      typename A::template tell_type<Alloc> not_tellv(intermediate.get_allocator());
      typename A::template tell_type<Alloc> askv(intermediate.get_allocator());
      typename A::template tell_type<Alloc> not_askv(intermediate.get_allocator());
      if( sub->template interpret<IKind::TELL, diagnose>(f, env, tellv, diagnostics)
       && sub->template interpret<IKind::TELL, diagnose>(nf.value(), env, not_tellv, diagnostics)
       && sub->template interpret<IKind::ASK, diagnose>(f, env, askv, diagnostics)
       && sub->template interpret<IKind::ASK, diagnose>(nf.value(), env, not_askv, diagnostics))
      {
        using PF = pc::Formula<A, Alloc>;
        intermediate.push_back(PF::make_abstract_element(std::move(tellv), std::move(not_tellv), std::move(askv), std::move(not_askv)));
        return true;
      }
      return false;
    }
  }
535
536 /** We interpret the formula `f` in the value `intermediate`, note that we only grow `intermediate` by 0 (if interpretation failed) or 1 (if it succeeds).
537 * It is convenient to use a vector because it carries the allocator, and it is the type of the `props` component of the tell/ask type.
538 */
539 template <IKind kind, bool diagnose, class F, class Env, class Alloc>
540 CUDA bool interpret_formula(const F& f, Env& env, formula_seq<Alloc>& intermediate, IDiagnostics& diagnostics, bool neg_context = false) const {
541 using PF = pc::Formula<A, Alloc>;
542 using F2 = TFormula<Alloc>;
543 Alloc alloc = intermediate.get_allocator();
544 if(f.type() != aty() && !f.is_untyped() && !f.is_variable()) {
545 if(!neg_context) {
546 RETURN_INTERPRETATION_ERROR("The type of the formula does not match the type of this abstract domain.");
547 }
548 return interpret_abstract_element<diagnose>(f, env, intermediate, diagnostics);
549 }
550 if(f.is_false()) {
551 intermediate.push_back(PF::make_false());
552 return true;
553 }
554 else if(f.is_true()) {
555 intermediate.push_back(PF::make_true());
556 return true;
557 }
558 // Negative literal
559 else if(f.is(F::Seq) && f.seq().size() == 1 && f.sig() == NOT &&
560 f.seq(0).is_variable())
561 {
562 return interpret_literal<true, diagnose>(f.seq(0), env, intermediate, diagnostics);
563 }
564 // Positive literal
565 else if(f.is_variable()) {
566 return interpret_literal<false, diagnose>(f, env, intermediate, diagnostics);
567 }
568 // Logical negation
569 else if(f.is(F::Seq) && f.seq().size() == 1 && f.sig() == NOT) {
570 return interpret_negation<kind, diagnose>(f.seq(0), env, intermediate, diagnostics, neg_context);
571 }
572 // In constraint
573 else if(f.is(F::Seq) && f.sig() == IN) {
574 return interpret_in_decomposition<kind, diagnose>(f, env, intermediate, diagnostics, neg_context);
575 }
576 // Binarize logical connectors
577 else if(f.is(F::Seq) && f.seq().size() > 2 && (f.sig() == AND || f.sig() == OR || f.sig() == EQUIV)) {
578 return interpret_formula<kind, diagnose>(binarize(f,0), env, intermediate, diagnostics, neg_context);
579 }
580 else if(f.is_binary()) {
581 Sig sig = f.sig();
582 switch(sig) {
583 case AND: return interpret_conjunction<kind, diagnose>(f.seq(0), f.seq(1), env, intermediate, diagnostics, neg_context);
584 case OR: return interpret_disjunction<kind, diagnose>(f.seq(0), f.seq(1), env, intermediate, diagnostics);
585 case EQUIV: return interpret_biconditional<kind, diagnose>(f.seq(0), f.seq(1), env, intermediate, diagnostics);
586 case IMPLY: return interpret_implication<kind, diagnose>(f.seq(0), f.seq(1), env, intermediate, diagnostics);
587 case XOR: return interpret_xor<kind, diagnose>(f.seq(0), f.seq(1), env, intermediate, diagnostics);
588 case EQ: {
589 // Whenever an operand of `=` is a formula with logical connectors, we interpret `=` as `<=>`.
590 if(f.seq(0).is_logical() || f.seq(1).is_logical()) {
591 return interpret_biconditional<kind, diagnose>(f.seq(0), f.seq(1), env, intermediate, diagnostics);
592 }
593 else {
594 return interpret_equality<kind, diagnose>(f.seq(0), f.seq(1), env, intermediate, diagnostics);
595 }
596 }
597 case NEQ: return interpret_disequality<kind, diagnose>(f.seq(0), f.seq(1), env, intermediate, diagnostics);
598 case LEQ: return interpret_inequalityLEQ<kind, diagnose>(f.seq(0), f.seq(1), env, intermediate, diagnostics);
599 case GT: return interpret_inequalityGT<kind, diagnose>(f.seq(0), f.seq(1), env, intermediate, diagnostics);
600 case GEQ: return interpret_inequalityLEQ<kind, diagnose>(f.seq(1), f.seq(0), env, intermediate, diagnostics);
601 case LT: return interpret_inequalityGT<kind, diagnose>(f.seq(1), f.seq(0), env, intermediate, diagnostics);
602 default:
603 RETURN_INTERPRETATION_ERROR("Unsupported binary symbol in a formula.");
604 }
605 }
606 RETURN_INTERPRETATION_ERROR("The shape of this formula is not supported.");
607 }
608
609public:
  /** Interprets `f`, first in the sub-domain, then (on failure or for extra precision)
   * as a PC propagator pushed into `intermediate.props`.
   * The formula's type is temporarily rewritten to the sub-domain's type for the call
   * to `sub->interpret` (via `const_cast`) and restored afterwards, so `f` is
   * observably unmodified on return. */
  template <IKind kind, bool diagnose = false, class F, class Env, class I>
  CUDA NI bool interpret(const F& f, Env& env, I& intermediate, IDiagnostics& diagnostics) const {
    size_t error_context = 0;
    if constexpr(diagnose) {
      diagnostics.add_suberror(IDiagnostics(false, name, "Uninterpretable formula in both PC and its sub-domain.", f));
      error_context = diagnostics.num_suberrors();
    }
    bool res = false;
    AType current = f.type();
    const_cast<F&>(f).type_as(sub->aty()); // We restore the type after the call to sub->interpret.
    if(sub->template interpret<kind, diagnose>(f, env, intermediate.sub_value, diagnostics)) {
      // A successful interpretation in the sub-domain does not mean it is interpreted exactly.
      // Sometimes, we can improve the precision by interpreting it in PC.
      // This is the case of `x in S` predicate for sub-domain that do not preserve meet.
      if(!(f.is_binary() && f.sig() == IN && f.seq(0).is_variable() && f.seq(1).is(F::S) && f.seq(1).s().size() > 1)) {
        res = true; // it is not a formula `x in S`.
      }
      else {
        res = universe_type::preserve_join; // it is `x in S` but it preserves join.
      }
    }
    const_cast<F&>(f).type_as(current);
    if(!res) {
      res = interpret_formula<kind, diagnose>(f, env, intermediate.props, diagnostics);
    }
    if constexpr(diagnose) {
      diagnostics.merge(res, error_context);
    }
    return res;
  }
640
  /** PC expects a non-conjunctive formula \f$ c \f$ which can either be interpreted in the sub-domain `A` or in the current domain.
   * Thin wrapper around `interpret` with `kind = TELL`.
   */
  template <bool diagnose = false, class F, class Env, class Alloc2>
  CUDA NI bool interpret_tell(const F& f, Env& env, tell_type<Alloc2>& tell, IDiagnostics& diagnostics) const {
    return interpret<IKind::TELL, diagnose>(f, env, tell, diagnostics);
  }
647
  /** Thin wrapper around `interpret` with `kind = ASK`.
   * The `const_cast` discards constness on `env` to reuse `interpret`; the ASK
   * interpretation is presumably non-mutating on `env` — confirm against `Env`. */
  template <bool diagnose = false, class F, class Env, class Alloc2>
  CUDA NI bool interpret_ask(const F& f, const Env& env, ask_type<Alloc2>& ask, IDiagnostics& diagnostics) const {
    return interpret<IKind::ASK, diagnose>(f, const_cast<Env&>(env), ask, diagnostics);
  }
652
  /** Note that we cannot add propagators in parallel (but modifying the underlying domain is ok).
   This is a current limitation that we should fix later on.
   Notes for later:
   * To implement "telling of propagators", we would need to check if a propagator has already been added or not (for idempotency).
   * 1. Walk through the existing propagators to check which ones are already in.
   * 2. If a propagator has the same shape but different constant `U`, meet them in place. */
  template <class Alloc2>
  CUDA local::B deduce(const tell_type<Alloc2>& t) {
    // Deduce the part of `t` interpreted in the sub-domain.
    local::B has_changed = sub->deduce(t.sub_value);
    if(t.props.size() > 0) {
      auto& props2 = *props;
      size_t n = props2.size();
      props2.reserve(n + t.props.size());
      // Copy the new propagators using this element's allocator.
      for(int i = 0; i < t.props.size(); ++i) {
        props2.push_back(formula_type(t.props[i], get_allocator()));
      }
      // Precompute lengths so the sort comparator does not recompute them.
      battery::vector<size_t> lengths(props2.size());
      for(int i = 0; i < props2.size(); ++i) {
        lengths[i] = props2[i].length();
      }
      // Sort propagators by kind, then by length within a kind.
      // NOTE(review): the comparator reads `lengths[i]` by index into `props2`;
      // this is only consistent if `battery::sorti` permutes by index (or keeps
      // `lengths` in sync with the moves) — confirm against battery's `sorti`.
      battery::sorti(props2,
        [&](int i, int j) {
          return props2[i].kind() < props2[j].kind() || (props2[i].kind() == props2[j].kind() && lengths[i] < lengths[j]);
        });
      // Adding at least one propagator is always a change.
      return true;
    }
    return has_changed;
  }
681
  /** Joins `dom` into the domain of variable `x`; forwarded to the sub-domain. */
  CUDA bool embed(AVar x, const universe_type& dom) {
    return sub->embed(x, dom);
  }
685
  /** `true` iff every propagator of `t` is entailed and the sub-domain part of `t` is entailed. */
  template <class Alloc2>
  CUDA local::B ask(const ask_type<Alloc2>& t) const {
    for(int i = 0; i < t.props.size(); ++i) {
      if(!t.props[i].ask(*sub)) {
        return false;
      }
    }
    return sub->ask(t.sub_value);
  }
695
  /** `true` iff the `i`-th propagator of this element is entailed in the sub-domain. */
  CUDA local::B ask(size_t i) const {
    return (*props)[i].ask(*sub);
  }
699
  /** Total number of deduction functions: those of the sub-domain plus one per propagator. */
  CUDA size_t num_deductions() const {
    return sub->num_deductions() + props->size();
  }
703
  /** Runs the `i`-th deduction function: indices below `sub->num_deductions()`
   * are forwarded to the sub-domain, the rest index into this element's propagators.
   * No deduction is performed when the element is already top. */
  CUDA local::B deduce(size_t i) {
    assert(i < num_deductions());
    if(is_top()) { return false; }
    else if(i < sub->num_deductions()) {
      return sub->deduce(i);
    }
    else {
      return (*props)[i - sub->num_deductions()].deduce(*sub);
    }
  }
714
715 // Functions forwarded to the sub-domain `A`.
716
  /** `true` if the underlying abstract element is bot, `false` otherwise. */
  CUDA local::B is_bot() const {
    return sub->is_bot();
  }
721
  /** `true` if the underlying abstract element is top and there is no deduction function, `false` otherwise. */
  CUDA local::B is_top() const {
    return sub->is_top() && props->size() == 0;
  }
726
  /** Projection of the `x`-th variable; forwarded to the sub-domain. */
  CUDA auto /* universe_type or const universe_type& */ operator[](int x) const {
    return (*sub)[x];
  }
730
  /** Projection of the abstract variable `x`; forwarded to the sub-domain. */
  CUDA auto /* universe_type or const universe_type& */ project(AVar x) const {
    return sub->project(x);
  }
734
  /** Projects the abstract variable `x` into `u`; forwarded to the sub-domain. */
  template <class Univ>
  CUDA void project(AVar x, Univ& u) const {
    sub->project(x, u);
  }
739
  /** Number of variables; forwarded to the sub-domain (PC adds no variable of its own). */
  CUDA size_t vars() const {
    return sub->vars();
  }
743
  /** Takes a snapshot: the current number of propagators plus a snapshot of the sub-domain.
   * Restoring (see `restore`) pops back to this propagator count. */
  template <class Alloc2 = allocator_type>
  CUDA snapshot_type<Alloc2> snapshot(const Alloc2& alloc = Alloc2()) const {
    return snapshot_type<Alloc2>(props->size(), sub->snapshot(alloc));
  }
748
749 template <class Alloc2>
750 CUDA void restore(const snapshot_type<Alloc2>& snap) {
751 size_t n = props->size();
752 for(size_t i = snap.num_props; i < n; ++i) {
753 props->pop_back();
754 }
755 sub->restore(snap.sub_snap);
756 }
757
  /** An abstract element is extractable when it is not equal to bot, if all propagators are entailed and if the underlying abstract element is extractable. */
  template <class ExtractionStrategy = NonAtomicExtraction>
  CUDA bool is_extractable(const ExtractionStrategy& strategy = ExtractionStrategy()) const {
    if(is_bot()) {
      return false;
    }
    // All propagators must be entailed in the sub-domain.
    for(int i = 0; i < props->size(); ++i) {
      if(!ask(i)) {
        return false;
      }
    }
    return sub->is_extractable(strategy);
  }
771
  /** Extract the current element into `ua`.
   * \pre `is_extractable()` must be `true`.
   * For efficiency reason, if `B` is a propagator completion, the propagators are not copied in `ua`.
   * (It is OK, since they are entailed, they don't bring information anymore.) */
  template <class B>
  CUDA void extract(B& ua) const {
    if constexpr(impl::is_pc_like<B>::value) {
      // Target is a PC: extract sub-domain to sub-domain, skip the (entailed) propagators.
      sub->extract(*ua.sub);
    }
    else {
      sub->extract(ua);
    }
  }
785
  /** Deinterprets this element into a logical formula: the conjunction of the
   * sub-domain's formula and one formula per propagator. */
  template<class Env, class Allocator2 = typename Env::allocator_type>
  CUDA NI TFormula<Allocator2> deinterpret(const Env& env, Allocator2 allocator = Allocator2()) const {
    using F = TFormula<Allocator2>;
    typename F::Sequence seq{allocator};
    seq.push_back(sub->deinterpret(env, allocator));
    for(int i = 0; i < props->size(); ++i) {
      seq.push_back((*props)[i].deinterpret(*sub, env, aty(), allocator));
    }
    return F::make_nary(AND, std::move(seq), aty());
  }
796
  /** Deinterprets an interpreted (tell/ask) value `intermediate` into a logical formula:
   * the conjunction of its sub-domain part and of each of its propagators. */
  template<class I, class Env, class Allocator2 = typename Env::allocator_type>
  CUDA NI TFormula<Allocator2> deinterpret(const I& intermediate, const Env& env, Allocator2 allocator = Allocator2()) const {
    using F = TFormula<Allocator2>;
    typename F::Sequence seq{allocator};
    seq.push_back(sub->deinterpret(intermediate.sub_value, env, allocator));
    for(int i = 0; i < intermediate.props.size(); ++i) {
      seq.push_back(intermediate.props[i].deinterpret(*sub, env, aty(), allocator));
    }
    return F::make_nary(AND, std::move(seq), aty());
  }
807};
808
809}
810
811#endif
Definition pc.hpp:39
CUDA size_t vars() const
Definition pc.hpp:740
CUDA AType aty() const
Definition pc.hpp:175
static constexpr const char * name
Definition pc.hpp:84
A sub_type
Definition pc.hpp:41
CUDA local::B deduce(size_t i)
Definition pc.hpp:704
CUDA allocator_type get_allocator() const
Definition pc.hpp:171
CUDA PC(const PC< A2, Alloc2 > &other, AbstractDeps< Allocators... > &deps)
Definition pc.hpp:165
static CUDA this_type bot(AType atype=UNTYPED, AType atype_sub=UNTYPED, const allocator_type &alloc=allocator_type(), const sub_allocator_type &sub_alloc=sub_allocator_type())
Definition pc.hpp:179
static constexpr const bool preserve_bot
Definition pc.hpp:77
CUDA PC(PC &&other)
Definition pc.hpp:142
typename A::allocator_type sub_allocator_type
Definition pc.hpp:45
CUDA local::B deduce(const tell_type< Alloc2 > &t)
Definition pc.hpp:660
CUDA NI bool interpret(const F &f, Env &env, I &intermediate, IDiagnostics &diagnostics) const
Definition pc.hpp:611
CUDA void project(AVar x, Univ &u) const
Definition pc.hpp:736
static CUDA this_type top(Env &env, const allocator_type &alloc=allocator_type(), const sub_allocator_type &sub_alloc=sub_allocator_type())
Definition pc.hpp:207
battery::vector< pc::Formula< A, Alloc >, Alloc > formula_seq
Definition pc.hpp:101
typename A::universe_type universe_type
Definition pc.hpp:42
static constexpr const bool preserve_meet
Definition pc.hpp:81
static constexpr const bool injective_concretization
Definition pc.hpp:82
CUDA bool is_extractable(const ExtractionStrategy &strategy=ExtractionStrategy()) const
Definition pc.hpp:760
static constexpr const bool is_totally_ordered
Definition pc.hpp:76
CUDA void extract(B &ua) const
Definition pc.hpp:777
CUDA NI TFormula< Allocator2 > deinterpret(const I &intermediate, const Env &env, Allocator2 allocator=Allocator2()) const
Definition pc.hpp:798
static CUDA this_type top(AType atype=UNTYPED, AType atype_sub=UNTYPED, const allocator_type &alloc=allocator_type(), const sub_allocator_type &sub_alloc=sub_allocator_type())
Definition pc.hpp:188
CUDA size_t num_deductions() const
Definition pc.hpp:700
CUDA NI bool interpret_tell(const F &f, Env &env, tell_type< Alloc2 > &tell, IDiagnostics &diagnostics) const
Definition pc.hpp:644
typename universe_type::local_type local_universe_type
Definition pc.hpp:43
CUDA local::B ask(size_t i) const
Definition pc.hpp:696
CUDA auto operator[](int x) const
Definition pc.hpp:727
CUDA local::B is_bot() const
Definition pc.hpp:718
CUDA local::B is_top() const
Definition pc.hpp:723
Allocator allocator_type
Definition pc.hpp:44
interpreted_type< Alloc, typename sub_type::template tell_type< Alloc > > tell_type
Definition pc.hpp:133
static constexpr const bool preserve_concrete_covers
Definition pc.hpp:83
CUDA PC(AType atype, sub_ptr sub, const allocator_type &alloc=allocator_type{})
Definition pc.hpp:138
CUDA auto project(AVar x) const
Definition pc.hpp:731
CUDA NI TFormula< Allocator2 > deinterpret(const Env &env, Allocator2 allocator=Allocator2()) const
Definition pc.hpp:787
battery::vector< pc::Term< A, Alloc >, Alloc > term_seq
Definition pc.hpp:104
static constexpr const bool preserve_join
Definition pc.hpp:80
CUDA bool embed(AVar x, const universe_type &dom)
Definition pc.hpp:682
CUDA snapshot_type< Alloc2 > snapshot(const Alloc2 &alloc=Alloc2()) const
Definition pc.hpp:745
static constexpr const bool is_abstract_universe
Definition pc.hpp:74
static constexpr const bool preserve_top
Definition pc.hpp:79
static constexpr const bool sequential
Definition pc.hpp:75
CUDA void restore(const snapshot_type< Alloc2 > &snap)
Definition pc.hpp:750
CUDA local::B ask(const ask_type< Alloc2 > &t) const
Definition pc.hpp:687
abstract_ptr< sub_type > sub_ptr
Definition pc.hpp:72
static CUDA this_type bot(Env &env, const allocator_type &alloc=allocator_type(), const sub_allocator_type &sub_alloc=sub_allocator_type())
Definition pc.hpp:197
CUDA NI bool interpret_ask(const F &f, const Env &env, ask_type< Alloc2 > &ask, IDiagnostics &diagnostics) const
Definition pc.hpp:649
Definition terms.hpp:16
Definition formula.hpp:8
Definition pc.hpp:107
interpreted_type(interpreted_type &&)=default
SubType sub_value
Definition pc.hpp:108
interpreted_type(const interpreted_type &)=default
CUDA interpreted_type(const Alloc &alloc=Alloc{})
Definition pc.hpp:119
CUDA interpreted_type(const InterpretedType &other, const Alloc &alloc=Alloc{})
Definition pc.hpp:123
interpreted_type & operator=(interpreted_type &&)=default
formula_seq< Alloc > props
Definition pc.hpp:109
CUDA interpreted_type(const SubType &sub_value, const Alloc &alloc=Alloc{})
Definition pc.hpp:115
Definition pc.hpp:50
A::template snapshot_type< Alloc > sub_snap_type
Definition pc.hpp:51
sub_snap_type sub_snap
Definition pc.hpp:53
snapshot_type(snapshot_type< Alloc > &&)=default
CUDA snapshot_type(size_t num_props, sub_snap_type &&sub_snap)
Definition pc.hpp:55
snapshot_type< Alloc > & operator=(const snapshot_type< Alloc > &)=default
CUDA snapshot_type(const SnapshotType &other, const Alloc &alloc=Alloc{})
Definition pc.hpp:66
size_t num_props
Definition pc.hpp:52
snapshot_type< Alloc > & operator=(snapshot_type< Alloc > &&)=default
snapshot_type(const snapshot_type< Alloc > &)=default