SMTStabilizer API
Public API documentation for SMTStabilizer
Loading...
Searching...
No Matches
bitvector.cpp
Go to the documentation of this file.
1/***
2 * Bitwuzla: Satisfiability Modulo Theories (SMT) solver.
3 *
4 * Copyright (C) 2020 by the authors listed in the AUTHORS file at
5 * https://github.com/bitwuzla/bitwuzla/blob/main/AUTHORS
6 *
7 * This file is part of Bitwuzla under the MIT license. See COPYING for more
8 * information at https://github.com/bitwuzla/bitwuzla/blob/main/COPYING
9 */
10// Modified by Xiang Zhang, 2026
11// Additional changes licensed under the MIT License
12#include "bitvector.h"
13
14#include <bitset>
15#include <cassert>
16#include <iostream>
17#include <sstream>
18#include <utility>
19
20#include "util/gmp_utils.h"
21#include "util/hash.h"
22
24
25using namespace util;
26
27namespace {
28#ifndef NDEBUG
/** Check that every character of `str` is a binary digit ('0' or '1').
 *  The empty string is considered valid. Debug-only validation helper. */
bool is_valid_bin_str(const std::string &str) {
  for (size_t i = 0, n = str.size(); i < n; ++i) {
    const char c = str[i];
    if (c == '0' || c == '1') {
      continue;
    }
    return false;
  }
  return true;
}
/**
 * Check that `str` is a valid decimal numeral: an optional leading '-'
 * followed by one or more decimal digits and nothing else.
 * Debug-only validation helper.
 *
 * Fix: the previous version accepted the empty string and a bare "-" as
 * valid, which downstream conversion (std::stoull / mpz_init_set_str)
 * rejects; a numeral must contain at least one digit.
 *
 * @param str The candidate string.
 * @return True iff `str` is a well-formed (possibly negative) decimal string.
 */
bool is_valid_dec_str(const std::string &str) {
  size_t i = (!str.empty() && str[0] == '-') ? 1 : 0;
  // Reject empty input and a lone sign: at least one digit is required.
  if (i >= str.size()) {
    return false;
  }
  for (size_t n = str.size(); i < n; ++i) {
    if (str[i] < '0' || str[i] > '9') {
      return false;
    }
  }
  return true;
}
/** Check that every character of `str` is a hexadecimal digit
 *  (0-9, a-f, A-F). The empty string is considered valid.
 *  Debug-only validation helper. */
bool is_valid_hex_str(const std::string &str) {
  for (const char &c : str) {
    const bool is_dec   = c >= '0' && c <= '9';
    const bool is_lower = c >= 'a' && c <= 'f';
    const bool is_upper = c >= 'A' && c <= 'F';
    if (!is_dec && !is_lower && !is_upper) {
      return false;
    }
  }
  return true;
}
55#endif
56
57#if !defined(__GNUC__) && !defined(__clang__)
58static uint64_t clz_limb(uint64_t nbits_per_limb, mp_limb_t limb) {
59 uint64_t w;
60 mp_limb_t mask;
61 mp_limb_t one = 1u;
62 for (w = 0, mask = 0; w < nbits_per_limb; ++w) {
63 mask += (one << w);
64 if ((limb & ~mask) == 0)
65 break;
66 }
67 return nbits_per_limb - 1 - w;
68}
69#endif
70
71} // namespace
72
73bool BitVector::fits_in_size(uint64_t size, const std::string &str, uint32_t base) {
74 bool is_neg = str[0] == '-';
75 bool res;
76
77 mpz_t tmp;
78 /* We do not want to normalize to 'size'. */
79 mpz_init_set_str(tmp, str.c_str(), base);
80
81 if (is_neg) {
83 mpz_abs(tmp, tmp);
84 if (min.is_gmp()) {
85 res = mpz_cmp(tmp, min.d_val_gmp) <= 0;
86 }
87 else {
88 res = mpz_cmp_ui(tmp, min.d_val_uint64) <= 0;
89 }
90 }
91 else {
93 if (max.is_gmp()) {
94 res = mpz_cmp(tmp, max.d_val_gmp) <= 0;
95 }
96 else {
97 res = mpz_cmp_ui(tmp, max.d_val_uint64) <= 0;
98 }
99 }
100 mpz_clear(tmp);
101 return res;
102}
103
104bool BitVector::fits_in_size(uint64_t size, uint64_t value, bool sign) {
105 if (sign) {
106 return fits_in_size(size, std::to_string((int64_t)value), 10);
107 }
108 mpz_t tmp;
109 /* We do not want to normalize to 'size'. */
110 mpz_init_set_ull(tmp, value);
111 bool res = size >= mpz_sizeinbase(tmp, 2);
112 mpz_clear(tmp);
113 return res;
114}
115
116bool BitVector::fits_in_size(uint64_t size, const mpz_t value) {
117 return size >= mpz_sizeinbase(value, 2);
118}
119
121
123
124BitVector BitVector::mk_zero(uint64_t size) { return BitVector(size); }
125
126BitVector BitVector::mk_one(uint64_t size) { return from_ui(size, 1); }
127
130 if (size > s_native_size) {
131 mpz_set_ui(res.d_val_gmp, 1);
132 mpz_mul_2exp_ull(res.d_val_gmp, res.d_val_gmp, size);
133 mpz_sub_ui(res.d_val_gmp, res.d_val_gmp, 1);
134 }
135 else {
136 res.d_val_uint64 = uint64_fdiv_r_2exp(size, UINT64_MAX);
137 }
138 return res;
139}
140
143 res.set_bit(size - 1, true);
144 return res;
145}
146
149 res.set_bit(size - 1, false);
150 return res;
151}
152
154 assert(!c.is_null());
155 assert(!t.is_null());
156 assert(!e.is_null());
157 assert(c.d_size == 1);
158 assert(t.d_size == e.d_size);
159 return c.is_true() ? t : e;
160}
161
162BitVector::BitVector() : d_size(0), d_val_uint64(0) {}
163
164BitVector::BitVector(uint64_t size) : d_size(size), d_val_uint64(0) {
165 assert(size > 0);
166 if (is_gmp()) {
168 }
169}
170
171BitVector::BitVector(uint64_t size, const std::string &value, uint32_t base)
172 : d_size(size) {
173 assert(!value.empty());
174 assert(base != 2 || is_valid_bin_str(value));
175 assert(base != 10 || is_valid_dec_str(value));
176 assert(base != 16 || is_valid_hex_str(value));
177 assert(fits_in_size(size, value, base));
178 if (is_gmp()) {
179 mpz_init_set_str(d_val_gmp, value.c_str(), base);
180 /* BitVector asserts that given string must fit into bv after conversion.
181 * However, we still need to normalize negative values. Negative values are
182 * represented as "-xxx" (where xxx is the binary representation of the
183 * absolute value of 'value') in GMP when created from mpz_init_set_str. */
185 }
186 else {
188 size, std::stoull(value, 0, static_cast<int32_t>(base)));
189 }
190}
191
192BitVector::BitVector(uint64_t size, const mpz_t value, bool truncate)
193 : d_size(size) {
194 (void)truncate;
195 assert(size > 0);
196 assert(truncate || fits_in_size(size, value));
197
198 mpz_init_set(d_val_gmp, value);
199 if (mpz_sgn(value) < 0) {
203 }
205
206 // Can we use the native type?
207 if (!is_gmp()) {
211 }
212}
213
214BitVector BitVector::from_ui(uint64_t size, uint64_t value, bool truncate) {
215 (void)truncate;
216 assert(size > 0);
217 assert(truncate || fits_in_size(size, value));
218
220 if (res.is_gmp()) {
221 mpz_init_set_ull(res.d_val_gmp, value);
222 mpz_fdiv_r_2exp_ull(res.d_val_gmp, res.d_val_gmp, size);
223 }
224 else {
225 res.d_val_uint64 = uint64_fdiv_r_2exp(size, value);
226 }
227 return res;
228}
229
230BitVector BitVector::from_si(uint64_t size, int64_t value, bool truncate) {
231 (void)truncate;
232 assert(size > 0);
233 assert(truncate || fits_in_size(size, static_cast<uint64_t>(value), true));
234
236 if (res.is_gmp()) {
237 mpz_init_set_sll(res.d_val_gmp, value);
238 mpz_fdiv_r_2exp_ull(res.d_val_gmp, res.d_val_gmp, size);
239 }
240 else {
241 res.d_val_uint64 = uint64_fdiv_r_2exp(size, static_cast<uint64_t>(value));
242 }
243 return res;
244}
245
247 if (other.is_null()) {
248 d_size = 0;
249 d_val_uint64 = 0;
250 }
251 else {
252 if (d_size != other.d_size) {
253 d_size = other.d_size;
254 }
255 if (is_gmp()) {
256 mpz_init_set(d_val_gmp, other.d_val_gmp);
257 }
258 else {
259 d_val_uint64 = other.d_val_uint64;
260 }
261 }
262}
263
265 if (is_gmp()) {
266 if (other.is_gmp()) {
267 mpz_set(d_val_gmp, other.d_val_gmp);
268 mpz_clear(other.d_val_gmp);
269 }
270 else {
272 d_val_uint64 = std::exchange(other.d_val_uint64, 0);
273 }
274 }
275 else {
276 if (other.is_gmp()) {
277 mpz_init_set(d_val_gmp, other.d_val_gmp);
278 mpz_clear(other.d_val_gmp);
279 }
280 else {
281 d_val_uint64 = std::exchange(other.d_val_uint64, 0);
282 }
283 }
284 d_size = std::exchange(other.d_size, 0);
285}
286
288 if (is_gmp()) {
290 }
291}
292
294 if (&other == this)
295 return *this;
296 if (other.is_null()) {
297 if (is_gmp()) {
299 }
300 d_size = 0;
301 d_val_uint64 = 0;
302 }
303 else {
304 if (!is_gmp()) {
305 if (other.is_gmp()) {
306 mpz_init_set(d_val_gmp, other.d_val_gmp);
307 }
308 else {
309 d_val_uint64 = other.d_val_uint64;
310 }
311 }
312 else {
313 if (!other.is_gmp()) {
315 d_val_uint64 = other.d_val_uint64;
316 }
317 else {
318 mpz_set(d_val_gmp, other.d_val_gmp);
319 }
320 }
321 d_size = other.d_size;
322 }
323 return *this;
324}
325
326size_t BitVector::hash() const {
327 uint64_t j = 0, res = 0;
328 uint64_t x, p0, p1;
329
330 res = d_size * util::hash::s_hash_primes[j++];
331
332 if (is_gmp()) {
334 }
335 else {
336 p0 = util::hash::s_hash_primes[j++];
337 if (j == util::hash::s_n_primes)
338 j = 0;
339 p1 = util::hash::s_hash_primes[j++];
340 if (j == util::hash::s_n_primes)
341 j = 0;
342 x = d_val_uint64 ^ res;
343 x = ((x >> 16) ^ x) * p0;
344 x = ((x >> 16) ^ x) * p1;
345 res = ((x >> 16) ^ x);
346 }
347
348 return res;
349}
350
351void BitVector::iset(uint64_t value) {
352 assert(!is_null());
353 assert(d_size);
354 if (is_gmp()) {
355 mpz_set_ull(d_val_gmp, value);
357 }
358 else {
360 }
361}
362
363void BitVector::iset(const BitVector &bv) {
364 assert(!is_null());
365 assert(!bv.is_null());
366 assert(d_size == bv.d_size);
367 if (is_gmp()) {
368 mpz_set(d_val_gmp, bv.d_val_gmp);
369 }
370 else {
371 d_val_uint64 = bv.d_val_uint64;
372 }
373}
374
375bool BitVector::operator==(const BitVector &bv) const {
376 if (is_null())
377 return bv.is_null();
378 return !bv.is_null() && compare(bv) == 0;
379}
380
381bool BitVector::operator!=(const BitVector &bv) const {
382 if (is_null())
383 return !bv.is_null();
384 return bv.is_null() || compare(bv) != 0;
385}
386
387std::string BitVector::str(uint32_t base) const {
388 if (is_null())
389 return "(nil)";
390
391 if (is_gmp()) {
392 std::stringstream res;
393 char *tmp = mpz_get_str(0, base, d_val_gmp);
394 assert(tmp[0] != '-'); // may not be negative
395 if (base == 2) {
396 /* Pad with leading zeros for binary representation. */
397 uint64_t n = strlen(tmp);
398 uint64_t diff = d_size - n;
399 assert(n <= d_size);
400 res << std::string(diff, '0');
401 }
402 res << tmp;
403 assert(base != 2 || res.str().size() == d_size);
404 free(tmp);
405 return res.str();
406 }
407
408 if (base == 10) {
409 return std::to_string(d_val_uint64);
410 }
411 if (base == 16) {
412 std::stringstream res;
413 res << std::hex << d_val_uint64;
414 return res.str();
415 }
416 std::string res = std::bitset<64>(d_val_uint64).to_string();
417 return res.substr(64 - d_size);
418}
419
420uint64_t BitVector::to_uint64(bool truncate) const {
421 (void)truncate;
422 assert(!is_null());
423 if (is_gmp()) {
424 assert(truncate || fits_in_size(64, d_val_gmp));
425 return mpz_get_ull(d_val_gmp);
426 }
427 assert(truncate || fits_in_size(64, d_val_uint64));
428 return d_val_uint64;
429}
430
431int32_t BitVector::compare(const BitVector &bv) const {
432 assert(!is_null());
433 assert(!bv.is_null());
434
435 if (d_size != bv.d_size) {
436 return -1;
437 }
438
439 if (is_gmp()) {
440 return mpz_cmp(d_val_gmp, bv.d_val_gmp);
441 }
442
443 if (d_val_uint64 == bv.d_val_uint64) {
444 return 0;
445 }
446 if (d_val_uint64 < bv.d_val_uint64) {
447 return -1;
448 }
449 return 1;
450}
451
452int32_t BitVector::signed_compare(const BitVector &bv) const {
453 assert(!is_null());
454 assert(!bv.is_null());
455
456 if (d_size != bv.d_size) {
457 return -1;
458 }
459
460 uint64_t msb_a = msb();
461 uint64_t msb_b = bv.msb();
462
463 if (msb_a && !msb_b) {
464 return -1;
465 }
466 if (!msb_a && msb_b) {
467 return 1;
468 }
469 return compare(bv);
470}
471
472bool BitVector::bit(uint64_t idx) const {
473 assert(!is_null());
474 assert(idx < size());
475 if (is_gmp()) {
476 return mpz_tstbit(d_val_gmp, idx);
477 }
478 return (d_val_uint64 >> idx) & 1;
479}
480
481void BitVector::set_bit(uint64_t idx, bool value) {
482 assert(!is_null());
483 assert(idx < d_size);
484 if (is_gmp()) {
485 if (value) {
487 }
488 else {
490 }
491 }
492 else {
493 if (value) {
494 d_val_uint64 |= ((uint64_t)1 << idx);
495 }
496 else {
497 d_val_uint64 &= ~((uint64_t)1 << idx);
498 }
499 }
500}
501
502void BitVector::flip_bit(uint64_t idx) {
503 assert(!is_null());
504 if (is_gmp()) {
506 }
507 else {
508 set_bit(idx, bit(idx) ? false : true);
509 }
510}
511
512bool BitVector::lsb() const {
513 assert(!is_null());
514 return bit(0);
515}
516
517bool BitVector::msb() const {
518 assert(!is_null());
519 return bit(d_size - 1);
520}
521
522bool BitVector::is_true() const {
523 assert(!is_null());
524 if (d_size > 1)
525 return false;
526 return bit(0);
527}
528
530 assert(!is_null());
531 if (d_size > 1)
532 return false;
533 return !bit(0);
534}
535
536bool BitVector::is_zero() const {
537 assert(!is_null());
538 if (is_gmp()) {
539 return mpz_cmp_ui(d_val_gmp, 0) == 0;
540 }
541 return d_val_uint64 == 0;
542}
543
544bool BitVector::is_ones() const {
545 assert(!is_null());
546
547 if (is_gmp()) {
549 if (n == 0)
550 return false; // zero
551 uint64_t m = d_size / static_cast<uint64_t>(mp_bits_per_limb);
552 if (d_size % static_cast<uint64_t>(mp_bits_per_limb))
553 m += 1;
554 if (m != n)
555 return false; // less limbs used than expected, not ones
557 for (uint64_t i = 0; i < n - 1; ++i) {
559 if ((static_cast<uint64_t>(limb)) != max)
560 return false;
561 }
563 if (d_size % static_cast<uint64_t>(mp_bits_per_limb) == 0) {
564 return (static_cast<uint64_t>(limb)) == max;
565 }
566 m = static_cast<uint64_t>(mp_bits_per_limb) -
567 d_size % static_cast<uint64_t>(mp_bits_per_limb);
568 return (static_cast<uint64_t>(limb)) == (max >> m);
569 }
571}
572
573bool BitVector::is_one() const {
574 assert(!is_null());
575 if (is_gmp()) {
576 return mpz_cmp_ui(d_val_gmp, 1) == 0;
577 }
578 return d_val_uint64 == 1;
579}
580
582 assert(!is_null());
583 if (is_gmp()) {
584 if (mpz_scan1(d_val_gmp, 0) != d_size - 1)
585 return false;
586 }
587 else {
588 if (d_val_uint64 !=
589 ((uint64_t)1 << (d_size == 64 ? 63 : ((d_size % 64) - 1)))) {
590 return false;
591 }
592 }
593 return true;
594}
595
597 assert(!is_null());
598 if (is_gmp()) {
599 if (mpz_scan0(d_val_gmp, 0) != d_size - 1)
600 return false;
601 }
602 else {
603 if (d_size == 1)
604 return d_val_uint64 == 0;
605 if (d_val_uint64 != (~((uint64_t)0) >> (64 - d_size + 1))) {
606 return false;
607 }
608 }
609 return true;
610}
611
613 assert(!is_null());
614 return !is_zero() && bvdec().ibvand(*this).is_zero();
615}
616
618 assert(!is_null());
619 return is_min_signed();
620}
621
623 assert(!is_null());
624 assert(d_size == bv.d_size);
625 mpz_t add;
626 if (is_gmp()) {
627 mpz_init(add);
628 mpz_add(add, d_val_gmp, bv.d_val_gmp);
629 }
630 else {
632 mpz_add_ui(add, add, bv.d_val_uint64);
633 }
634 mpz_fdiv_q_2exp_ull(add, add, d_size);
635 bool res = mpz_cmp_ui(add, 0) != 0;
636 mpz_clear(add);
637 return res;
638}
639
641 assert(!is_null());
642 assert(d_size == bv.d_size);
643 bool is_signed_bv0 = msb();
644 bool is_signed_bv1 = bv.msb();
645 bool is_signed_add = bvadd(bv).msb();
646 // Overflow occurs if
647 // 1) negative + negative = positive
648 // 2) positive + positive = negative
649 return (is_signed_bv0 && is_signed_bv1 && !is_signed_add) ||
651}
652
654 assert(!is_null());
655 assert(d_size == bv.d_size);
656 return compare(bv) < 0;
657}
658
660 assert(!is_null());
661 assert(d_size == bv.d_size);
662 bool is_signed_bv0 = msb();
663 bool is_signed_bv1 = bv.msb();
664 bool is_signed_sub = bvsub(bv).msb();
665 // Overflow occurs if
666 // 1) negative - positive = positive
667 // 2) positive - negative = negative
668 return (is_signed_bv0 && !is_signed_bv1 && !is_signed_sub) ||
670}
671
673 assert(!is_null());
674 assert(d_size == bv.d_size);
675 if (d_size > 1) {
676 mpz_t mul;
677 if (is_gmp()) {
678 mpz_init(mul);
679 mpz_mul(mul, d_val_gmp, bv.d_val_gmp);
680 }
681 else {
683 mpz_mul_ui(mul, mul, bv.d_val_uint64);
684 }
685 mpz_fdiv_q_2exp_ull(mul, mul, d_size);
686 bool res = mpz_cmp_ui(mul, 0) != 0;
687 mpz_clear(mul);
688 return res;
689 }
690 return false;
691}
692
694 assert(!is_null());
695 assert(d_size == bv.d_size);
696
697 if (d_size == 1) {
698 return is_one() && bv.is_one();
699 }
700
701 BitVector mul(bvsext(d_size)); /* copy to guard for bv1 == *this */
702 mul.ibvmul(bv.bvsext(d_size));
704 0 ||
706 0;
707}
708
710 assert(!is_null());
711 assert(d_size == bv.d_size);
712 return is_min_signed() && bv.is_ones();
713}
714
716 assert(!is_null());
717 uint64_t res = 0;
718 if (is_gmp()) {
719 res = mpz_scan1(d_val_gmp, 0);
720 if (res > d_size)
721 res = d_size;
722 }
723 else {
724 for (uint64_t i = 0; i < d_size; ++i) {
725 if (bit(i))
726 break;
727 res += 1;
728 }
729 }
730 return res;
731}
732
734 assert(!is_null());
735 uint64_t res = 0;
736 if (is_gmp()) {
737 res = mpz_scan0(d_val_gmp, 0);
738 if (res > d_size)
739 res = d_size;
740 }
741 else {
742 for (uint64_t i = 0; i < d_size; ++i) {
743 if (!bit(i))
744 break;
745 res += 1;
746 }
747 }
748 return res;
749}
750
752 assert(!is_null());
753 return count_leading(true);
754}
755
757 assert(!is_null());
758 return count_leading(false);
759}
760
761/* -------------------------------------------------------------------------- */
762/* Bit-vector operations. */
763/* -------------------------------------------------------------------------- */
764
766
768
770
772
774
775BitVector BitVector::bvredor() const { return BitVector(1).ibvredor(*this); }
776
778
779BitVector BitVector::bvnego() const { return BitVector(1).ibvnego(*this); }
780
782 return BitVector(d_size).ibvadd(*this, bv);
783}
784
786 return BitVector(d_size).ibvsub(*this, bv);
787}
788
790 return BitVector(d_size).ibvand(*this, bv);
791}
792
794 return BitVector(1).ibvimplies(*this, bv);
795}
796
798 return BitVector(d_size).ibvnand(*this, bv);
799}
800
802 return BitVector(d_size).ibvnor(*this, bv);
803}
804
806 return BitVector(d_size).ibvor(*this, bv);
807}
808
810 return BitVector(d_size).ibvxnor(*this, bv);
811}
812
814 return BitVector(d_size).ibvxor(*this, bv);
815}
816
818 return BitVector(1).ibveq(*this, bv);
819}
820
822 return BitVector(1).ibvne(*this, bv);
823}
824
826 return BitVector(1).ibvult(*this, bv);
827}
828
830 return BitVector(1).ibvule(*this, bv);
831}
832
834 return BitVector(1).ibvugt(*this, bv);
835}
836
838 return BitVector(1).ibvuge(*this, bv);
839}
840
842 return BitVector(1).ibvslt(*this, bv);
843}
844
846 return BitVector(1).ibvsle(*this, bv);
847}
848
850 return BitVector(1).ibvsgt(*this, bv);
851}
852
854 return BitVector(1).ibvsge(*this, bv);
855}
856
857BitVector BitVector::bvshl(uint64_t shift) const {
858 return BitVector(d_size).ibvshl(*this, shift);
859}
860
862 return BitVector(d_size).ibvshl(*this, bv);
863}
864
865BitVector BitVector::bvshr(uint64_t shift) const {
866 return BitVector(d_size).ibvshr(*this, shift);
867}
868
870 return BitVector(d_size).ibvshr(*this, bv);
871}
872
873BitVector BitVector::bvashr(uint64_t shift) const {
874 return BitVector(d_size).ibvashr(*this, shift);
875}
876
878 return BitVector(d_size).ibvashr(*this, bv);
879}
880
882 return BitVector(d_size).ibvmul(*this, bv);
883}
884
886 return BitVector(d_size).ibvudiv(*this, bv);
887}
888
890 return BitVector(d_size).ibvurem(*this, bv);
891}
892
894 return BitVector(d_size).ibvsdiv(*this, bv);
895}
896
898 return BitVector(d_size).ibvsrem(*this, bv);
899}
900
902 return BitVector(d_size).ibvsmod(*this, bv);
903}
904
906 return BitVector(d_size).ibvuaddo(*this, bv);
907}
908
910 return BitVector(d_size).ibvsaddo(*this, bv);
911}
912
914 return BitVector(d_size).ibvusubo(*this, bv);
915}
916
918 return BitVector(d_size).ibvssubo(*this, bv);
919}
920
922 return BitVector(d_size).ibvumulo(*this, bv);
923}
924
926 return BitVector(d_size).ibvsmulo(*this, bv);
927}
928
930 return BitVector(d_size).ibvsdivo(*this, bv);
931}
932
934 return BitVector(d_size).ibvconcat(*this, bv);
935}
936
937BitVector BitVector::bvextract(uint64_t idx_hi, uint64_t idx_lo) const {
938 return BitVector(d_size).ibvextract(*this, idx_hi, idx_lo);
939}
940
941BitVector BitVector::bvzext(uint64_t n) const {
942 return BitVector(d_size).ibvzext(*this, n);
943}
944
945BitVector BitVector::bvsext(uint64_t n) const {
946 return BitVector(d_size).ibvsext(*this, n);
947}
948
950 return BitVector(d_size).ibvrepeat(*this, n);
951}
952
953BitVector BitVector::bvroli(uint64_t n) const {
954 return BitVector(d_size).ibvroli(*this, n);
955}
956
958 return BitVector(d_size).ibvrol(*this, n);
959}
960
961BitVector BitVector::bvrori(uint64_t n) const {
962 return BitVector(d_size).ibvrori(*this, n);
963}
964
966 return BitVector(d_size).ibvror(*this, n);
967}
968
970 return BitVector(d_size).ibvmodinv(*this);
971}
972
973BitVector BitVector::bvpow(const mpz_t exp) const {
974 return BitVector(d_size).ibvpow(*this, exp);
975}
976
977/* -------------------------------------------------------------------------- */
978/* Bit-vector operations, in-place, requires all operands as arguments. */
979/* -------------------------------------------------------------------------- */
980
982 assert(!bv.is_null());
983
984 ibvnot(bv);
985
986 if (is_gmp()) {
989 }
990 else {
991 d_val_uint64 += 1;
993 }
994 return *this;
995}
996
998 assert(!bv.is_null());
999
1000 uint64_t size = bv.d_size;
1001
1002 if (bv.is_gmp()) {
1003 if (!is_gmp()) {
1005 }
1006 mpz_com(d_val_gmp, bv.d_val_gmp);
1008 }
1009 else {
1010 if (is_gmp()) {
1012 }
1013 d_val_uint64 = uint64_fdiv_r_2exp(size, ~bv.d_val_uint64);
1014 }
1015 d_size = size;
1016 return *this;
1017}
1018
1020 assert(!bv.is_null());
1021
1022 uint64_t size = bv.d_size;
1023
1024 if (bv.is_gmp()) {
1025 if (!is_gmp()) {
1027 }
1028 mpz_add_ui(d_val_gmp, bv.d_val_gmp, 1);
1030 }
1031 else {
1032 if (is_gmp()) {
1034 }
1035 d_val_uint64 = uint64_fdiv_r_2exp(size, bv.d_val_uint64 + 1);
1036 }
1037 d_size = size;
1038 return *this;
1039}
1040
1042 assert(!bv.is_null());
1043
1044 uint64_t size = bv.d_size;
1045
1046 if (bv.is_gmp()) {
1047 if (!is_gmp()) {
1049 }
1050 mpz_sub_ui(d_val_gmp, bv.d_val_gmp, 1);
1052 }
1053 else {
1054 if (is_gmp()) {
1056 }
1057 d_val_uint64 = uint64_fdiv_r_2exp(size, bv.d_val_uint64 - 1);
1058 }
1059 d_size = size;
1060 return *this;
1061}
1062
1064 assert(!bv.is_null());
1065 uint64_t val = 0;
1066 if (bv.is_ones()) {
1067 val = 1;
1068 }
1069 if (is_gmp())
1071 d_val_uint64 = val;
1072 d_size = 1;
1073 return *this;
1074}
1075
1077 assert(!bv.is_null());
1078 uint64_t val = 0;
1079 if (bv.is_gmp()) {
1081 for (size_t i = 0, n = mpz_size(bv.d_val_gmp); i < n; ++i) {
1082 limb = mpz_getlimbn(bv.d_val_gmp, i);
1083 if (((uint64_t)limb) != 0) {
1084 val = 1;
1085 break;
1086 }
1087 }
1088 }
1089 else if (bv.d_val_uint64 != 0) {
1090 val = 1;
1091 }
1092 if (is_gmp())
1094 d_val_uint64 = val;
1095 d_size = 1;
1096 return *this;
1097}
1098
1100 assert(!bv.is_null());
1101 uint64_t val = 0;
1102 if (bv.is_gmp()) {
1103 val = mpz_popcount(bv.d_val_gmp) % 2 > 0 ? 1 : 0;
1104 }
1105 else if (bv.d_val_uint64 != 0) {
1106 for (uint64_t i = 0; i < bv.d_size; ++i) {
1107 if (bv.bit(i)) {
1108 val ^= 1;
1109 }
1110 }
1111 }
1112 if (is_gmp())
1114 d_val_uint64 = val;
1115 d_size = 1;
1116 return *this;
1117}
1118
1120 assert(!bv.is_null());
1121 if (is_gmp())
1123 d_val_uint64 = bv.is_neg_overflow() ? 1 : 0;
1124 d_size = 1;
1125 return *this;
1126}
1127
1129 assert(!bv0.is_null());
1130 assert(!bv1.is_null());
1131 assert(bv0.d_size == bv1.d_size);
1132
1133 uint64_t size = bv0.d_size;
1134
1135 if (bv0.is_gmp()) {
1136 if (!is_gmp()) {
1138 }
1139 mpz_add(d_val_gmp, bv0.d_val_gmp, bv1.d_val_gmp);
1141 }
1142 else {
1143 if (is_gmp()) {
1145 }
1146 d_val_uint64 =
1147 uint64_fdiv_r_2exp(size, bv0.d_val_uint64 + bv1.d_val_uint64);
1148 }
1149 d_size = size;
1150 return *this;
1151}
1152
1154 assert(!bv0.is_null());
1155 assert(!bv1.is_null());
1156 assert(bv0.d_size == bv1.d_size);
1157
1158 uint64_t size = bv0.d_size;
1159
1160 if (bv0.is_gmp()) {
1161 if (!is_gmp()) {
1163 }
1164 mpz_sub(d_val_gmp, bv0.d_val_gmp, bv1.d_val_gmp);
1166 }
1167 else {
1168 if (is_gmp()) {
1170 }
1171 d_val_uint64 =
1172 uint64_fdiv_r_2exp(size, bv0.d_val_uint64 - bv1.d_val_uint64);
1173 }
1174 d_size = size;
1175 return *this;
1176}
1177
1179 assert(!bv0.is_null());
1180 assert(!bv1.is_null());
1181 assert(bv0.d_size == bv1.d_size);
1182
1183 uint64_t size = bv0.d_size;
1184
1185 if (bv0.is_gmp()) {
1186 if (!is_gmp()) {
1188 }
1189 mpz_and(d_val_gmp, bv0.d_val_gmp, bv1.d_val_gmp);
1191 }
1192 else {
1193 if (is_gmp()) {
1195 }
1196 d_val_uint64 =
1197 uint64_fdiv_r_2exp(size, bv0.d_val_uint64 & bv1.d_val_uint64);
1198 }
1199 d_size = size;
1200 return *this;
1201}
1202
1204 assert(!bv0.is_null());
1205 assert(!bv1.is_null());
1206 assert(bv0.d_size == 1);
1207 assert(bv1.d_size == 1);
1208 uint64_t val = 0;
1209 if (bv0.is_false() || bv1.is_true()) {
1210 val = 1;
1211 }
1212 if (is_gmp())
1214 d_val_uint64 = val;
1215 d_size = 1;
1216 return *this;
1217}
1218
1220 assert(!bv0.is_null());
1221 assert(!bv1.is_null());
1222 assert(bv0.d_size == bv1.d_size);
1223
1224 uint64_t size = bv0.d_size;
1225
1226 if (bv0.is_gmp()) {
1227 if (!is_gmp()) {
1229 }
1230 mpz_and(d_val_gmp, bv0.d_val_gmp, bv1.d_val_gmp);
1233 }
1234 else {
1235 if (is_gmp()) {
1237 }
1238 d_val_uint64 =
1239 uint64_fdiv_r_2exp(size, ~(bv0.d_val_uint64 & bv1.d_val_uint64));
1240 }
1241 d_size = size;
1242 return *this;
1243}
1244
1246 assert(!bv0.is_null());
1247 assert(!bv1.is_null());
1248 assert(bv0.d_size == bv1.d_size);
1249
1250 uint64_t size = bv0.d_size;
1251
1252 if (bv0.is_gmp()) {
1253 if (!is_gmp()) {
1255 }
1256 mpz_ior(d_val_gmp, bv0.d_val_gmp, bv1.d_val_gmp);
1259 }
1260 else {
1261 if (is_gmp()) {
1263 }
1264 d_val_uint64 =
1265 uint64_fdiv_r_2exp(size, ~(bv0.d_val_uint64 | bv1.d_val_uint64));
1266 }
1267 d_size = size;
1268 return *this;
1269}
1270
1272 assert(!bv0.is_null());
1273 assert(!bv1.is_null());
1274 assert(bv0.d_size == bv1.d_size);
1275
1276 uint64_t size = bv0.d_size;
1277
1278 if (bv0.is_gmp()) {
1279 if (!is_gmp()) {
1281 }
1282 mpz_ior(d_val_gmp, bv0.d_val_gmp, bv1.d_val_gmp);
1284 }
1285 else {
1286 if (is_gmp()) {
1288 }
1289 d_val_uint64 =
1290 uint64_fdiv_r_2exp(size, bv0.d_val_uint64 | bv1.d_val_uint64);
1291 }
1292 d_size = size;
1293 return *this;
1294}
1295
1297 assert(!bv0.is_null());
1298 assert(!bv1.is_null());
1299 assert(bv0.d_size == bv1.d_size);
1300
1301 uint64_t size = bv0.d_size;
1302
1303 if (bv0.is_gmp()) {
1304 if (!is_gmp()) {
1306 }
1307 mpz_xor(d_val_gmp, bv0.d_val_gmp, bv1.d_val_gmp);
1310 }
1311 else {
1312 if (is_gmp()) {
1314 }
1315 d_val_uint64 =
1316 uint64_fdiv_r_2exp(size, ~(bv0.d_val_uint64 ^ bv1.d_val_uint64));
1317 }
1318 d_size = size;
1319 return *this;
1320}
1321
1323 assert(!bv0.is_null());
1324 assert(!bv1.is_null());
1325 assert(bv0.d_size == bv1.d_size);
1326
1327 uint64_t size = bv0.d_size;
1328
1329 if (bv0.is_gmp()) {
1330 if (!is_gmp()) {
1332 }
1333 mpz_xor(d_val_gmp, bv0.d_val_gmp, bv1.d_val_gmp);
1335 }
1336 else {
1337 if (is_gmp()) {
1339 }
1340 d_val_uint64 =
1341 uint64_fdiv_r_2exp(size, bv0.d_val_uint64 ^ bv1.d_val_uint64);
1342 }
1343 d_size = size;
1344 return *this;
1345}
1346
1348 assert(!bv0.is_null());
1349 assert(!bv1.is_null());
1350 assert(bv0.d_size == bv1.d_size);
1351 uint64_t val = 0;
1352 if (bv0.is_gmp()) {
1353 if (mpz_cmp(bv0.d_val_gmp, bv1.d_val_gmp) == 0) {
1354 val = 1;
1355 }
1356 }
1357 else if (bv0.d_val_uint64 == bv1.d_val_uint64) {
1358 val = 1;
1359 }
1360 if (is_gmp())
1362 d_val_uint64 = val;
1363 d_size = 1;
1364 return *this;
1365}
1366
1368 assert(!bv0.is_null());
1369 assert(!bv1.is_null());
1370 assert(bv0.d_size == bv1.d_size);
1371 uint64_t val = 0;
1372 if (bv0.is_gmp()) {
1373 if (mpz_cmp(bv0.d_val_gmp, bv1.d_val_gmp) != 0) {
1374 val = 1;
1375 }
1376 }
1377 else if (bv0.d_val_uint64 != bv1.d_val_uint64) {
1378 val = 1;
1379 }
1380 if (is_gmp())
1382 d_val_uint64 = val;
1383 d_size = 1;
1384 return *this;
1385}
1386
1388 assert(!bv0.is_null());
1389 assert(!bv1.is_null());
1390 assert(bv0.d_size == bv1.d_size);
1391 uint64_t val = 0;
1392 if (bv0.is_gmp()) {
1393 if (mpz_cmp(bv0.d_val_gmp, bv1.d_val_gmp) < 0) {
1394 val = 1;
1395 }
1396 }
1397 else if (bv0.d_val_uint64 < bv1.d_val_uint64) {
1398 val = 1;
1399 }
1400 if (is_gmp())
1402 d_val_uint64 = val;
1403 d_size = 1;
1404 return *this;
1405}
1406
1408 assert(!bv0.is_null());
1409 assert(!bv1.is_null());
1410 assert(bv0.d_size == bv1.d_size);
1411 uint64_t val = 0;
1412 if (bv0.is_gmp()) {
1413 if (mpz_cmp(bv0.d_val_gmp, bv1.d_val_gmp) <= 0) {
1414 val = 1;
1415 }
1416 }
1417 else if (bv0.d_val_uint64 <= bv1.d_val_uint64) {
1418 val = 1;
1419 }
1420 if (is_gmp())
1422 d_val_uint64 = val;
1423 d_size = 1;
1424 return *this;
1425}
1426
1428 assert(!bv0.is_null());
1429 assert(!bv1.is_null());
1430 assert(bv0.d_size == bv1.d_size);
1431 uint64_t val = 0;
1432 if (bv0.is_gmp()) {
1433 if (mpz_cmp(bv0.d_val_gmp, bv1.d_val_gmp) > 0) {
1434 val = 1;
1435 }
1436 }
1437 else if (bv0.d_val_uint64 > bv1.d_val_uint64) {
1438 val = 1;
1439 }
1440 if (is_gmp())
1442 d_val_uint64 = val;
1443 d_size = 1;
1444 return *this;
1445}
1446
1448 assert(!bv0.is_null());
1449 assert(!bv1.is_null());
1450 assert(bv0.d_size == bv1.d_size);
1451 uint64_t val = 0;
1452 if (bv0.is_gmp()) {
1453 if (mpz_cmp(bv0.d_val_gmp, bv1.d_val_gmp) >= 0) {
1454 val = 1;
1455 }
1456 }
1457 else if (bv0.d_val_uint64 >= bv1.d_val_uint64) {
1458 val = 1;
1459 }
1460 if (is_gmp())
1462 d_val_uint64 = val;
1463 d_size = 1;
1464 return *this;
1465}
1466
1468 assert(!bv0.is_null());
1469 assert(!bv1.is_null());
1470 assert(bv0.d_size == bv1.d_size);
1471 bool msb_bv0 = bv0.msb();
1472 bool msb_bv1 = bv1.msb();
1473 if (msb_bv0 && !msb_bv1) {
1474 if (is_gmp())
1476 d_val_uint64 = 1;
1477 d_size = 1;
1478 }
1479 else if (!msb_bv0 && msb_bv1) {
1480 if (is_gmp())
1482 d_val_uint64 = 0;
1483 d_size = 1;
1484 }
1485 else {
1486 ibvult(bv0, bv1);
1487 }
1488 return *this;
1489}
1490
1492 assert(!bv0.is_null());
1493 assert(!bv1.is_null());
1494 assert(bv0.d_size == bv1.d_size);
1495 bool msb_bv0 = bv0.msb();
1496 bool msb_bv1 = bv1.msb();
1497 if (msb_bv0 && !msb_bv1) {
1498 if (is_gmp())
1500 d_val_uint64 = 1;
1501 d_size = 1;
1502 }
1503 else if (!msb_bv0 && msb_bv1) {
1504 if (is_gmp())
1506 d_val_uint64 = 0;
1507 d_size = 1;
1508 }
1509 else {
1510 ibvule(bv0, bv1);
1511 }
1512 return *this;
1513}
1514
1516 assert(!bv0.is_null());
1517 assert(!bv1.is_null());
1518 assert(bv0.d_size == bv1.d_size);
1519 bool msb_bv0 = bv0.msb();
1520 bool msb_bv1 = bv1.msb();
1521 if (msb_bv0 && !msb_bv1) {
1522 if (is_gmp())
1524 d_val_uint64 = 0;
1525 d_size = 1;
1526 }
1527 else if (!msb_bv0 && msb_bv1) {
1528 if (is_gmp())
1530 d_val_uint64 = 1;
1531 d_size = 1;
1532 }
1533 else {
1534 ibvugt(bv0, bv1);
1535 }
1536 return *this;
1537}
1538
1540 assert(!bv0.is_null());
1541 assert(!bv1.is_null());
1542 assert(bv0.d_size == bv1.d_size);
1543 bool msb_bv0 = bv0.msb();
1544 bool msb_bv1 = bv1.msb();
1545 if (msb_bv0 && !msb_bv1) {
1546 if (is_gmp())
1548 d_val_uint64 = 0;
1549 d_size = 1;
1550 }
1551 else if (!msb_bv0 && msb_bv1) {
1552 if (is_gmp())
1554 d_val_uint64 = 1;
1555 d_size = 1;
1556 }
1557 else {
1558 ibvuge(bv0, bv1);
1559 }
1560 return *this;
1561}
1562
1563BitVector &BitVector::ibvshl(const BitVector &bv, uint64_t shift) {
1564 assert(!bv.is_null());
1565
1566 uint64_t size = bv.d_size;
1567
1568 if (bv.is_gmp()) {
1569 if (!is_gmp()) {
1571 }
1572 if (shift >= size) {
1574 }
1575 else {
1576 mpz_mul_2exp_ull(d_val_gmp, bv.d_val_gmp, shift);
1578 }
1579 }
1580 else {
1581 if (is_gmp()) {
1583 }
1584 if (shift >= size) {
1585 d_val_uint64 = 0;
1586 }
1587 else {
1588 d_val_uint64 = uint64_fdiv_r_2exp(size, bv.d_val_uint64 << shift);
1589 }
1590 }
1591 d_size = size;
1592 return *this;
1593}
1594
1596 assert(!bv.is_null());
1597 assert(!shift.is_null());
1598 assert(bv.d_size == shift.d_size);
1599
1601 uint64_t size = bv.d_size;
1602
1603 if (shift.shift_is_uint64(&ishift)) {
1604 ibvshl(bv, ishift);
1605 }
1606 else {
1607 if (bv.is_gmp()) {
1608 if (!is_gmp()) {
1610 }
1612 }
1613 else {
1614 if (is_gmp()) {
1616 }
1617 d_val_uint64 = 0;
1618 }
1619 }
1620 d_size = size;
1621 return *this;
1622}
1623
1624BitVector &BitVector::ibvshr(const BitVector &bv, uint64_t shift) {
1625 assert(!bv.is_null());
1626
1627 uint64_t size = bv.d_size;
1628
1629 if (bv.is_gmp()) {
1630 if (!is_gmp()) {
1632 }
1633 if (shift >= size) {
1635 }
1636 else {
1638 }
1639 }
1640 else {
1641 if (is_gmp()) {
1643 }
1644 if (shift >= size) {
1645 d_val_uint64 = 0;
1646 }
1647 else {
1648 d_val_uint64 = uint64_fdiv_r_2exp(size, bv.d_val_uint64 >> shift);
1649 }
1650 }
1651 d_size = size;
1652 return *this;
1653}
1654
1656 assert(!bv.is_null());
1657 assert(!shift.is_null());
1658 assert(bv.d_size == shift.d_size);
1659
1661 uint64_t size = bv.d_size;
1662
1663 if (shift.shift_is_uint64(&ishift)) {
1664 ibvshr(bv, ishift);
1665 }
1666 else {
1667 if (bv.is_gmp()) {
1668 if (!is_gmp()) {
1670 }
1672 }
1673 else {
1674 if (is_gmp()) {
1676 }
1677 d_val_uint64 = 0;
1678 }
1679 }
1680 d_size = size;
1681 return *this;
1682}
1683
// In-place arithmetic shift right: *this = bv >> shift with sign-bit
// replication for a negative operand. Returns a reference to *this.
1684BitVector &BitVector::ibvashr(const BitVector &bv, uint64_t shift) {
 1685 assert(!bv.is_null());
 1686
 1687 if (bv.msb()) {
 // NOTE(review): the negative-operand branch body is missing from this
 // rendering; the BitVector-shift overload of ibvashr implements it as
 // ibvnot(bv).ibvshr(shift) followed by ibvnot() -- verify upstream.
 1689 }
 1690 else {
 // Non-negative operand: arithmetic shift equals logical shift.
 1691 ibvshr(bv, shift);
 1692 }
 1693 return *this;
 1694}
1695
1697 assert(!bv.is_null());
1698 assert(!shift.is_null());
1699 assert(bv.d_size == shift.d_size);
1700
1701 if (bv.msb()) {
1702 if (&shift == this) {
1703 BitVector b1(shift); /* copy to guard for bv1 == *this */
1704 ibvnot(bv).ibvshr(b1);
1705 }
1706 else {
1708 }
1709 ibvnot();
1710 }
1711 else {
1712 ibvshr(bv, shift);
1713 }
1714 return *this;
1715}
1716
1718 assert(!bv0.is_null());
1719 assert(!bv1.is_null());
1720 assert(bv0.d_size == bv1.d_size);
1721
1722 uint64_t size = bv0.d_size;
1723
1724 if (bv0.is_gmp()) {
1725 if (!is_gmp()) {
1727 }
1728 mpz_mul(d_val_gmp, bv0.d_val_gmp, bv1.d_val_gmp);
1730 }
1731 else {
1732 if (is_gmp()) {
1734 }
1735 d_val_uint64 =
1736 uint64_fdiv_r_2exp(size, bv0.d_val_uint64 * bv1.d_val_uint64);
1737 }
1738 d_size = size;
1739 return *this;
1740}
1741
1743 assert(!bv0.is_null());
1744 assert(!bv1.is_null());
1745 assert(bv0.d_size == bv1.d_size);
1746
1747 uint64_t size = bv0.d_size;
1748
1749 if (bv0.is_gmp()) {
1750 if (!is_gmp()) {
1752 }
1753 if (bv1.is_zero()) {
1757 }
1758 else {
1759 mpz_fdiv_q(d_val_gmp, bv0.d_val_gmp, bv1.d_val_gmp);
1761 }
1762 }
1763 else {
1764 if (is_gmp()) {
1766 }
1767 if (bv1.is_zero()) {
1769 }
1770 else {
1771 d_val_uint64 = bv0.d_val_uint64 / bv1.d_val_uint64;
1772 }
1773 }
1774 d_size = size;
1775 return *this;
1776}
1777
1779 assert(!bv0.is_null());
1780 assert(!bv1.is_null());
1781 assert(bv0.d_size == bv1.d_size);
1782
1783 uint64_t size = bv0.d_size;
1784
1785 if (bv0.is_gmp()) {
1786 if (!is_gmp()) {
1788 }
1789 if (!bv1.is_zero()) {
1790 mpz_fdiv_r(d_val_gmp, bv0.d_val_gmp, bv1.d_val_gmp);
1792 }
1793 else {
1794 mpz_set(d_val_gmp, bv0.d_val_gmp);
1795 }
1796 }
1797 else {
1798 if (is_gmp()) {
1800 }
1801 if (!bv1.is_zero()) {
1802 d_val_uint64 =
1803 uint64_fdiv_r_2exp(size, bv0.d_val_uint64 % bv1.d_val_uint64);
1804 }
1805 else {
1806 d_val_uint64 = uint64_fdiv_r_2exp(size, bv0.d_val_uint64);
1807 }
1808 }
1809 d_size = size;
1810 return *this;
1811}
1812
1814 assert(!bv0.is_null());
1815 assert(!bv1.is_null());
1816 assert(bv0.d_size == bv1.d_size);
1817 bool is_signed_bv0 = bv0.msb();
1818 bool is_signed_bv1 = bv1.msb();
1819
1820 if (is_signed_bv0 && !is_signed_bv1) {
1821 if (&bv1 == this) {
1822 BitVector b1(bv1); /* copy to guard for bv1 == *this */
1823 ibvneg(bv0).ibvudiv(b1);
1824 }
1825 else {
1827 }
1828 ibvneg(*this);
1829 }
1830 else if (!is_signed_bv0 && is_signed_bv1) {
1831 if (&bv0 == this) {
1832 BitVector b0(bv0); /* copy to guard for bv0 == *this */
1833 ibvneg(bv1).ibvudiv(b0, *this);
1834 }
1835 else {
1836 ibvneg(bv1).ibvudiv(bv0, *this);
1837 }
1838 ibvneg(*this);
1839 }
1840 else if (is_signed_bv0 && is_signed_bv1) {
1841 BitVector b1neg(bv1.bvneg());
1843 }
1844 else {
1845 ibvudiv(bv0, bv1);
1846 }
1847 return *this;
1848}
1849
1851 assert(!bv0.is_null());
1852 assert(!bv1.is_null());
1853 assert(bv0.d_size == bv1.d_size);
1854 bool is_signed_bv0 = bv0.msb();
1855 bool is_signed_bv1 = bv1.msb();
1856
1857 if (is_signed_bv0 && !is_signed_bv1) {
1858 if (&bv1 == this) {
1859 BitVector b1(bv1); /* copy to guard for bv1 == *this */
1860 ibvneg(bv0).ibvurem(b1);
1861 }
1862 else {
1864 }
1865 ibvneg(*this);
1866 }
1867 else if (!is_signed_bv0 && is_signed_bv1) {
1868 if (&bv0 == this) {
1869 BitVector b0(bv0); /* copy to guard for bv0 == *this */
1870 ibvneg(bv1).ibvurem(b0, *this);
1871 }
1872 else {
1873 ibvneg(bv1).ibvurem(bv0, *this);
1874 }
1875 }
1876 else if (is_signed_bv0 && is_signed_bv1) {
1877 BitVector b1neg(bv1.bvneg());
1879 }
1880 else {
1881 ibvurem(bv0, bv1);
1882 }
1883 return *this;
1884}
1885
1887 assert(!bv0.is_null());
1888 assert(!bv1.is_null());
1889 assert(bv0.d_size == bv1.d_size);
1890 bool is_signed_bv0 = bv0.msb();
1891 bool is_signed_bv1 = bv1.msb();
1892
1893 const BitVector *b0, *b1;
1894 BitVector bb0, bb1;
1895 /* copy to guard for bv1 == *this */
1896 if (&bv0 == this) {
1897 bb0 = bv0;
1898 b0 = &bb0;
1899 }
1900 else {
1901 b0 = &bv0;
1902 }
1903 if (&bv1 == this) {
1904 bb1 = bv1;
1905 b1 = &bb1;
1906 }
1907 else {
1908 b1 = &bv1;
1909 }
1910
1911 if (is_signed_bv0 && !is_signed_bv1) {
1912 ibvneg(bv0).ibvurem(*b1);
1913 if (!is_zero()) {
1914 ibvneg().ibvadd(*b1);
1915 }
1916 }
1917 else if (!is_signed_bv0 && is_signed_bv1) {
1918 ibvneg(bv1).ibvurem(*b0, *this);
1919 if (!is_zero()) {
1920 ibvadd(*b1);
1921 }
1922 }
1923 else if (is_signed_bv0 && is_signed_bv1) {
1924 ibvneg(*b0).ibvurem(b1->bvneg());
1925 if (!is_zero()) {
1926 ibvneg();
1927 }
1928 }
1929 else {
1930 ibvurem(bv0, bv1);
1931 }
1932 return *this;
1933}
1934
1936 assert(!bv0.is_null());
1937 assert(!bv1.is_null());
1938 assert(bv0.d_size == bv1.d_size);
1939
1940 uint64_t val = bv0.is_uadd_overflow(bv1) ? 1 : 0;
1941 if (is_gmp())
1943 d_val_uint64 = val;
1944 d_size = 1;
1945 return *this;
1946}
1947
1949 assert(!bv0.is_null());
1950 assert(!bv1.is_null());
1951 assert(bv0.d_size == bv1.d_size);
1952
1953 uint64_t val = bv0.is_sadd_overflow(bv1) ? 1 : 0;
1954 if (is_gmp())
1956 d_val_uint64 = val;
1957 d_size = 1;
1958 return *this;
1959}
1960
1962 assert(!bv0.is_null());
1963 assert(!bv1.is_null());
1964 assert(bv0.d_size == bv1.d_size);
1965
1966 uint64_t val = bv0.is_usub_overflow(bv1) ? 1 : 0;
1967 if (is_gmp())
1969 d_val_uint64 = val;
1970 d_size = 1;
1971 return *this;
1972}
1973
1975 assert(!bv0.is_null());
1976 assert(!bv1.is_null());
1977 assert(bv0.d_size == bv1.d_size);
1978
1979 uint64_t val = bv0.is_ssub_overflow(bv1) ? 1 : 0;
1980 if (is_gmp())
1982 d_val_uint64 = val;
1983 d_size = 1;
1984 return *this;
1985}
1986
1988 assert(!bv0.is_null());
1989 assert(!bv1.is_null());
1990 assert(bv0.d_size == bv1.d_size);
1991
1992 uint64_t val = bv0.is_umul_overflow(bv1) ? 1 : 0;
1993 if (is_gmp())
1995 d_val_uint64 = val;
1996 d_size = 1;
1997 return *this;
1998}
1999
2001 assert(!bv0.is_null());
2002 assert(!bv1.is_null());
2003 assert(bv0.d_size == bv1.d_size);
2004
2005 uint64_t val = bv0.is_smul_overflow(bv1) ? 1 : 0;
2006 if (is_gmp())
2008 d_val_uint64 = val;
2009 d_size = 1;
2010 return *this;
2011}
2012
2014 assert(!bv0.is_null());
2015 assert(!bv1.is_null());
2016 assert(bv0.d_size == bv1.d_size);
2017
2018 uint64_t val = bv0.is_sdiv_overflow(bv1) ? 1 : 0;
2019 if (is_gmp())
2021 d_val_uint64 = val;
2022 d_size = 1;
2023 return *this;
2024}
2025
2027 assert(!bv0.is_null());
2028 assert(!bv1.is_null());
2029 uint64_t size = bv0.d_size + bv1.d_size;
2030 const BitVector *b0, *b1;
2031 BitVector bb0, bb1;
2032
2033 /* copy to guard for bv0 == *this */
2034 if (&bv0 == this) {
2035 bb0 = bv0;
2036 b0 = &bb0;
2037 }
2038 else {
2039 b0 = &bv0;
2040 }
2041 /* copy to guard for bv1 == *this */
2042 if (&bv1 == this) {
2043 bb1 = bv1;
2044 b1 = &bb1;
2045 }
2046 else {
2047 b1 = &bv1;
2048 }
2049
2050 if (size > s_native_size) {
2051 if (!is_gmp()) {
2053 }
2054 if (b0->is_gmp()) {
2055 mpz_set(d_val_gmp, b0->d_val_gmp);
2056 }
2057 else {
2058 mpz_set_ui(d_val_gmp, b0->d_val_uint64);
2059 }
2061 if (b1->is_gmp()) {
2062 mpz_add(d_val_gmp, d_val_gmp, b1->d_val_gmp);
2063 }
2064 else {
2065 mpz_add_ui(d_val_gmp, d_val_gmp, b1->d_val_uint64);
2066 }
2068 }
2069 else {
2070 if (is_gmp()) {
2072 }
2073 d_val_uint64 = b0->d_val_uint64 << b1->d_size;
2075 }
2076 d_size = size;
2077 return *this;
2078}
2079
// In-place extract: set *this to bits [idx_hi : idx_lo] (inclusive) of 'bv'.
// The resulting bit-width is idx_hi - idx_lo + 1. Returns *this.
2080BitVector &BitVector::ibvextract(const BitVector &bv, uint64_t idx_hi, uint64_t idx_lo) {
 2081 assert(!bv.is_null());
 2082 assert(idx_hi >= idx_lo);
 2083 assert(idx_hi < bv.size());
 2084 uint64_t size = idx_hi - idx_lo + 1;
 2085
 2086 if (is_gmp()) {
 // Destination currently uses the GMP representation: copy the operand
 // in, then (in lines not visible in this rendering) mask and shift it.
 2087 if (bv.is_gmp()) {
 2088 mpz_set(d_val_gmp, bv.d_val_gmp);
 2089 }
 2090 else {
 2091 mpz_set_ui(d_val_gmp, bv.d_val_uint64);
 2092 }
 // NOTE(review): the truncate-to-(idx_hi+1)-bits and >> idx_lo steps are
 // missing from this rendering -- verify against the upstream source.
 2095 if (size <= s_native_size) {
 // Result fits the native word: demote to uint64 representation.
 // NOTE(review): the extraction of 'val' from the GMP value (and the
 // representation switch) is missing from this rendering.
 2098 d_val_uint64 = val;
 2099 }
 2100 }
 2101 else {
 2102 if (size > s_native_size) {
 // Result too wide for the native word: promote to GMP.
 // NOTE(review): the GMP initialization is missing from this rendering.
 2104 if (bv.is_gmp()) {
 2105 mpz_set(d_val_gmp, bv.d_val_gmp);
 2106 }
 2107 else {
 2108 mpz_set_ui(d_val_gmp, bv.d_val_uint64);
 2109 }
 // NOTE(review): mask/shift steps missing from this rendering.
 2112 }
 2113 else {
 2114 if (bv.is_gmp()) {
 // Work on a temporary so the operand is not clobbered.
 2115 mpz_t tmp;
 2116 mpz_init_set(tmp, bv.d_val_gmp);
 // NOTE(review): the truncate/shift/convert-to-uint64 steps on 'tmp'
 // are missing from this rendering.
 2120 mpz_clear(tmp);
 2121 }
 2122 else {
 // Native path: keep the low idx_hi+1 bits, then drop the low idx_lo.
 2123 d_val_uint64 = uint64_fdiv_r_2exp(idx_hi + 1, bv.d_val_uint64);
 2124 d_val_uint64 >>= idx_lo;
 2125 }
 2126 }
 2127 }
 2128 d_size = size;
 2129 return *this;
 2130}
2131
// In-place zero extension: set *this to 'bv' extended by 'n' zero bits at
// the most significant end (resulting width bv.size() + n). Returns *this.
2132BitVector &BitVector::ibvzext(const BitVector &bv, uint64_t n) {
 2133 assert(!bv.is_null());
 2134
 // Extending self by zero bits is a no-op.
 2135 if (n == 0 && &bv == this)
 2136 return *this;
 2137
 2138 uint64_t size = bv.d_size + n;
 2139
 2140 if (is_gmp()) {
 // Destination currently GMP: copy the value; zero extension adds no
 // set bits, so no masking is needed.
 2141 if (bv.is_gmp()) {
 2142 mpz_set(d_val_gmp, bv.d_val_gmp);
 2143 }
 2144 else {
 2145 mpz_set_ui(d_val_gmp, bv.d_val_uint64);
 2146 }
 2147 if (size <= s_native_size) {
 // Extended width still fits the native word: demote.
 // NOTE(review): the extraction of 'val' from the GMP value (and the
 // representation switch) is missing from this rendering.
 2150 d_val_uint64 = val;
 2151 }
 2152 }
 2153 else {
 2154 if (size > s_native_size) {
 /* copy to guard for bv == *this */
 2156 const BitVector *b;
 2157 BitVector bb;
 2158 if (&bv == this) {
 2159 bb = bv;
 2160 b = &bb;
 2161 }
 2162 else {
 2163 b = &bv;
 2164 }
 // NOTE(review): the promotion to the GMP representation is missing
 // from this rendering -- verify upstream.
 2166 if (b->is_gmp()) {
 2167 mpz_set(d_val_gmp, b->d_val_gmp);
 2168 }
 2169 else {
 2170 mpz_set_ui(d_val_gmp, b->d_val_uint64);
 2171 }
 2172 }
 2173 else {
 // Native-to-native: the stored value is unchanged, only d_size grows.
 2174 d_val_uint64 = bv.d_val_uint64;
 2175 }
 2176 }
 2177 d_size = size;
 2178 return *this;
 2179}
2180
// In-place sign extension: set *this to 'bv' extended by 'n' copies of its
// most significant bit (resulting width bv.size() + n). For a non-negative
// operand this degenerates to zero extension. Returns *this.
2181BitVector &BitVector::ibvsext(const BitVector &bv, uint64_t n) {
 2182 assert(!bv.is_null());
 2183
 2184 if (n > 0) {
 /* copy to guard for bv == *this */
 2186 const BitVector *b;
 2187 BitVector bb;
 2188 if (&bv == this) {
 2189 bb = bv;
 2190 b = &bb;
 2191 }
 2192 else {
 2193 b = &bv;
 2194 }
 2195 if (b->msb()) {
 // Negative operand: the extension bits are all ones. The visible
 // mpz_add calls suggest the implementation first builds the
 // high-ones mask in the destination (lines missing from this
 // rendering) and then adds the operand's value -- verify upstream.
 // NOTE(review): the declaration of 'b_size' is missing from this
 // rendering (presumably b->d_size).
 2197 uint64_t size = b_size + n;
 2198 if (is_gmp()) {
 // NOTE(review): mask-construction statements missing from this
 // rendering.
 2203 if (b->is_gmp()) {
 2204 mpz_add(d_val_gmp, d_val_gmp, b->d_val_gmp);
 2205 }
 2206 else {
 2207 mpz_add_ui(d_val_gmp, d_val_gmp, b->d_val_uint64);
 2208 }
 2210 if (size <= s_native_size) {
 // Result fits the native word: demote.
 // NOTE(review): extraction of 'val' missing from this rendering.
 2213 d_val_uint64 = val;
 2214 }
 2215 }
 2216 else {
 2217 if (size > s_native_size) {
 // NOTE(review): promotion to GMP and mask construction are
 // missing from this rendering.
 2222 if (b->is_gmp()) {
 2223 mpz_add(d_val_gmp, d_val_gmp, b->d_val_gmp);
 2224 }
 2225 else {
 2226 mpz_add_ui(d_val_gmp, d_val_gmp, b->d_val_uint64);
 2227 }
 2229 }
 2230 else {
 // Native path: add the operand to the (previously constructed)
 // high-ones mask and truncate to the extended width.
 // NOTE(review): the mask assignment preceding this addition is
 // missing from this rendering.
 2232 d_val_uint64 =
 2233 uint64_fdiv_r_2exp(size, d_val_uint64 + b->d_val_uint64);
 2234 }
 2235 }
 2236 d_size = size;
 2237 }
 2238 else {
 // Non-negative operand: sign extension == zero extension.
 2239 ibvzext(bv, n);
 2240 }
 2241 }
 2242 else if (&bv != this) {
 // n == 0 with distinct source: plain copy of value and width.
 2243 if (is_gmp()) {
 2244 if (bv.is_gmp()) {
 2245 mpz_set(d_val_gmp, bv.d_val_gmp);
 2246 }
 2247 else {
 // NOTE(review): the representation switch preceding this
 // assignment is missing from this rendering.
 2249 d_val_uint64 = bv.d_val_uint64;
 2250 }
 2251 }
 2252 else {
 2253 if (bv.is_gmp()) {
 // NOTE(review): the representation switch preceding this copy is
 // missing from this rendering.
 2255 mpz_set(d_val_gmp, bv.d_val_gmp);
 2256 }
 2257 else {
 2258 d_val_uint64 = bv.d_val_uint64;
 2259 }
 2260 }
 2261 d_size = bv.d_size;
 2262 }
 2263 return *this;
 2264}
2265
2267 assert(!bv.is_null());
2268 assert(n > 0);
2269
2270 uint64_t size = n * bv.d_size;
2271 const BitVector *b;
2272 BitVector bb;
2273
2274 /* copy to guard for bv0 == *this */
2275 if (&bv == this) {
2276 bb = bv;
2277 b = &bb;
2278 }
2279 else {
2280 b = &bv;
2281 }
2282
2283 if (size > s_native_size) {
2284 if (!is_gmp()) {
2286 }
2287 if (b->is_gmp()) {
2288 mpz_set(d_val_gmp, b->d_val_gmp);
2289 }
2290 else {
2291 mpz_set_ui(d_val_gmp, b->d_val_uint64);
2292 }
2293
2294 for (uint64_t i = 1; i < n; ++i) {
2296 if (b->is_gmp()) {
2297 mpz_add(d_val_gmp, d_val_gmp, b->d_val_gmp);
2298 }
2299 else {
2300 mpz_add_ui(d_val_gmp, d_val_gmp, b->d_val_uint64);
2301 }
2302 }
2304 }
2305 else {
2306 if (is_gmp()) {
2308 }
2309 d_val_uint64 = b->d_val_uint64;
2310 for (uint64_t i = 1; i < n; ++i) {
2311 d_val_uint64 <<= b->d_size;
2313 }
2314 }
2315 d_size = size;
2316 return *this;
2317}
2318
// In-place rotate left by an immediate: *this = bv rotated left by
// n % bv.size() bits. Returns a reference to *this.
2319BitVector &BitVector::ibvroli(const BitVector &bv, uint64_t n) {
 2320 assert(!bv.is_null());
 2321
 2322 uint64_t size = bv.d_size;
 // Rotation is periodic in the bit-width.
 2323 uint64_t rot = n % size;
 2324 const BitVector *b;
 2325 BitVector bb;
 2326
 /* copy to guard for bv0 == *this */
 2328 if (&bv == this) {
 2329 bb = bv;
 2330 b = &bb;
 2331 }
 2332 else {
 2333 b = &bv;
 2334 }
 2335
 2336 if (size > s_native_size) {
 2337 if (!is_gmp()) {
 // NOTE(review): the representation-switch statement is missing from
 // this rendering -- verify against the upstream source.
 2339 }
 2340 if (b->is_gmp()) {
 2341 mpz_set(d_val_gmp, b->d_val_gmp);
 2342 }
 2343 else {
 2344 mpz_set_ui(d_val_gmp, b->d_val_uint64);
 2345 }
 2346 if (rot) {
 // shl by number of bits to rotate left
 // NOTE(review): the in-place left shift statement is missing from
 // this rendering.
 // add bits that were rotated out
 // d_size must be set before ibvadd so it operates on the new width.
 2350 d_size = size;
 2351 ibvadd(b->bvshr(size - rot));
 2352 }
 2353 }
 2354 else {
 2355 if (is_gmp()) {
 // NOTE(review): representation-switch statement missing from this
 // rendering.
 2357 }
 // Native path: (val << rot | val >> (size - rot)), realized as two
 // truncated terms combined by addition (the truncations make the
 // terms disjoint, so + is equivalent to |).
 2358 d_val_uint64 = uint64_fdiv_r_2exp(size, b->d_val_uint64 << rot);
 2359 if (rot) {
 2360 d_val_uint64 += uint64_fdiv_r_2exp(size, b->d_val_uint64 >> (size - rot));
 2361 }
 2362 d_size = size;
 2363 }
 2364 return *this;
 2365}
2366
2368 assert(!bv.is_null());
2369 assert(!n.is_null());
2370 assert(bv.d_size == n.d_size);
2371
2372 uint64_t in;
2373 uint64_t size = bv.d_size;
2374
2375 if (!n.shift_is_uint64(&in)) {
2377 bool isuint = m.shift_is_uint64(&in);
2378 (void)isuint;
2379 assert(isuint);
2380 }
2381 ibvroli(bv, in);
2382 d_size = size;
2383 return *this;
2384}
2385
// In-place rotate right by an immediate: *this = bv rotated right by
// n % bv.size() bits. Returns a reference to *this.
2386BitVector &BitVector::ibvrori(const BitVector &bv, uint64_t n) {
 2387 assert(!bv.is_null());
 2388
 2389 uint64_t size = bv.d_size;
 // Rotation is periodic in the bit-width.
 2390 uint64_t rot = n % size;
 2391 const BitVector *b;
 2392 BitVector bb;
 2393
 /* copy to guard for bv0 == *this */
 2395 if (&bv == this) {
 2396 bb = bv;
 2397 b = &bb;
 2398 }
 2399 else {
 2400 b = &bv;
 2401 }
 2402
 2403 if (size > s_native_size) {
 2404 if (!is_gmp()) {
 // NOTE(review): the representation-switch statement is missing from
 // this rendering -- verify against the upstream source.
 2406 }
 2407 if (b->is_gmp()) {
 2408 mpz_set(d_val_gmp, b->d_val_gmp);
 2409 }
 2410 else {
 2411 mpz_set_ui(d_val_gmp, b->d_val_uint64);
 2412 }
 2413 if (rot) {
 // shr by number of bits to rotate right
 // NOTE(review): the in-place right shift statement is missing from
 // this rendering.
 // add bits that were rotated out
 // d_size must be set before ibvadd so it operates on the new width.
 2417 d_size = size;
 2418 ibvadd(b->bvshl(size - rot));
 2419 }
 2420 }
 2421 else {
 2422 if (is_gmp()) {
 // NOTE(review): representation-switch statement missing from this
 // rendering.
 2424 }
 // Native path: (val >> rot | val << (size - rot)), realized as two
 // truncated terms combined by addition (the truncations make the
 // terms disjoint, so + is equivalent to |).
 2425 d_val_uint64 = uint64_fdiv_r_2exp(size, b->d_val_uint64 >> rot);
 2426 if (rot) {
 2427 d_val_uint64 += uint64_fdiv_r_2exp(size, b->d_val_uint64 << (size - rot));
 2428 }
 2429 d_size = size;
 2430 }
 2431 return *this;
 2432}
2433
2435 assert(!bv.is_null());
2436 assert(!n.is_null());
2437 assert(bv.d_size == n.d_size);
2438
2439 uint64_t in;
2440 uint64_t size = bv.d_size;
2441
2442 if (!n.shift_is_uint64(&in)) {
2444 bool isuint = m.shift_is_uint64(&in);
2445 (void)isuint;
2446 assert(isuint);
2447 }
2448 ibvrori(bv, in);
2449 d_size = size;
2450 return *this;
2451}
2452
2454 assert(!c.is_null());
2455 assert(!t.is_null());
2456 assert(!e.is_null());
2457 assert(c.d_size == 1);
2458 assert(e.d_size == t.d_size);
2459
2460 uint64_t size = t.d_size;
2461
2462 if (c.is_true()) {
2463 if (t.is_gmp()) {
2464 if (!is_gmp()) {
2466 }
2467 mpz_set(d_val_gmp, t.d_val_gmp);
2468 }
2469 else {
2470 if (is_gmp()) {
2472 }
2473 d_val_uint64 = t.d_val_uint64;
2474 }
2475 }
2476 else {
2477 if (e.is_gmp()) {
2478 if (!is_gmp()) {
2480 }
2482 }
2483 else {
2484 if (is_gmp()) {
2486 }
2488 }
2489 }
2490 d_size = size;
2491 return *this;
2492}
2493
2495 assert(!bv.is_null());
2496 assert(bv.lsb()); /* must be odd */
2497
2498 const BitVector *pb;
2499 BitVector bb;
2500
2501 if (&bv == this) {
2502 bb = bv;
2503 pb = &bb;
2504 }
2505 else {
2506 pb = &bv;
2507 }
2508
2510
2511 if (d_size == 1) {
2512 if (pb->is_gmp()) {
2513 if (!is_gmp()) {
2515 }
2516 else {
2518 }
2519 }
2520 else {
2521 if (is_gmp()) {
2523 }
2524 d_val_uint64 = 1;
2525 }
2526 }
2527 else {
2528 if (pb->is_gmp()) {
2529 if (!is_gmp()) {
2531 }
2532 mpz_t two;
2533 mpz_init(two);
2535 mpz_invert(d_val_gmp, pb->d_val_gmp, two);
2537 mpz_clear(two);
2538 }
2539 else {
2540 if (is_gmp()) {
2542 }
2543 /* a = 2^bw
2544 * b = bv
2545 * lx * a + ly * b = gcd (a, b) = 1
2546 * -> lx * a = lx * 2^bw = 0 (2^bw_[bw] = 0)
2547 * -> ly * b = bv^-1 * bv = 1
2548 * -> ly is modular inverse of bv */
2549 uint64_t esize = size + 1;
2550 BitVector a(esize), b(esize);
2551
2552 a.set_bit(size, 1); /* 2^d_size */
2553 /* b is this bit-vector extended to esize */
2554 if (esize > s_native_size) {
2555 mpz_set_ui(b.d_val_gmp, pb->d_val_uint64);
2556 }
2557 else {
2558 b.d_val_uint64 = pb->d_val_uint64;
2559 }
2560
2561 BitVector y = mk_one(esize), ty, yq;
2563 BitVector q, r;
2564 while (!b.is_zero()) {
2565 a.bvudivurem(b, &q, &r);
2566 a = b;
2567 b = r;
2568 ty = y;
2569 yq = y.bvmul(q);
2570 y = ly.bvsub(yq);
2571 ly = ty;
2572 }
2573 d_val_uint64 = ly.bvextract(size - 1, 0).d_val_uint64;
2574 }
2575 }
2576 d_size = size;
2577#ifndef NDEBUG
2578 mpz_t ty;
2579 mpz_init(ty);
2580 if (is_gmp()) {
2581 mpz_mul(ty, pb->d_val_gmp, d_val_gmp);
2582 }
2583 else {
2584 if (pb->is_gmp()) {
2585 mpz_mul_ui(ty, pb->d_val_gmp, d_val_uint64);
2586 }
2587 else {
2588 mpz_set_ui(ty, pb->d_val_uint64);
2590 }
2591 }
2593 assert(!mpz_cmp_ui(ty, 1));
2594 mpz_clear(ty);
2595#endif
2596 return *this;
2597}
2598
2599/* -------------------------------------------------------------------------- */
2600/* Bit-vector operations, in-place, 'this' is first argument. */
2601/* -------------------------------------------------------------------------- */
2602
2604 ibvneg(*this);
2605 return *this;
2606}
2607
2609 ibvnot(*this);
2610 return *this;
2611}
2612
2614 ibvinc(*this);
2615 return *this;
2616}
2617
2619 ibvdec(*this);
2620 return *this;
2621}
2622
2624 ibvredand(*this);
2625 return *this;
2626}
2627
2629 ibvredor(*this);
2630 return *this;
2631}
2632
2634 ibvredxor(*this);
2635 return *this;
2636}
2637
2639 ibvnego(*this);
2640 return *this;
2641}
2642
2644 ibvadd(*this, bv);
2645 return *this;
2646}
2647
2649 ibvand(*this, bv);
2650 return *this;
2651}
2652
2654 ibvimplies(*this, bv);
2655 return *this;
2656}
2657
2659 ibvnand(*this, bv);
2660 return *this;
2661}
2662
2664 ibvnor(*this, bv);
2665 return *this;
2666}
2667
2669 ibvor(*this, bv);
2670 return *this;
2671}
2672
2674 ibvsub(*this, bv);
2675 return *this;
2676}
2677
2679 ibvxnor(*this, bv);
2680 return *this;
2681}
2682
2684 ibvxor(*this, bv);
2685 return *this;
2686}
2687
2689 ibveq(*this, bv);
2690 return *this;
2691}
2692
2694 ibvne(*this, bv);
2695 return *this;
2696}
2697
2699 ibvult(*this, bv);
2700 return *this;
2701}
2702
2704 ibvule(*this, bv);
2705 return *this;
2706}
2707
2709 ibvugt(*this, bv);
2710 return *this;
2711}
2712
2714 ibvuge(*this, bv);
2715 return *this;
2716}
2717
2719 ibvslt(*this, bv);
2720 return *this;
2721}
2722
2724 ibvsle(*this, bv);
2725 return *this;
2726}
2727
2729 ibvsgt(*this, bv);
2730 return *this;
2731}
2732
2734 ibvsge(*this, bv);
2735 return *this;
2736}
2737
2739 ibvshl(*this, shift);
2740 return *this;
2741}
2742
2744 ibvshl(*this, bv);
2745 return *this;
2746}
2747
2749 ibvshr(*this, shift);
2750 return *this;
2751}
2752
2754 ibvshr(*this, bv);
2755 return *this;
2756}
2757
2759 ibvashr(*this, shift);
2760 return *this;
2761}
2762
2764 ibvashr(*this, bv);
2765 return *this;
2766}
2767
2769 ibvmul(*this, bv);
2770 return *this;
2771}
2772
2774 ibvudiv(*this, bv);
2775 return *this;
2776}
2777
2779 ibvurem(*this, bv);
2780 return *this;
2781}
2782
2784 ibvsdiv(*this, bv);
2785 return *this;
2786}
2787
2789 ibvsrem(*this, bv);
2790 return *this;
2791}
2792
2794 ibvsmod(*this, bv);
2795 return *this;
2796}
2797
2799 ibvuaddo(*this, bv);
2800 return *this;
2801}
2802
2804 ibvsaddo(*this, bv);
2805 return *this;
2806}
2807
2809 ibvusubo(*this, bv);
2810 return *this;
2811}
2812
2814 ibvssubo(*this, bv);
2815 return *this;
2816}
2817
2819 ibvumulo(*this, bv);
2820 return *this;
2821}
2822
2824 ibvsmulo(*this, bv);
2825 return *this;
2826}
2827
2829 ibvsdivo(*this, bv);
2830 return *this;
2831}
2832
2834 ibvconcat(*this, bv);
2835 return *this;
2836}
2837
2838BitVector &BitVector::ibvextract(uint64_t idx_hi, uint64_t idx_lo) {
2839 ibvextract(*this, idx_hi, idx_lo);
2840 return *this;
2841}
2842
2844 ibvzext(*this, n);
2845 return *this;
2846}
2847
2849 ibvsext(*this, n);
2850 return *this;
2851}
2852
2854 ibvrepeat(*this, n);
2855 return *this;
2856}
2857
2859 ibvroli(*this, n);
2860 return *this;
2861}
2862
2864 ibvrol(*this, n);
2865 return *this;
2866}
2867
2869 ibvrori(*this, n);
2870 return *this;
2871}
2872
2874 ibvror(*this, n);
2875 return *this;
2876}
2877
2879 ibvmodinv(*this);
2880 return *this;
2881}
2882
// In-place modular exponentiation: *this = base^exp mod 2^size(), where the
// modulus is fixed by this bit-vector's width and 'exp' is an
// arbitrary-precision GMP exponent. Returns a reference to *this.
2883BitVector &BitVector::ibvpow(const BitVector &base, const mpz_t exp) {
 2884 mpz_t mod;
 // mod = 2^size()
 2885 mpz_init_set_ull(mod, 1);
 2886 mpz_mul_2exp_ull(mod, mod, size());
 2887 if (is_gmp()) {
 // Destination and base must use the same representation.
 2888 assert(base.is_gmp());
 2889 mpz_powm(d_val_gmp, base.d_val_gmp, exp, mod);
 2890 }
 2891 else {
 2892 assert(!base.is_gmp());
 // Compute in a GMP temporary, since the intermediate power can exceed
 // the native word even when the final result fits.
 2893 mpz_t res;
 2894 mpz_init_set_ull(res, base.d_val_uint64);
 2895 mpz_powm(res, res, exp, mod);
 // NOTE(review): the store of 'res' into d_val_uint64 (presumably via
 // mpz_get_ull) is missing from this rendering -- verify upstream.
 2897 mpz_clear(res);
 2898 }
 2899 mpz_clear(mod);
 2900 return *this;
 2901}
2902
2903BitVector &BitVector::ibvpow(const mpz_t exp) {
2904 ibvpow(*this, exp);
2905 return *this;
2906}
2907
2908/* -------------------------------------------------------------------------- */
2909
// Compute unsigned quotient and remainder of *this / bv in one pass.
// Division by zero follows SMT-LIB semantics: quotient = all ones,
// remainder = dividend. 'quot' and 'rem' must be distinct; either may alias
// *this or bv (guarded by copies below).
2910void BitVector::bvudivurem(const BitVector &bv, BitVector *quot, BitVector *rem) const {
 2911 assert(!is_null());
 2912 assert(!bv.is_null());
 2913 assert(quot != rem);
 2914 assert(d_size == bv.d_size);
 2915
 2916 if (bv.is_zero()) {
 // Division by zero: rem = dividend, quot = ~0 (all ones).
 2917 *rem = *this;
 2918 *quot = mk_ones(d_size);
 2919 }
 2920 else {
 2921 if (is_gmp()) {
 2922 const BitVector *a, *b;
 2923 BitVector aa, bb;
 /* copy to guard for quot == *this and rem == *this */
 2925 if (this == quot || this == rem) {
 2926 aa = *this;
 2927 a = &aa;
 2928 }
 2929 else {
 2930 a = this;
 2931 }
 /* copy to guard for bv == *quot or bv == *rem */
 2933 if (&bv == quot || &bv == rem) {
 2934 bb = bv;
 2935 b = &bb;
 2936 }
 2937 else {
 2938 b = &bv;
 2939 }
 // Reset outputs to GMP zero of the right width, then divide and
 // truncate both results to d_size bits.
 2940 *quot = mk_zero(d_size);
 2941 *rem = mk_zero(d_size);
 2942 mpz_fdiv_qr(quot->d_val_gmp, rem->d_val_gmp, a->d_val_gmp, b->d_val_gmp);
 2943 mpz_fdiv_r_2exp_ull(quot->d_val_gmp, quot->d_val_gmp, d_size);
 2944 mpz_fdiv_r_2exp_ull(rem->d_val_gmp, rem->d_val_gmp, d_size);
 2945 }
 2946 else {
 /* copy to guard for quot == *this and rem == *this */
 // NOTE(review): the declaration of 'a' (presumably
 // uint64_t a = d_val_uint64) is missing from this rendering.
 /* copy to guard for bv == *quot or bv == *rem */
 2950 uint64_t b = bv.d_val_uint64;
 2951 *quot = from_ui(d_size, a / b);
 2952 *rem = from_ui(d_size, a % b);
 2953 }
 2954 }
 2955}
2956
// Read-only access to the underlying GMP value. Only valid when this
// bit-vector uses the GMP representation (asserted below).
2957const mpz_t &BitVector::gmp_value() const {
 2958 assert(is_gmp());
 2959 return d_val_gmp;
 2960}
2961
// Convert this bit-vector's (unsigned) value to an mpz_class, regardless of
// the internal representation.
2962mpz_class BitVector::to_mpz() const {
 2963 if (is_gmp()) {
 2964 return mpz_class(d_val_gmp);
 2965 }
 // NOTE(review): the non-GMP return statement (converting d_val_uint64,
 // presumably via uint64_to_mpz_class) is missing from this rendering --
 // verify against the upstream source.
 2967}
2968
2969/* -------------------------------------------------------------------------- */
2970
2971#define BZLA_BV_MASK_BITS_UINT64(size)
2972
2973uint64_t BitVector::uint64_fdiv_r_2exp(uint64_t size, uint64_t val) {
2974 assert(size <= 64);
2975 if (size == 64)
2976 return val;
2977 return val & (UINT64_MAX >> (64 - size));
2978}
2979
// Count the number of leading zeros (zeros == true) or leading ones
// (zeros == false) of this bit-vector, scanning from the most significant
// bit. Works limb-wise via get_limb().
// NOTE(review): several declarations (the limb variable, n_bits_per_limb,
// nbits_rem, n_limbs and the get_limb call that fills them) are missing
// from this rendering -- verify against the upstream source.
2980uint64_t BitVector::count_leading(bool zeros) const {
 2981 assert(!is_null());
 2982 uint64_t res = 0;
 2984
 /* The number of bits that spill over into the most significant limb,
 2987 * assuming that all bits are represented). Zero if the bit-width is a
 2988 * multiple of n_bits_per_limb. */
 /* The number of limbs required to represent the actual value.
 2991 * Zero limbs are disregarded. */
 2993
 // Value is zero (or all ones when counting ones): every bit matches.
 2994 if (n_limbs == 0)
 2995 return d_size;
 2996#if defined(__GNUC__) || defined(__clang__)
 // Use the compiler builtin matching the limb width.
 2997 res =
 2998 n_bits_per_limb == 64
 2999 ? static_cast<uint64_t>(__builtin_clzll(static_cast<uint64_t>(limb)))
 3000 : static_cast<uint64_t>(__builtin_clz(static_cast<uint32_t>(limb)))
 3001#else
 // NOTE(review): the portable clz_limb fallback call is missing from this
 // rendering.
 3003#endif
 /* Number of limbs required when representing all bits. */
 // NOTE(review): the adjustment of 'res' for partial top limbs and for
 // skipped all-zero limbs is missing from this rendering.
 3008 return res;
 3009}
3010
// Fetch the most significant non-trivial limb of this bit-vector into
// *limb, for use by count_leading(). 'nbits_rem' is the number of valid
// bits in the top limb (0 if the width is a limb multiple); 'zeros' selects
// leading-zero (true) vs. leading-one (false) counting. Returns the number
// of limbs that still need to be considered by the caller.
// NOTE(review): several declarations (i, n_limbs, n_limbs_total) and a
// number of branch bodies are missing from this rendering -- verify against
// the upstream source.
3011uint64_t BitVector::get_limb(void *limb, uint64_t nbits_rem, bool zeros) const {
 3012 assert(!is_null());
 3013 mp_limb_t *gmp_limb = static_cast<mp_limb_t *>(limb);
 3015 mp_limb_t res = 0u, mask;
 3016 const int32_t bits_per_limb = sizeof(d_val_uint64) * 8;
 3017
 3018 if (is_gmp()) {
 /* GMP normalizes the limbs, the left most (most significant) is never 0 */
 // NOTE(review): the mpz_size-based limb count is missing from this
 // rendering.
 3021 }
 3022 else {
 3023 if (d_val_uint64 == 0) {
 3024 n_limbs = 0;
 3025 }
 3026 else {
 // NOTE(review): the condition of this limb-count expression is
 // missing from this rendering.
 3029 ? 1
 3030 : bits_per_limb / static_cast<uint64_t>(mp_bits_per_limb);
 3031 }
 3032 }
 3033
 /* for leading zeros */
 3035 if (zeros) {
 3036 if (is_gmp()) {
 // NOTE(review): the GMP top-limb fetch is missing from this
 // rendering.
 3038 }
 3039 else {
 3040 if (n_limbs == 0) {
 3041 *gmp_limb = 0;
 3042 }
 3043 else {
 // NOTE(review): parts of this conditional expression are missing
 // from this rendering.
 3044 *gmp_limb = n_limbs == 1 ? d_val_uint64
 3046 ? 0
 3048 }
 3049 }
 3050 return n_limbs;
 3051 }
 3052
 /* for leading ones */
 // NOTE(review): the assignment target (n_limbs_total =) of the following
 // expression is missing from this rendering.
 3055 d_size / static_cast<uint64_t>(mp_bits_per_limb) + (nbits_rem ? 1 : 0);
 3056 if (n_limbs != n_limbs_total) {
 /* no leading ones, simulate */
 3058 *gmp_limb = nbits_rem ? ~(~((mp_limb_t)0) << nbits_rem) : ~((mp_limb_t)0);
 3059 return n_limbs_total;
 3060 }
 // Mask of the unused (padding) bits of the top limb.
 3061 mask = ~((mp_limb_t)0) << nbits_rem;
 3062 for (i = 0; i < n_limbs; i++) {
 3063 if (is_gmp()) {
 // NOTE(review): the GMP limb fetch is missing from this rendering.
 3065 }
 3066 else if (mp_bits_per_limb >= bits_per_limb) {
 3067 res = d_val_uint64;
 3068 }
 3069 else {
 // NOTE(review): the sub-word limb extraction is missing from this
 // rendering.
 3072 }
 // Pad the top limb's unused bits with ones so they do not break the
 // leading-ones run.
 3073 if (nbits_rem && i == 0) {
 3074 res = res | mask;
 3075 }
 // Invert: a run of leading ones becomes a run of leading zeros.
 3076 res = ~res;
 3077 if (res > 0)
 3078 break;
 3079 }
 3080 *gmp_limb = res;
 3081 return n_limbs - i;
 3082}
3083
// Determine whether this bit-vector's value, interpreted as a shift amount,
// fits into a uint64_t. On success store the value in *res and return true.
3084bool BitVector::shift_is_uint64(uint64_t *res) const {
 3085 if (d_size <= 64) {
 // Narrow vectors always fit.
 3086 *res = to_uint64();
 3087 return true;
 3088 }
 3089
 // NOTE(review): the computation of 'clz' (count of leading zeros of this
 // bit-vector) is missing from this rendering -- verify upstream.
 // Fits only if at most 64 significant (non-leading-zero) bits remain.
 3091 if (clz < d_size - 64)
 3092 return false;
 3093
 // Extract the significant low part (at most 64 bits wide) and convert.
 3094 BitVector shift = bvextract(clz < d_size ? d_size - 1 - clz : 0, 0);
 3095 assert(shift.d_size <= 64);
 3096 *res = shift.to_uint64();
 3097 return true;
 3098}
3099
3100std::ostream &operator<<(std::ostream &out, const BitVector &bv) {
3101 out << bv.str();
3102 return out;
3103}
3104
3105/* -------------------------------------------------------------------------- */
3106
3107} // namespace stabilizer::util
BitVector & ibvsext(const BitVector &bv, uint64_t n)
BitVector bvmodinv() const
static BitVector mk_min_signed(uint64_t size)
BitVector bvumulo(const BitVector &bv) const
BitVector & ibveq(const BitVector &bv0, const BitVector &bv1)
BitVector bvredor() const
BitVector bvshl(uint64_t shift) const
BitVector bvudiv(const BitVector &bv) const
BitVector & ibvpow(const BitVector &base, const mpz_t exp)
uint64_t to_uint64(bool truncate=false) const
BitVector & ibvsdivo(const BitVector &bv0, const BitVector &bv1)
BitVector & ibvule(const BitVector &bv0, const BitVector &bv1)
BitVector bvusubo(const BitVector &bv) const
BitVector bvand(const BitVector &bv) const
BitVector & ibvsrem(const BitVector &bv0, const BitVector &bv1)
BitVector bvroli(uint64_t n) const
BitVector bvnego() const
BitVector & ibvsub(const BitVector &bv0, const BitVector &bv1)
static BitVector mk_max_signed(uint64_t size)
BitVector bvslt(const BitVector &bv) const
uint64_t count_leading_ones() const
BitVector & ibvult(const BitVector &bv0, const BitVector &bv1)
static BitVector mk_one(uint64_t size)
BitVector & ibvsgt(const BitVector &bv0, const BitVector &bv1)
BitVector & ibvinc(const BitVector &bv)
BitVector & ibvsmod(const BitVector &bv0, const BitVector &bv1)
BitVector & ibvuaddo(const BitVector &bv0, const BitVector &bv1)
BitVector bvextract(uint64_t idx_hi, uint64_t idx_lo) const
BitVector bvshr(uint64_t shift) const
BitVector & ibvzext(const BitVector &bv, uint64_t n)
BitVector bvredand() const
static BitVector from_si(uint64_t size, int64_t value, bool truncate=false)
static uint64_t uint64_fdiv_r_2exp(uint64_t size, uint64_t val)
BitVector & ibvmodinv(const BitVector &bv)
uint64_t count_trailing_zeros() const
uint64_t count_trailing_ones() const
BitVector bvashr(uint64_t shift) const
bool operator!=(const BitVector &bv) const
BitVector bvsle(const BitVector &bv) const
BitVector & ibvrori(const BitVector &bv, uint64_t n)
BitVector bveq(const BitVector &bv) const
BitVector & ibvxor(const BitVector &bv0, const BitVector &bv1)
BitVector bvinc() const
BitVector bvsub(const BitVector &bv) const
BitVector bvnand(const BitVector &bv) const
BitVector & operator=(const BitVector &other)
bool is_sadd_overflow(const BitVector &bv) const
bool is_uadd_overflow(const BitVector &bv) const
BitVector & ibvnand(const BitVector &bv0, const BitVector &bv1)
static BitVector mk_ones(uint64_t size)
BitVector & ibvnot(const BitVector &bv)
BitVector & ibvite(const BitVector &c, const BitVector &t, const BitVector &e)
BitVector & ibvudiv(const BitVector &bv0, const BitVector &bv1)
BitVector bvsaddo(const BitVector &bv) const
BitVector bvsdiv(const BitVector &bv) const
BitVector & ibvxnor(const BitVector &bv0, const BitVector &bv1)
BitVector bvrori(uint64_t n) const
BitVector bvconcat(const BitVector &bv) const
BitVector & ibvor(const BitVector &bv0, const BitVector &bv1)
BitVector bvule(const BitVector &bv) const
BitVector & ibvredand(const BitVector &bv)
void flip_bit(uint64_t idx)
BitVector bvsext(uint64_t n) const
static BitVector mk_false()
BitVector bvurem(const BitVector &bv) const
bool is_sdiv_overflow(const BitVector &bv) const
void set_bit(uint64_t idx, bool value)
BitVector bvuge(const BitVector &bv) const
BitVector & ibvredxor(const BitVector &bv)
bool is_smul_overflow(const BitVector &bv) const
uint64_t get_limb(void *limb, uint64_t nbits_rem, bool zeros) const
uint64_t count_leading_zeros() const
BitVector bvsdivo(const BitVector &bv) const
BitVector & ibvuge(const BitVector &bv0, const BitVector &bv1)
BitVector & ibvextract(const BitVector &bv, uint64_t idx_hi, uint64_t idx_lo)
BitVector & ibvashr(const BitVector &bv, uint64_t shift)
BitVector & ibvsge(const BitVector &bv0, const BitVector &bv1)
static BitVector mk_zero(uint64_t size)
BitVector bvpow(const mpz_t exp) const
BitVector bvzext(uint64_t n) const
BitVector bvneg() const
BitVector & ibvnor(const BitVector &bv0, const BitVector &bv1)
BitVector bvdec() const
BitVector bvor(const BitVector &bv) const
void bvudivurem(const BitVector &bv, BitVector *quot, BitVector *rem) const
BitVector bvmul(const BitVector &bv) const
BitVector bvrol(const BitVector &n) const
std::string str(uint32_t base=2) const
BitVector & ibvumulo(const BitVector &bv0, const BitVector &bv1)
BitVector bvnot() const
int32_t compare(const BitVector &bv) const
uint64_t count_leading(bool zeros) const
BitVector bvssubo(const BitVector &bv) const
BitVector bvxnor(const BitVector &bv) const
static BitVector mk_true()
BitVector bvsge(const BitVector &bv) const
BitVector & ibvsaddo(const BitVector &bv0, const BitVector &bv1)
BitVector & ibvadd(const BitVector &bv0, const BitVector &bv1)
BitVector & ibvsle(const BitVector &bv0, const BitVector &bv1)
BitVector bvnor(const BitVector &bv) const
static BitVector from_ui(uint64_t size, uint64_t value, bool truncate=false)
BitVector bvrepeat(uint64_t n) const
BitVector bvuaddo(const BitVector &bv) const
bool operator==(const BitVector &bv) const
BitVector bvsmulo(const BitVector &bv) const
BitVector bvredxor() const
BitVector & ibvrepeat(const BitVector &bv, uint64_t n)
BitVector & ibvneg(const BitVector &bv)
BitVector & ibvugt(const BitVector &bv0, const BitVector &bv1)
BitVector bvsmod(const BitVector &bv) const
void iset(uint64_t value)
BitVector & ibvsdiv(const BitVector &bv0, const BitVector &bv1)
BitVector & ibvshr(const BitVector &bv, uint64_t shift)
BitVector & ibvnego(const BitVector &bv)
BitVector & ibvslt(const BitVector &bv0, const BitVector &bv1)
BitVector bvadd(const BitVector &bv) const
BitVector & ibvroli(const BitVector &bv, uint64_t n)
BitVector & ibvimplies(const BitVector &bv0, const BitVector &bv1)
BitVector bvugt(const BitVector &bv) const
BitVector & ibvand(const BitVector &bv0, const BitVector &bv1)
BitVector & ibvrol(const BitVector &bv, const BitVector &n)
BitVector & ibvror(const BitVector &bv, const BitVector &n)
BitVector & ibvconcat(const BitVector &bv0, const BitVector &bv1)
BitVector bvimplies(const BitVector &bv) const
BitVector bvsrem(const BitVector &bv) const
bool shift_is_uint64(uint64_t *res) const
bool is_usub_overflow(const BitVector &bv) const
BitVector & ibvsmulo(const BitVector &bv0, const BitVector &bv1)
BitVector bvsgt(const BitVector &bv) const
BitVector & ibvredor(const BitVector &bv)
BitVector bvxor(const BitVector &bv) const
static BitVector bvite(const BitVector &c, const BitVector &t, const BitVector &e)
BitVector & ibvdec(const BitVector &bv)
BitVector bvult(const BitVector &bv) const
BitVector & ibvurem(const BitVector &bv0, const BitVector &bv1)
static constexpr size_t s_native_size
Definition bitvector.h:36
BitVector & ibvssubo(const BitVector &bv0, const BitVector &bv1)
bool is_ssub_overflow(const BitVector &bv) const
BitVector & ibvne(const BitVector &bv0, const BitVector &bv1)
BitVector & ibvusubo(const BitVector &bv0, const BitVector &bv1)
static bool fits_in_size(uint64_t size, const std::string &str, uint32_t base)
Definition bitvector.cpp:73
const mpz_t & gmp_value() const
int32_t signed_compare(const BitVector &bv) const
BitVector bvror(const BitVector &n) const
uint64_t size() const
Definition bitvector.h:248
BitVector & ibvshl(const BitVector &bv, uint64_t shift)
BitVector bvne(const BitVector &bv) const
BitVector & ibvmul(const BitVector &bv0, const BitVector &bv1)
bool bit(uint64_t idx) const
bool is_umul_overflow(const BitVector &bv) const
std::ostream & operator<<(std::ostream &out, const BitVector &bv)
mpz_class uint64_to_mpz_class(uint64_t op)
Definition gmp_utils.cpp:64
void mpz_set_ull(mpz_t rop, uint64_t op)
Definition gmp_utils.cpp:22
void mpz_fdiv_r_2exp_ull(mpz_t r, const mpz_t n, uint64_t b)
void mpz_init_set_sll(mpz_t rop, int64_t op)
Definition gmp_utils.cpp:72
void mpz_fdiv_q_2exp_ull(mpz_t q, const mpz_t n, uint64_t b)
uint64_t mpz_get_ull(const mpz_t op)
Definition gmp_utils.cpp:35
void mpz_init_set_ull(mpz_t rop, uint64_t op)
Definition gmp_utils.cpp:50
size_t mpz_hash(const mpz_t op, uint64_t start)
Definition gmp_utils.cpp:86
void mpz_mul_2exp_ull(mpz_t rop, const mpz_t op1, uint64_t op2)