1 /* Operations with very long integers.
2 Copyright (C) 2012-2013 Free Software Foundation, Inc.
3 Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "hwint.h"
26 #include "wide-int.h"
27 #include "tree.h"
28 #include "dumpfile.h"
29
30
31 #define HOST_BITS_PER_HALF_WIDE_INT 32
32 #if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
33 # define HOST_HALF_WIDE_INT long
34 #elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
35 # define HOST_HALF_WIDE_INT int
36 #else
37 #error Please add support for HOST_HALF_WIDE_INT
38 #endif
39
40 #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
41 /* Do not include longlong.h when the compiler is clang-based.  See PR61146. */
42 #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
43 typedef unsigned HOST_HALF_WIDE_INT UHWtype;
44 typedef unsigned HOST_WIDE_INT UWtype;
45 typedef unsigned int UQItype __attribute__ ((mode (QI)));
46 typedef unsigned int USItype __attribute__ ((mode (SI)));
47 typedef unsigned int UDItype __attribute__ ((mode (DI)));
48 #if W_TYPE_SIZE == 32
49 typedef unsigned int UDWtype __attribute__ ((mode (DI)));
50 #else
51 typedef unsigned int UDWtype __attribute__ ((mode (TI)));
52 #endif
53 #include "longlong.h"
54 #endif
55
56 static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
57
58 /*
59 * Internal utilities.
60 */
61
62 /* Quantities to deal with values that hold half of a wide int. Used
63 in multiply and divide. */
64 #define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
65
66 #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
67 #define BLOCKS_NEEDED(PREC) \
68 (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
69 #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
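/* For example (illustrative only, assuming 64-bit HOST_WIDE_INTs):
BLOCKS_NEEDED (128) is 2 and BLOCKS_NEEDED (0) is 1, while SIGN_MASK (X)
yields -1 for negative X and 0 otherwise. */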
70
71 /* Return the value of VAL[I] if I < LEN, otherwise return 0 or -1
72 based on the top existing bit of VAL. */
73
74 static unsigned HOST_WIDE_INT
75 safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
76 {
77 return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
78 }
79
80 /* Convert the integer in VAL to canonical form, returning its new length.
81 LEN is the number of blocks currently in VAL and PRECISION is the number
82 of bits in the integer it represents.
83
84 This function only changes the representation, not the value. */
85 static unsigned int
86 canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
87 {
88 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
89 HOST_WIDE_INT top;
90 int i;
91
92 if (len > blocks_needed)
93 len = blocks_needed;
94
95 if (len == 1)
96 return len;
97
98 top = val[len - 1];
99 if (len * HOST_BITS_PER_WIDE_INT > precision)
100 val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
101 if (top != 0 && top != (HOST_WIDE_INT)-1)
102 return len;
103
104 /* At this point we know that the top is either 0 or -1. Find the
105 first block that is not a copy of this. */
106 for (i = len - 2; i >= 0; i--)
107 {
108 HOST_WIDE_INT x = val[i];
109 if (x != top)
110 {
111 if (SIGN_MASK (x) == top)
112 return i + 1;
113
114 /* We need an extra block because the top bit of block i does
115 not match the extension. */
116 return i + 2;
117 }
118 }
119
120 /* The number is 0 or -1. */
121 return 1;
122 }
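/* Worked example (illustrative only, assuming 64-bit HOST_WIDE_INTs and
PRECISION == 128): the representation { 0x5, 0x0 } canonizes to the single
block { 0x5 }, because the upper block is just the extension of the lower
one, whereas { 0x5, 0x1 } keeps both blocks since the upper block carries
real information. */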
123
124 /*
125 * Conversion routines in and out of wide_int.
126 */
127
128 /* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
129 result for an integer with precision PRECISION. Return the length
130 of VAL (after any canonization). */
131 unsigned int
132 wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
133 unsigned int xlen, unsigned int precision, bool need_canon)
134 {
135 for (unsigned i = 0; i < xlen; i++)
136 val[i] = xval[i];
137 return need_canon ? canonize (val, xlen, precision) : xlen;
138 }
139
140 /* Construct a wide int from a buffer of length LEN. BUFFER will be
141 read according to the byte endianness and word endianness of the target.
142 Only the lower BUFFER_LEN bytes of the result are set; the remaining
143 high bytes are cleared. */
144 wide_int
145 wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
146 {
147 unsigned int precision = buffer_len * BITS_PER_UNIT;
148 wide_int result = wide_int::create (precision);
149 unsigned int words = buffer_len / UNITS_PER_WORD;
150
151 /* We have to clear all the bits ourselves, as we merely or in values
152 below. */
153 unsigned int len = BLOCKS_NEEDED (precision);
154 HOST_WIDE_INT *val = result.write_val ();
155 for (unsigned int i = 0; i < len; ++i)
156 val[i] = 0;
157
158 for (unsigned int byte = 0; byte < buffer_len; byte++)
159 {
160 unsigned int offset;
161 unsigned int index;
162 unsigned int bitpos = byte * BITS_PER_UNIT;
163 unsigned HOST_WIDE_INT value;
164
165 if (buffer_len > UNITS_PER_WORD)
166 {
167 unsigned int word = byte / UNITS_PER_WORD;
168
169 if (WORDS_BIG_ENDIAN)
170 word = (words - 1) - word;
171
172 offset = word * UNITS_PER_WORD;
173
174 if (BYTES_BIG_ENDIAN)
175 offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
176 else
177 offset += byte % UNITS_PER_WORD;
178 }
179 else
180 offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;
181
182 value = (unsigned HOST_WIDE_INT) buffer[offset];
183
184 index = bitpos / HOST_BITS_PER_WIDE_INT;
185 val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
186 }
187
188 result.set_len (canonize (val, len, precision));
189
190 return result;
191 }
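/* Example (illustrative only): on a little-endian target, the 4-byte
buffer { 0x78, 0x56, 0x34, 0x12 } produces the 32-bit value 0x12345678. */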
192
193 /* Set RESULT from X; the sign is taken according to SGN. */
194 void
195 wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
196 {
197 int len = x.get_len ();
198 const HOST_WIDE_INT *v = x.get_val ();
199 int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();
200
201 if (wi::neg_p (x, sgn))
202 {
203 /* We use ones' complement to avoid the -0x80..0 edge case that
204 unary minus cannot handle. */
205 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
206 for (int i = 0; i < len; i++)
207 t[i] = ~v[i];
208 if (excess > 0)
209 t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
210 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
211 mpz_com (result, result);
212 }
213 else if (excess > 0)
214 {
215 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
216 for (int i = 0; i < len - 1; i++)
217 t[i] = v[i];
218 t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
219 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
220 }
221 else
222 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
223 }
224
225 /* Returns X converted to TYPE. If WRAP is true, then out-of-range
226 values of X will be wrapped; otherwise, they will be set to the
227 appropriate minimum or maximum TYPE bound. */
228 wide_int
229 wi::from_mpz (const_tree type, mpz_t x, bool wrap)
230 {
231 size_t count, numb;
232 int prec = TYPE_PRECISION (type);
233 wide_int res = wide_int::create (prec);
234
235 if (!wrap)
236 {
237 mpz_t min, max;
238
239 mpz_init (min);
240 mpz_init (max);
241 get_type_static_bounds (type, min, max);
242
243 if (mpz_cmp (x, min) < 0)
244 mpz_set (x, min);
245 else if (mpz_cmp (x, max) > 0)
246 mpz_set (x, max);
247
248 mpz_clear (min);
249 mpz_clear (max);
250 }
251
252 /* Determine the number of unsigned HOST_WIDE_INTs that are required
253 for representing the value. The code to calculate count is
254 extracted from the GMP manual, section "Integer Import and Export":
255 http://gmplib.org/manual/Integer-Import-and-Export.html */
256 numb = 8 * sizeof(HOST_WIDE_INT);
257 count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
258 HOST_WIDE_INT *val = res.write_val ();
259 mpz_export (val, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
260 if (count < 1)
261 {
262 val[0] = 0;
263 count = 1;
264 }
265 res.set_len (count);
266
267 if (mpz_sgn (x) < 0)
268 res = -res;
269
270 return res;
271 }
272
273 /*
274 * Largest and smallest values in a mode.
275 */
276
277 /* Return the largest SGNed number that is representable in PRECISION bits.
278
279 TODO: There is still code from the double_int era that tries to
280 make up for the fact that double_ints could not represent the
281 min and max values of all types. This code should be removed
282 because the min and max values can always be represented in
283 wide_ints and int-csts. */
284 wide_int
285 wi::max_value (unsigned int precision, signop sgn)
286 {
287 gcc_checking_assert (precision != 0);
288 if (sgn == UNSIGNED)
289 /* The unsigned max is just all ones. */
290 return shwi (-1, precision);
291 else
292 /* The signed max is all ones except the top bit. This must be
293 explicitly represented. */
294 return mask (precision - 1, false, precision);
295 }
296
297 /* Return the smallest SGNed number that is representable in PRECISION bits. */
298 wide_int
299 wi::min_value (unsigned int precision, signop sgn)
300 {
301 gcc_checking_assert (precision != 0);
302 if (sgn == UNSIGNED)
303 return uhwi (0, precision);
304 else
305 /* The signed min is all zeros except the top bit. This must be
306 explicitly represented. */
307 return wi::set_bit_in_zero (precision - 1, precision);
308 }
309
310 /*
311 * Public utilities.
312 */
313
314 /* Convert the number represented by XVAL, XLEN and XPRECISION, which has
315 signedness SGN, to an integer that has PRECISION bits. Store the blocks
316 in VAL and return the number of blocks used.
317
318 This function can handle both extension (PRECISION > XPRECISION)
319 and truncation (PRECISION < XPRECISION). */
320 unsigned int
321 wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
322 unsigned int xlen, unsigned int xprecision,
323 unsigned int precision, signop sgn)
324 {
325 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
326 unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
327 for (unsigned i = 0; i < len; i++)
328 val[i] = xval[i];
329
330 if (precision > xprecision)
331 {
332 unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
333
334 /* Expanding. */
335 if (sgn == UNSIGNED)
336 {
337 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
338 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
339 else if (val[len - 1] < 0)
340 {
341 while (len < BLOCKS_NEEDED (xprecision))
342 val[len++] = -1;
343 if (small_xprecision)
344 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
345 else
346 val[len++] = 0;
347 }
348 }
349 else
350 {
351 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
352 val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
353 }
354 }
355 len = canonize (val, len, precision);
356
357 return len;
358 }
359
360 /* This function hides the fact that we cannot rely on the bits beyond
361 the precision.  This issue comes up in the relational comparisons
362 where we do allow comparisons of values of different precisions. */
363 static inline HOST_WIDE_INT
364 selt (const HOST_WIDE_INT *a, unsigned int len,
365 unsigned int blocks_needed, unsigned int small_prec,
366 unsigned int index, signop sgn)
367 {
368 HOST_WIDE_INT val;
369 if (index < len)
370 val = a[index];
371 else if (index < blocks_needed || sgn == SIGNED)
372 /* Signed or within the precision. */
373 val = SIGN_MASK (a[len - 1]);
374 else
375 /* Unsigned extension beyond the precision. */
376 val = 0;
377
378 if (small_prec && index == blocks_needed - 1)
379 return (sgn == SIGNED
380 ? sext_hwi (val, small_prec)
381 : zext_hwi (val, small_prec));
382 else
383 return val;
384 }
385
386 /* Find the highest bit represented in a wide int. This will in
387 general have the same value as the sign bit. */
388 static inline HOST_WIDE_INT
389 top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
390 {
391 int excess = len * HOST_BITS_PER_WIDE_INT - prec;
392 unsigned HOST_WIDE_INT val = a[len - 1];
393 if (excess > 0)
394 val <<= excess;
395 return val >> (HOST_BITS_PER_WIDE_INT - 1);
396 }
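/* Note that top_bit_of returns 0 or 1; callers negate the result, as in
-top_bit_of (op, len, prec), to obtain an all-zeros or all-ones
HOST_WIDE_INT mask for the implicit high blocks. */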
397
398 /*
399 * Comparisons.  Note that only equality is an operator; the other
400 * comparisons cannot be operators since they are inherently signed or
401 * unsigned and C++ has no such operators.
402 */
403
404 /* Return true if OP0 == OP1. */
405 bool
406 wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
407 const HOST_WIDE_INT *op1, unsigned int op1len,
408 unsigned int prec)
409 {
410 int l0 = op0len - 1;
411 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
412
413 if (op0len != op1len)
414 return false;
415
416 if (op0len == BLOCKS_NEEDED (prec) && small_prec)
417 {
418 /* It does not matter if we zext or sext here, we just have to
419 do both the same way. */
420 if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
421 return false;
422 l0--;
423 }
424
425 while (l0 >= 0)
426 if (op0[l0] != op1[l0])
427 return false;
428 else
429 l0--;
430
431 return true;
432 }
433
434 /* Return true if OP0 < OP1 using signed comparisons. */
435 bool
436 wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
437 unsigned int precision,
438 const HOST_WIDE_INT *op1, unsigned int op1len)
439 {
440 HOST_WIDE_INT s0, s1;
441 unsigned HOST_WIDE_INT u0, u1;
442 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
443 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
444 int l = MAX (op0len - 1, op1len - 1);
445
446 /* Only the top block is compared as signed. The rest are unsigned
447 comparisons. */
448 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
449 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
450 if (s0 < s1)
451 return true;
452 if (s0 > s1)
453 return false;
454
455 l--;
456 while (l >= 0)
457 {
458 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
459 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
460
461 if (u0 < u1)
462 return true;
463 if (u0 > u1)
464 return false;
465 l--;
466 }
467
468 return false;
469 }
470
471 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
472 signed compares. */
473 int
474 wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
475 unsigned int precision,
476 const HOST_WIDE_INT *op1, unsigned int op1len)
477 {
478 HOST_WIDE_INT s0, s1;
479 unsigned HOST_WIDE_INT u0, u1;
480 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
481 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
482 int l = MAX (op0len - 1, op1len - 1);
483
484 /* Only the top block is compared as signed. The rest are unsigned
485 comparisons. */
486 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
487 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
488 if (s0 < s1)
489 return -1;
490 if (s0 > s1)
491 return 1;
492
493 l--;
494 while (l >= 0)
495 {
496 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
497 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
498
499 if (u0 < u1)
500 return -1;
501 if (u0 > u1)
502 return 1;
503 l--;
504 }
505
506 return 0;
507 }
508
509 /* Return true if OP0 < OP1 using unsigned comparisons. */
510 bool
511 wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
512 unsigned int precision,
513 const HOST_WIDE_INT *op1, unsigned int op1len)
514 {
515 unsigned HOST_WIDE_INT x0;
516 unsigned HOST_WIDE_INT x1;
517 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
518 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
519 int l = MAX (op0len - 1, op1len - 1);
520
521 while (l >= 0)
522 {
523 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
524 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
525 if (x0 < x1)
526 return true;
527 if (x0 > x1)
528 return false;
529 l--;
530 }
531
532 return false;
533 }
534
535 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
536 unsigned compares. */
537 int
538 wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
539 unsigned int precision,
540 const HOST_WIDE_INT *op1, unsigned int op1len)
541 {
542 unsigned HOST_WIDE_INT x0;
543 unsigned HOST_WIDE_INT x1;
544 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
545 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
546 int l = MAX (op0len - 1, op1len - 1);
547
548 while (l >= 0)
549 {
550 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
551 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
552 if (x0 < x1)
553 return -1;
554 if (x0 > x1)
555 return 1;
556 l--;
557 }
558
559 return 0;
560 }
561
562 /*
563 * Extension.
564 */
565
566 /* Sign-extend the number represented by XVAL and XLEN into VAL,
567 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
568 and VAL have PRECISION bits. */
569 unsigned int
570 wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
571 unsigned int xlen, unsigned int precision, unsigned int offset)
572 {
573 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
574 /* Extending beyond the precision is a no-op. If we have only stored
575 OFFSET bits or fewer, the rest are already signs. */
576 if (offset >= precision || len >= xlen)
577 {
578 for (unsigned i = 0; i < xlen; ++i)
579 val[i] = xval[i];
580 return xlen;
581 }
582 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
583 for (unsigned int i = 0; i < len; i++)
584 val[i] = xval[i];
585 if (suboffset > 0)
586 {
587 val[len] = sext_hwi (xval[len], suboffset);
588 len += 1;
589 }
590 return canonize (val, len, precision);
591 }
592
593 /* Zero-extend the number represented by XVAL and XLEN into VAL,
594 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
595 and VAL have PRECISION bits. */
596 unsigned int
597 wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
598 unsigned int xlen, unsigned int precision, unsigned int offset)
599 {
600 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
601 /* Extending beyond the precision is a no-op. If we have only stored
602 OFFSET bits or fewer, and the upper stored bit is zero, then there
603 is nothing to do. */
604 if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
605 {
606 for (unsigned i = 0; i < xlen; ++i)
607 val[i] = xval[i];
608 return xlen;
609 }
610 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
611 for (unsigned int i = 0; i < len; i++)
612 val[i] = i < xlen ? xval[i] : -1;
613 if (suboffset > 0)
614 val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
615 else
616 val[len] = 0;
617 return canonize (val, len + 1, precision);
618 }
619
620 /*
621 * Masking, inserting, shifting, rotating.
622 */
623
624 /* Insert WIDTH bits from Y into X starting at START. */
625 wide_int
626 wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
627 unsigned int width)
628 {
629 wide_int result;
630 wide_int mask;
631 wide_int tmp;
632
633 unsigned int precision = x.get_precision ();
634 if (start >= precision)
635 return x;
636
637 gcc_checking_assert (precision >= width);
638
639 if (start + width >= precision)
640 width = precision - start;
641
642 mask = wi::shifted_mask (start, width, false, precision);
643 tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
644 result = tmp & mask;
645
646 tmp = wi::bit_and_not (x, mask);
647 result = result | tmp;
648
649 return result;
650 }
651
652 /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
653 Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
654 bits. */
655 unsigned int
656 wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
657 unsigned int xlen, unsigned int precision, unsigned int bit)
658 {
659 unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
660 unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;
661
662 if (block + 1 >= xlen)
663 {
664 /* The operation either affects the last current block or needs
665 a new block. */
666 unsigned int len = block + 1;
667 for (unsigned int i = 0; i < len; i++)
668 val[i] = safe_uhwi (xval, xlen, i);
669 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
670
671 /* If the bit we just set is at the msb of the block, make sure
672 that any higher bits are zeros. */
673 if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
674 val[len++] = 0;
675 return len;
676 }
677 else
678 {
679 for (unsigned int i = 0; i < xlen; i++)
680 val[i] = xval[i];
681 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
682 return canonize (val, xlen, precision);
683 }
684 }
685
686 /* bswap THIS. */
687 wide_int
688 wide_int_storage::bswap () const
689 {
690 wide_int result = wide_int::create (precision);
691 unsigned int i, s;
692 unsigned int len = BLOCKS_NEEDED (precision);
693 unsigned int xlen = get_len ();
694 const HOST_WIDE_INT *xval = get_val ();
695 HOST_WIDE_INT *val = result.write_val ();
696
697 /* This is not a well defined operation if the precision is not a
698 multiple of 8. */
699 gcc_assert ((precision & 0x7) == 0);
700
701 for (i = 0; i < len; i++)
702 val[i] = 0;
703
704 /* Only swap the bytes that are not the padding. */
705 for (s = 0; s < precision; s += 8)
706 {
707 unsigned int d = precision - s - 8;
708 unsigned HOST_WIDE_INT byte;
709
710 unsigned int block = s / HOST_BITS_PER_WIDE_INT;
711 unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
712
713 byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;
714
715 block = d / HOST_BITS_PER_WIDE_INT;
716 offset = d & (HOST_BITS_PER_WIDE_INT - 1);
717
718 val[block] |= byte << offset;
719 }
720
721 result.set_len (canonize (val, len, precision));
722 return result;
723 }
724
725 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
726 above that up to PREC are zeros. The result is inverted if NEGATE
727 is true. Return the number of blocks in VAL. */
728 unsigned int
729 wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
730 unsigned int prec)
731 {
732 if (width >= prec)
733 {
734 val[0] = negate ? 0 : -1;
735 return 1;
736 }
737 else if (width == 0)
738 {
739 val[0] = negate ? -1 : 0;
740 return 1;
741 }
742
743 unsigned int i = 0;
744 while (i < width / HOST_BITS_PER_WIDE_INT)
745 val[i++] = negate ? 0 : -1;
746
747 unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
748 if (shift != 0)
749 {
750 HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
751 val[i++] = negate ? ~last : last;
752 }
753 else
754 val[i++] = negate ? -1 : 0;
755
756 return i;
757 }
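/* Worked example (illustrative only, assuming 64-bit HOST_WIDE_INTs):
wi::mask (val, 3, false, 128) stores the single block 0x7 and returns 1,
while wi::mask (val, 64, false, 128) stores an all-ones block followed by
an explicit zero block and returns 2, the zero block keeping the value
from being read back as -1. */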
758
759 /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
760 bits are ones, and the bits above that up to PREC are zeros. The result
761 is inverted if NEGATE is true. Return the number of blocks in VAL. */
762 unsigned int
763 wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
764 bool negate, unsigned int prec)
765 {
766 if (start >= prec || width == 0)
767 {
768 val[0] = negate ? -1 : 0;
769 return 1;
770 }
771
772 if (width > prec - start)
773 width = prec - start;
774 unsigned int end = start + width;
775
776 unsigned int i = 0;
777 while (i < start / HOST_BITS_PER_WIDE_INT)
778 val[i++] = negate ? -1 : 0;
779
780 unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
781 if (shift)
782 {
783 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
784 shift += width;
785 if (shift < HOST_BITS_PER_WIDE_INT)
786 {
787 /* case 000111000 */
788 block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
789 val[i++] = negate ? ~block : block;
790 return i;
791 }
792 else
793 /* ...111000 */
794 val[i++] = negate ? block : ~block;
795 }
796
797 while (i < end / HOST_BITS_PER_WIDE_INT)
798 /* 1111111 */
799 val[i++] = negate ? 0 : -1;
800
801 shift = end & (HOST_BITS_PER_WIDE_INT - 1);
802 if (shift != 0)
803 {
804 /* 000011111 */
805 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
806 val[i++] = negate ? ~block : block;
807 }
808 else if (end < prec)
809 val[i++] = negate ? -1 : 0;
810
811 return i;
812 }
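/* Worked example (illustrative only): wi::shifted_mask (val, 4, 8, false, 32)
stores the single block 0xff0 (bits 4 through 11 set) and returns 1. */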
813
814 /*
815 * Logical operations.
816 */
817
818 /* Set VAL to OP0 & OP1. Return the number of blocks used. */
819 unsigned int
820 wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
821 unsigned int op0len, const HOST_WIDE_INT *op1,
822 unsigned int op1len, unsigned int prec)
823 {
824 int l0 = op0len - 1;
825 int l1 = op1len - 1;
826 bool need_canon = true;
827
828 unsigned int len = MAX (op0len, op1len);
829 if (l0 > l1)
830 {
831 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
832 if (op1mask == 0)
833 {
834 l0 = l1;
835 len = l1 + 1;
836 }
837 else
838 {
839 need_canon = false;
840 while (l0 > l1)
841 {
842 val[l0] = op0[l0];
843 l0--;
844 }
845 }
846 }
847 else if (l1 > l0)
848 {
849 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
850 if (op0mask == 0)
851 len = l0 + 1;
852 else
853 {
854 need_canon = false;
855 while (l1 > l0)
856 {
857 val[l1] = op1[l1];
858 l1--;
859 }
860 }
861 }
862
863 while (l0 >= 0)
864 {
865 val[l0] = op0[l0] & op1[l0];
866 l0--;
867 }
868
869 if (need_canon)
870 len = canonize (val, len, prec);
871
872 return len;
873 }
874
875 /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
876 unsigned int
877 wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
878 unsigned int op0len, const HOST_WIDE_INT *op1,
879 unsigned int op1len, unsigned int prec)
880 {
881 wide_int result;
882 int l0 = op0len - 1;
883 int l1 = op1len - 1;
884 bool need_canon = true;
885
886 unsigned int len = MAX (op0len, op1len);
887 if (l0 > l1)
888 {
889 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
890 if (op1mask != 0)
891 {
892 l0 = l1;
893 len = l1 + 1;
894 }
895 else
896 {
897 need_canon = false;
898 while (l0 > l1)
899 {
900 val[l0] = op0[l0];
901 l0--;
902 }
903 }
904 }
905 else if (l1 > l0)
906 {
907 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
908 if (op0mask == 0)
909 len = l0 + 1;
910 else
911 {
912 need_canon = false;
913 while (l1 > l0)
914 {
915 val[l1] = ~op1[l1];
916 l1--;
917 }
918 }
919 }
920
921 while (l0 >= 0)
922 {
923 val[l0] = op0[l0] & ~op1[l0];
924 l0--;
925 }
926
927 if (need_canon)
928 len = canonize (val, len, prec);
929
930 return len;
931 }
932
933 /* Set VAL to OP0 | OP1. Return the number of blocks used. */
934 unsigned int
935 wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
936 unsigned int op0len, const HOST_WIDE_INT *op1,
937 unsigned int op1len, unsigned int prec)
938 {
939 wide_int result;
940 int l0 = op0len - 1;
941 int l1 = op1len - 1;
942 bool need_canon = true;
943
944 unsigned int len = MAX (op0len, op1len);
945 if (l0 > l1)
946 {
947 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
948 if (op1mask != 0)
949 {
950 l0 = l1;
951 len = l1 + 1;
952 }
953 else
954 {
955 need_canon = false;
956 while (l0 > l1)
957 {
958 val[l0] = op0[l0];
959 l0--;
960 }
961 }
962 }
963 else if (l1 > l0)
964 {
965 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
966 if (op0mask != 0)
967 len = l0 + 1;
968 else
969 {
970 need_canon = false;
971 while (l1 > l0)
972 {
973 val[l1] = op1[l1];
974 l1--;
975 }
976 }
977 }
978
979 while (l0 >= 0)
980 {
981 val[l0] = op0[l0] | op1[l0];
982 l0--;
983 }
984
985 if (need_canon)
986 len = canonize (val, len, prec);
987
988 return len;
989 }
990
991 /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
992 unsigned int
993 wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
994 unsigned int op0len, const HOST_WIDE_INT *op1,
995 unsigned int op1len, unsigned int prec)
996 {
997 wide_int result;
998 int l0 = op0len - 1;
999 int l1 = op1len - 1;
1000 bool need_canon = true;
1001
1002 unsigned int len = MAX (op0len, op1len);
1003 if (l0 > l1)
1004 {
1005 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1006 if (op1mask == 0)
1007 {
1008 l0 = l1;
1009 len = l1 + 1;
1010 }
1011 else
1012 {
1013 need_canon = false;
1014 while (l0 > l1)
1015 {
1016 val[l0] = op0[l0];
1017 l0--;
1018 }
1019 }
1020 }
1021 else if (l1 > l0)
1022 {
1023 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1024 if (op0mask != 0)
1025 len = l0 + 1;
1026 else
1027 {
1028 need_canon = false;
1029 while (l1 > l0)
1030 {
1031 val[l1] = ~op1[l1];
1032 l1--;
1033 }
1034 }
1035 }
1036
1037 while (l0 >= 0)
1038 {
1039 val[l0] = op0[l0] | ~op1[l0];
1040 l0--;
1041 }
1042
1043 if (need_canon)
1044 len = canonize (val, len, prec);
1045
1046 return len;
1047 }
1048
1049 /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1050 unsigned int
1051 wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1052 unsigned int op0len, const HOST_WIDE_INT *op1,
1053 unsigned int op1len, unsigned int prec)
1054 {
1055 wide_int result;
1056 int l0 = op0len - 1;
1057 int l1 = op1len - 1;
1058
1059 unsigned int len = MAX (op0len, op1len);
1060 if (l0 > l1)
1061 {
1062 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1063 while (l0 > l1)
1064 {
1065 val[l0] = op0[l0] ^ op1mask;
1066 l0--;
1067 }
1068 }
1069
1070 if (l1 > l0)
1071 {
1072 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1073 while (l1 > l0)
1074 {
1075 val[l1] = op0mask ^ op1[l1];
1076 l1--;
1077 }
1078 }
1079
1080 while (l0 >= 0)
1081 {
1082 val[l0] = op0[l0] ^ op1[l0];
1083 l0--;
1084 }
1085
1086 return canonize (val, len, prec);
1087 }
1088
1089 /*
1090 * Math.
1091 */
1092
1093 /* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1094 whether the result overflows when OP0 and OP1 are treated as having
1095 signedness SGN. Return the number of blocks in VAL. */
1096 unsigned int
1097 wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1098 unsigned int op0len, const HOST_WIDE_INT *op1,
1099 unsigned int op1len, unsigned int prec,
1100 signop sgn, bool *overflow)
1101 {
1102 unsigned HOST_WIDE_INT o0 = 0;
1103 unsigned HOST_WIDE_INT o1 = 0;
1104 unsigned HOST_WIDE_INT x = 0;
1105 unsigned HOST_WIDE_INT carry = 0;
1106 unsigned HOST_WIDE_INT old_carry = 0;
1107 unsigned HOST_WIDE_INT mask0, mask1;
1108 unsigned int i;
1109
1110 unsigned int len = MAX (op0len, op1len);
1111 mask0 = -top_bit_of (op0, op0len, prec);
1112 mask1 = -top_bit_of (op1, op1len, prec);
1113 /* Add all of the explicitly defined elements. */
1114
1115 for (i = 0; i < len; i++)
1116 {
1117 o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
1118 o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
1119 x = o0 + o1 + carry;
1120 val[i] = x;
1121 old_carry = carry;
1122 carry = carry == 0 ? x < o0 : x <= o0;
1123 }
1124
1125 if (len * HOST_BITS_PER_WIDE_INT < prec)
1126 {
1127 val[len] = mask0 + mask1 + carry;
1128 len++;
1129 if (overflow)
1130 *overflow = false;
1131 }
1132 else if (overflow)
1133 {
1134 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1135 if (sgn == SIGNED)
1136 {
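/* Signed addition overflows iff both operands have the same sign and
the sign of the result differs from it; the AND below has the sign
bit of the PREC-bit value set exactly in that case, and the shift
moves that bit to the MSB of the HWI so it can be tested with < 0. */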
1137 unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
1138 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1139 }
1140 else
1141 {
1142 /* Put the MSBs of X and O0 in the top of the HWI. */
1143 x <<= shift;
1144 o0 <<= shift;
1145 if (old_carry)
1146 *overflow = (x <= o0);
1147 else
1148 *overflow = (x < o0);
1149 }
1150 }
1151
1152 return canonize (val, len, prec);
1153 }
1154
1155 /* Subroutines of the multiplication and division operations. Unpack
1156 the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1157 HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1158 uncompressing the top bit of INPUT[IN_LEN - 1]. */
1159 static void
1160 wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
1161 unsigned int in_len, unsigned int out_len,
1162 unsigned int prec, signop sgn)
1163 {
1164 unsigned int i;
1165 unsigned int j = 0;
1166 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
1167 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1168 HOST_WIDE_INT mask;
1169
1170 if (sgn == SIGNED)
1171 {
1172 mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
1173 mask &= HALF_INT_MASK;
1174 }
1175 else
1176 mask = 0;
1177
1178 for (i = 0; i < blocks_needed - 1; i++)
1179 {
1180 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1181 result[j++] = x;
1182 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1183 }
1184
1185 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1186 if (small_prec)
1187 {
1188 if (sgn == SIGNED)
1189 x = sext_hwi (x, small_prec);
1190 else
1191 x = zext_hwi (x, small_prec);
1192 }
1193 result[j++] = x;
1194 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1195
1196 /* Smear the sign bit. */
1197 while (j < out_len)
1198 result[j++] = mask;
1199 }
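/* For example (illustrative only, assuming 64-bit HOST_WIDE_INTs):
unpacking the single input block 0x123456789abcdef0 yields the half-word
digits 0x9abcdef0 and 0x12345678 (least significant first), followed by
zero or sign smearing up to OUT_LEN; wi_pack below reverses this
transformation. */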
1200
1201 /* The inverse of wi_unpack.  IN_LEN is the number of input
1202 blocks. The number of output blocks will be half this amount. */
1203 static void
1204 wi_pack (unsigned HOST_WIDE_INT *result,
1205 const unsigned HOST_HALF_WIDE_INT *input,
1206 unsigned int in_len)
1207 {
1208 unsigned int i = 0;
1209 unsigned int j = 0;
1210
1211 while (i + 2 < in_len)
1212 {
1213 result[j++] = (unsigned HOST_WIDE_INT)input[i]
1214 | ((unsigned HOST_WIDE_INT)input[i + 1]
1215 << HOST_BITS_PER_HALF_WIDE_INT);
1216 i += 2;
1217 }
1218
1219 /* Handle the case where in_len is odd. For this we zero extend. */
1220 if (in_len & 1)
1221 result[j++] = (unsigned HOST_WIDE_INT)input[i];
1222 else
1223 result[j++] = (unsigned HOST_WIDE_INT)input[i]
1224 | ((unsigned HOST_WIDE_INT)input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT);
1225 }
1226
1227 /* Multiply Op1 by Op2. If HIGH is set, only the upper half of the
1228 result is returned.
1229
1230 If HIGH is not set, throw away the upper half after the check is
1231 made to see if it overflows. Unfortunately there is no better way
1232 to check for overflow than to do this. If OVERFLOW is nonnull,
1233 record in *OVERFLOW whether the result overflowed. SGN controls
1234 the signedness and is used to check overflow or if HIGH is set. */
1235 unsigned int
1236 wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
1237 unsigned int op1len, const HOST_WIDE_INT *op2val,
1238 unsigned int op2len, unsigned int prec, signop sgn,
1239 bool *overflow, bool high)
1240 {
1241 unsigned HOST_WIDE_INT o0, o1, k, t;
1242 unsigned int i;
1243 unsigned int j;
1244 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1245 unsigned int half_blocks_needed = blocks_needed * 2;
1246 /* The sizes here are scaled to support a 2x largest mode by 2x
1247 largest mode yielding a 4x largest mode result. This is what is
1248 needed by vpn. */
1249
1250 unsigned HOST_HALF_WIDE_INT
1251 u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1252 unsigned HOST_HALF_WIDE_INT
1253 v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1254 /* The '2' in 'R' is because we are internally doing a full
1255 multiply. */
1256 unsigned HOST_HALF_WIDE_INT
1257 r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1258 HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
1259
1260 /* If the top level routine did not really pass in an overflow, then
1261 just make sure that we never attempt to set it. */
1262 bool needs_overflow = (overflow != 0);
1263 if (needs_overflow)
1264 *overflow = false;
1265
1266 wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
1267 wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
1268
1269 /* This is a surprisingly common case, so do it first. */
1270 if (op1 == 0 || op2 == 0)
1271 {
1272 val[0] = 0;
1273 return 1;
1274 }
1275
1276 #ifdef umul_ppmm
1277 if (sgn == UNSIGNED)
1278 {
1279 /* If the inputs are single HWIs and the output has room for at
1280 least two HWIs, we can use umul_ppmm directly. */
1281 if (prec >= HOST_BITS_PER_WIDE_INT * 2
1282 && wi::fits_uhwi_p (op1)
1283 && wi::fits_uhwi_p (op2))
1284 {
1285 umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
1286 return 1 + (val[1] != 0 || val[0] < 0);
1287 }
1288 /* Likewise if the output is a full single HWI, except that the
1289 upper HWI of the result is only used for determining overflow.
1290 (We handle this case inline when overflow isn't needed.) */
1291 else if (prec == HOST_BITS_PER_WIDE_INT)
1292 {
1293 unsigned HOST_WIDE_INT upper;
1294 umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
1295 if (needs_overflow)
1296 *overflow = (upper != 0);
1297 return 1;
1298 }
1299 }
1300 #endif
1301
1302 /* Handle multiplications by 1. */
1303 if (op1 == 1)
1304 {
1305 for (i = 0; i < op2len; i++)
1306 val[i] = op2val[i];
1307 return op2len;
1308 }
1309 if (op2 == 1)
1310 {
1311 for (i = 0; i < op1len; i++)
1312 val[i] = op1val[i];
1313 return op1len;
1314 }
1315
1316 /* If we need to check for overflow, we can only do half wide
1317 multiplies quickly because we need to look at the top bits to
1318 check for the overflow. */
1319 if ((high || needs_overflow)
1320 && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
1321 {
1322 unsigned HOST_WIDE_INT r;
1323
1324 if (sgn == SIGNED)
1325 {
1326 o0 = op1.to_shwi ();
1327 o1 = op2.to_shwi ();
1328 }
1329 else
1330 {
1331 o0 = op1.to_uhwi ();
1332 o1 = op2.to_uhwi ();
1333 }
1334
1335 r = o0 * o1;
1336 if (needs_overflow)
1337 {
1338 if (sgn == SIGNED)
1339 {
1340 if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
1341 *overflow = true;
1342 }
1343 else
1344 {
1345 if ((r >> prec) != 0)
1346 *overflow = true;
1347 }
1348 }
1349 val[0] = high ? r >> prec : r;
1350 return 1;
1351 }
1352
1353 /* We do unsigned mul and then correct it. */
1354 wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
1355 wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);
1356
1357 /* The 2 is for a full mult. */
1358 memset (r, 0, half_blocks_needed * 2
1359 * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);
1360
1361 for (j = 0; j < half_blocks_needed; j++)
1362 {
1363 k = 0;
1364 for (i = 0; i < half_blocks_needed; i++)
1365 {
1366 t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
1367 + r[i + j] + k);
1368 r[i + j] = t & HALF_INT_MASK;
1369 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1370 }
1371 r[j + half_blocks_needed] = k;
1372 }
1373
1374 /* We did unsigned math above. For signed we must adjust the
1375 product (assuming we need to see that). */
1376 if (sgn == SIGNED && (high || needs_overflow))
1377 {
1378 unsigned HOST_WIDE_INT b;
1379 if (wi::neg_p (op1))
1380 {
1381 b = 0;
1382 for (i = 0; i < half_blocks_needed; i++)
1383 {
1384 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1385 - (unsigned HOST_WIDE_INT)v[i] - b;
1386 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1387 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1388 }
1389 }
1390 if (wi::neg_p (op2))
1391 {
1392 b = 0;
1393 for (i = 0; i < half_blocks_needed; i++)
1394 {
1395 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1396 - (unsigned HOST_WIDE_INT)u[i] - b;
1397 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1398 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1399 }
1400 }
1401 }
1402
1403 if (needs_overflow)
1404 {
1405 HOST_WIDE_INT top;
1406
1407 /* For unsigned, overflow is true if any of the top bits are set.
1408 For signed, overflow is true if any of the top bits are not equal
1409 to the sign bit. */
1410 if (sgn == UNSIGNED)
1411 top = 0;
1412 else
1413 {
1414 top = r[(half_blocks_needed) - 1];
1415 top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
1416 top &= mask;
1417 }
1418
1419 for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
1420 if (((HOST_WIDE_INT)(r[i] & mask)) != top)
1421 *overflow = true;
1422 }
1423
1424 if (high)
1425 {
1426 /* compute [prec] <- ([prec] * [prec]) >> [prec] */
1427 wi_pack ((unsigned HOST_WIDE_INT *) val,
1428 &r[half_blocks_needed], half_blocks_needed);
1429 return canonize (val, blocks_needed, prec);
1430 }
1431 else
1432 {
1433 /* compute [prec] <- ([prec] * [prec]) & ((1 << [prec]) - 1) */
1434 wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed);
1435 return canonize (val, blocks_needed, prec);
1436 }
1437 }
1438
1439 /* Compute the population count of X. */
1440 int
1441 wi::popcount (const wide_int_ref &x)
1442 {
1443 unsigned int i;
1444 int count;
1445
1446 /* The high order block is special if it is the last block and the
1447 precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1448 have to clear out any ones above the precision before doing
1449 popcount on this block. */
1450 count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1451 unsigned int stop = x.len;
1452 if (count < 0)
1453 {
1454 count = popcount_hwi (x.uhigh () << -count);
1455 stop -= 1;
1456 }
1457 else
1458 {
1459 if (x.sign_mask () >= 0)
1460 count = 0;
1461 }
1462
1463 for (i = 0; i < stop; ++i)
1464 count += popcount_hwi (x.val[i]);
1465
1466 return count;
1467 }
1468
1469 /* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1470 whether the result overflows when OP0 and OP1 are treated as having
1471 signedness SGN. Return the number of blocks in VAL. */
1472 unsigned int
1473 wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1474 unsigned int op0len, const HOST_WIDE_INT *op1,
1475 unsigned int op1len, unsigned int prec,
1476 signop sgn, bool *overflow)
1477 {
1478 unsigned HOST_WIDE_INT o0 = 0;
1479 unsigned HOST_WIDE_INT o1 = 0;
1480 unsigned HOST_WIDE_INT x = 0;
1481 /* Subtract all of the elements directly, propagating a borrow from
1482 each element into the next higher one; the borrow starts out as
1483 zero. */
1484 unsigned HOST_WIDE_INT borrow = 0;
1485 unsigned HOST_WIDE_INT old_borrow = 0;
1486
1487 unsigned HOST_WIDE_INT mask0, mask1;
1488 unsigned int i;
1489
1490 unsigned int len = MAX (op0len, op1len);
1491 mask0 = -top_bit_of (op0, op0len, prec);
1492 mask1 = -top_bit_of (op1, op1len, prec);
1493
1494 /* Subtract all of the explicitly defined elements. */
1495 for (i = 0; i < len; i++)
1496 {
1497 o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
1498 o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
1499 x = o0 - o1 - borrow;
1500 val[i] = x;
1501 old_borrow = borrow;
1502 borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
1503 }
1504
1505 if (len * HOST_BITS_PER_WIDE_INT < prec)
1506 {
1507 val[len] = mask0 - mask1 - borrow;
1508 len++;
1509 if (overflow)
1510 *overflow = false;
1511 }
1512 else if (overflow)
1513 {
1514 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1515 if (sgn == SIGNED)
1516 {
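/* Signed subtraction overflows iff the operands have different signs
and the sign of the result differs from that of the minuend O0; the
AND below has the sign bit of the PREC-bit value set exactly in that
case, which the shift exposes as the MSB of the HWI. */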
1517 unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
1518 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1519 }
1520 else
1521 {
1522 /* Put the MSBs of X and O0 in the top of the HWI. */
1523 x <<= shift;
1524 o0 <<= shift;
1525 if (old_borrow)
1526 *overflow = (x >= o0);
1527 else
1528 *overflow = (x > o0);
1529 }
1530 }
1531
1532 return canonize (val, len, prec);
1533 }
1534
1535
1536 /*
1537 * Division and Mod
1538 */
1539
1540 /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The
1541 algorithm is a small modification of the algorithm in Hacker's
1542 Delight by Warren, which itself is a small modification of Knuth's
1543 algorithm.  M is the number of significant elements of B_DIVIDEND
1544 (Knuth's U), although at least one extra element of B_DIVIDEND
1545 must be allocated; N is the number of elements of B_DIVISOR. */
1546 static void
1547 divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
1548 unsigned HOST_HALF_WIDE_INT *b_remainder,
1549 unsigned HOST_HALF_WIDE_INT *b_dividend,
1550 unsigned HOST_HALF_WIDE_INT *b_divisor,
1551 int m, int n)
1552 {
1553 /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
1554 HOST_WIDE_INT and stored in the lower bits of each word. This
1555 algorithm should work properly on both 32 and 64 bit
1556 machines. */
1557 unsigned HOST_WIDE_INT b
1558 = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
1559 unsigned HOST_WIDE_INT qhat; /* Estimate of quotient digit. */
1560 unsigned HOST_WIDE_INT rhat; /* A remainder. */
1561 unsigned HOST_WIDE_INT p; /* Product of two digits. */
1562 HOST_WIDE_INT t, k;
1563 int i, j, s;
1564
1565 /* Single digit divisor. */
1566 if (n == 1)
1567 {
1568 k = 0;
1569 for (j = m - 1; j >= 0; j--)
1570 {
1571 b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
1572 k = ((k * b + b_dividend[j])
1573 - ((unsigned HOST_WIDE_INT)b_quotient[j]
1574 * (unsigned HOST_WIDE_INT)b_divisor[0]));
1575 }
1576 b_remainder[0] = k;
1577 return;
1578 }
1579
1580 s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
1581
1582 if (s)
1583 {
1584 /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1585 algorithm, we can overwrite b_dividend and b_divisor, so we do
1586 that. */
1587 for (i = n - 1; i > 0; i--)
1588 b_divisor[i] = (b_divisor[i] << s)
1589 | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1590 b_divisor[0] = b_divisor[0] << s;
1591
1592 b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
1593 for (i = m - 1; i > 0; i--)
1594 b_dividend[i] = (b_dividend[i] << s)
1595 | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1596 b_dividend[0] = b_dividend[0] << s;
1597 }
1598
1599 /* Main loop. */
1600 for (j = m - n; j >= 0; j--)
1601 {
1602 qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
1603 rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
1604 again:
1605 if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
1606 {
1607 qhat -= 1;
1608 rhat += b_divisor[n-1];
1609 if (rhat < b)
1610 goto again;
1611 }
1612
1613 /* Multiply and subtract. */
1614 k = 0;
1615 for (i = 0; i < n; i++)
1616 {
1617 p = qhat * b_divisor[i];
1618 t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
1619 b_dividend[i + j] = t;
1620 k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
1621 - (t >> HOST_BITS_PER_HALF_WIDE_INT));
1622 }
1623 t = b_dividend[j+n] - k;
1624 b_dividend[j+n] = t;
1625
1626 b_quotient[j] = qhat;
1627 if (t < 0)
1628 {
1629 b_quotient[j] -= 1;
1630 k = 0;
1631 for (i = 0; i < n; i++)
1632 {
1633 t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
1634 b_dividend[i+j] = t;
1635 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1636 }
1637 b_dividend[j+n] += k;
1638 }
1639 }
1640 if (s)
1641 for (i = 0; i < n; i++)
1642 b_remainder[i] = (b_dividend[i] >> s)
1643 | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
1644 else
1645 for (i = 0; i < n; i++)
1646 b_remainder[i] = b_dividend[i];
1647 }
1648
1649
1650 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1651 the result. If QUOTIENT is nonnull, store the value of the quotient
1652 there and return the number of blocks in it. The return value is
1653 not defined otherwise. If REMAINDER is nonnull, store the value
1654 of the remainder there and store the number of blocks in
1655 *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1656 the division overflowed. */
1657 unsigned int
1658 wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
1659 HOST_WIDE_INT *remainder,
1660 const HOST_WIDE_INT *dividend_val,
1661 unsigned int dividend_len, unsigned int dividend_prec,
1662 const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
1663 unsigned int divisor_prec, signop sgn,
1664 bool *oflow)
1665 {
1666 unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1667 unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
1668 unsigned HOST_HALF_WIDE_INT
1669 b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1670 unsigned HOST_HALF_WIDE_INT
1671 b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1672 unsigned HOST_HALF_WIDE_INT
1673 b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1674 unsigned HOST_HALF_WIDE_INT
1675 b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1676 unsigned int m, n;
1677 bool dividend_neg = false;
1678 bool divisor_neg = false;
1679 bool overflow = false;
1680 wide_int neg_dividend, neg_divisor;
1681
1682 wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1683 dividend_prec);
1684 wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1685 divisor_prec);
1686 if (divisor == 0)
1687 overflow = true;
1688
1689 /* The smallest signed number / -1 causes overflow. The dividend_len
1690 check is for speed rather than correctness. */
1691 if (sgn == SIGNED
1692 && dividend_len == BLOCKS_NEEDED (dividend_prec)
1693 && divisor == -1
1694 && wi::only_sign_bit_p (dividend))
1695 overflow = true;
1696
1697 /* Handle the overflow cases. Viewed as unsigned value, the quotient of
1698 (signed min / -1) has the same representation as the original dividend.
1699 We have traditionally made division by zero act as division by one,
1700 so there too we use the original dividend. */
1701 if (overflow)
1702 {
1703 if (remainder)
1704 {
1705 *remainder_len = 1;
1706 remainder[0] = 0;
1707 }
1708 if (oflow != 0)
1709 *oflow = true;
1710 if (quotient)
1711 for (unsigned int i = 0; i < dividend_len; ++i)
1712 quotient[i] = dividend_val[i];
1713 return dividend_len;
1714 }
1715
1716 if (oflow)
1717 *oflow = false;
1718
1719 /* Do it on the host if you can. */
1720 if (sgn == SIGNED
1721 && wi::fits_shwi_p (dividend)
1722 && wi::fits_shwi_p (divisor))
1723 {
1724 HOST_WIDE_INT o0 = dividend.to_shwi ();
1725 HOST_WIDE_INT o1 = divisor.to_shwi ();
1726
1727 if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1728 {
1729 gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
1730 if (quotient)
1731 {
1732 quotient[0] = HOST_WIDE_INT_MIN;
1733 quotient[1] = 0;
1734 }
1735 if (remainder)
1736 {
1737 remainder[0] = 0;
1738 *remainder_len = 1;
1739 }
1740 return 2;
1741 }
1742 else
1743 {
1744 if (quotient)
1745 quotient[0] = o0 / o1;
1746 if (remainder)
1747 {
1748 remainder[0] = o0 % o1;
1749 *remainder_len = 1;
1750 }
1751 return 1;
1752 }
1753 }
1754
1755 if (sgn == UNSIGNED
1756 && wi::fits_uhwi_p (dividend)
1757 && wi::fits_uhwi_p (divisor))
1758 {
1759 unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1760 unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
1761
1762 if (quotient)
1763 quotient[0] = o0 / o1;
1764 if (remainder)
1765 {
1766 remainder[0] = o0 % o1;
1767 *remainder_len = 1;
1768 }
1769 return 1;
1770 }
1771
1772 /* Make the divisor and dividend positive and remember what we
1773 did. */
1774 if (sgn == SIGNED)
1775 {
1776 if (wi::neg_p (dividend))
1777 {
1778 neg_dividend = -dividend;
1779 dividend = neg_dividend;
1780 dividend_neg = true;
1781 }
1782 if (wi::neg_p (divisor))
1783 {
1784 neg_divisor = -divisor;
1785 divisor = neg_divisor;
1786 divisor_neg = true;
1787 }
1788 }
1789
1790 wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
1791 dividend_blocks_needed, dividend_prec, sgn);
1792 wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
1793 divisor_blocks_needed, divisor_prec, sgn);
1794
1795 m = dividend_blocks_needed;
1796 while (m > 1 && b_dividend[m - 1] == 0)
1797 m--;
1798
1799 n = divisor_blocks_needed;
1800 while (n > 1 && b_divisor[n - 1] == 0)
1801 n--;
1802
1803 memset (b_quotient, 0, sizeof (b_quotient));
1804
1805 divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
1806
1807 unsigned int quotient_len = 0;
1808 if (quotient)
1809 {
1810 wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m);
1811 quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec);
1812 /* The quotient is neg if exactly one of the divisor or dividend is
1813 neg. */
1814 if (dividend_neg != divisor_neg)
1815 quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
1816 quotient_len, dividend_prec,
1817 UNSIGNED, 0);
1818 }
1819
1820 if (remainder)
1821 {
1822 wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n);
1823 *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec);
1824 /* The remainder is always the same sign as the dividend. */
1825 if (dividend_neg)
1826 *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
1827 *remainder_len, dividend_prec,
1828 UNSIGNED, 0);
1829 }
1830
1831 return quotient_len;
1832 }
1833
1834 /*
1835 * Shifting, rotating and extraction.
1836 */
1837
1838 /* Left shift XVAL by SHIFT and store the result in VAL. Return the
1839 number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
1840 unsigned int
1841 wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1842 unsigned int xlen, unsigned int precision,
1843 unsigned int shift)
1844 {
1845 /* Split the shift into a whole-block shift and a subblock shift. */
1846 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1847 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1848
1849 /* The whole-block shift fills with zeros. */
1850 unsigned int len = BLOCKS_NEEDED (precision);
1851 for (unsigned int i = 0; i < skip; ++i)
1852 val[i] = 0;
1853
1854 /* It's easier to handle the simple block case specially. */
1855 if (small_shift == 0)
1856 for (unsigned int i = skip; i < len; ++i)
1857 val[i] = safe_uhwi (xval, xlen, i - skip);
1858 else
1859 {
1860 /* The first unfilled output block is a left shift of the first
1861 block in XVAL. The other output blocks contain bits from two
1862 consecutive input blocks. */
1863 unsigned HOST_WIDE_INT carry = 0;
1864 for (unsigned int i = skip; i < len; ++i)
1865 {
1866 unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
1867 val[i] = (x << small_shift) | carry;
1868 carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
1869 }
1870 }
1871 return canonize (val, len, precision);
1872 }
1873
1874 /* Right shift XVAL by SHIFT and store the result in VAL. Return the
1875 number of blocks in VAL. The input has XPRECISION bits and the
1876 output has XPRECISION - SHIFT bits. */
1877 static unsigned int
1878 rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1879 unsigned int xlen, unsigned int xprecision,
1880 unsigned int shift)
1881 {
1882 /* Split the shift into a whole-block shift and a subblock shift. */
1883 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1884 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1885
1886 /* Work out how many blocks are needed to store the significant bits
1887 (excluding the upper zeros or signs). */
1888 unsigned int len = BLOCKS_NEEDED (xprecision - shift);
1889
1890 /* It's easier to handle the simple block case specially. */
1891 if (small_shift == 0)
1892 for (unsigned int i = 0; i < len; ++i)
1893 val[i] = safe_uhwi (xval, xlen, i + skip);
1894 else
1895 {
1896 /* Each output block but the last is a combination of two input blocks.
1897 The last block is a right shift of the last block in XVAL. */
1898 unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
1899 for (unsigned int i = 0; i < len; ++i)
1900 {
1901 val[i] = curr >> small_shift;
1902 curr = safe_uhwi (xval, xlen, i + skip + 1);
1903 val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
1904 }
1905 }
1906 return len;
1907 }
1908
1909 /* Logically right shift XVAL by SHIFT and store the result in VAL.
1910 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1911 VAL has PRECISION bits. */
1912 unsigned int
1913 wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1914 unsigned int xlen, unsigned int xprecision,
1915 unsigned int precision, unsigned int shift)
1916 {
1917 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1918
1919 /* The value we just created has precision XPRECISION - SHIFT.
1920 Zero-extend it to wider precisions. */
1921 if (precision > xprecision - shift)
1922 {
1923 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1924 if (small_prec)
1925 val[len - 1] = zext_hwi (val[len - 1], small_prec);
1926 else if (val[len - 1] < 0)
1927 {
1928 /* Add a new block with a zero. */
1929 val[len++] = 0;
1930 return len;
1931 }
1932 }
1933 return canonize (val, len, precision);
1934 }
1935
1936 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
1937 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1938 VAL has PRECISION bits. */
1939 unsigned int
1940 wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1941 unsigned int xlen, unsigned int xprecision,
1942 unsigned int precision, unsigned int shift)
1943 {
1944 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1945
1946 /* The value we just created has precision XPRECISION - SHIFT.
1947 Sign-extend it to wider types. */
1948 if (precision > xprecision - shift)
1949 {
1950 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1951 if (small_prec)
1952 val[len - 1] = sext_hwi (val[len - 1], small_prec);
1953 }
1954 return canonize (val, len, precision);
1955 }
1956
1957 /* Return the number of leading (upper) zeros in X. */
1958 int
1959 wi::clz (const wide_int_ref &x)
1960 {
1961 /* Calculate how many bits there are above the highest represented block. */
1962 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1963
1964 unsigned HOST_WIDE_INT high = x.uhigh ();
1965 if (count < 0)
1966 /* The upper -COUNT bits of HIGH are not part of the value.
1967 Clear them out. */
1968 high = (high << -count) >> -count;
1969 else if (x.sign_mask () < 0)
1970 /* The upper bit is set, so there are no leading zeros. */
1971 return 0;
1972
1973 /* We don't need to look below HIGH. Either HIGH is nonzero,
1974 or the top bit of the block below is nonzero; clz_hwi is
1975 HOST_BITS_PER_WIDE_INT in the latter case. */
1976 return count + clz_hwi (high);
1977 }
1978
1979 /* Return the number of redundant sign bits in X. (That is, the number
1980 of bits immediately below the sign bit that have the same value as
1981 the sign bit.) */
1982 int
1983 wi::clrsb (const wide_int_ref &x)
1984 {
1985 /* Calculate how many bits there are above the highest represented block. */
1986 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1987
1988 unsigned HOST_WIDE_INT high = x.uhigh ();
1989 unsigned HOST_WIDE_INT mask = -1;
1990 if (count < 0)
1991 {
1992 /* The upper -COUNT bits of HIGH are not part of the value.
1993 Clear them from both MASK and HIGH. */
1994 mask >>= -count;
1995 high &= mask;
1996 }
1997
1998 /* If the top bit is 1, count the number of leading 1s. If the top
1999 bit is zero, count the number of leading zeros. */
2000 if (high > mask / 2)
2001 high ^= mask;
2002
2003 /* There are no sign bits below the top block, so we don't need to look
2004 beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2005 HIGH is 0. */
2006 return count + clz_hwi (high) - 1;
2007 }
2008
2009 /* Return the number of trailing (lower) zeros in X. */
2010 int
2011 wi::ctz (const wide_int_ref &x)
2012 {
2013 if (x.len == 1 && x.ulow () == 0)
2014 return x.precision;
2015
2016 /* Having dealt with the zero case, there must be a block with a
2017 nonzero bit. We don't care about the bits above the first 1. */
2018 unsigned int i = 0;
2019 while (x.val[i] == 0)
2020 ++i;
2021 return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
2022 }
2023
2024 /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2025 return -1. */
2026 int
2027 wi::exact_log2 (const wide_int_ref &x)
2028 {
2029 /* Reject cases where there are implicit -1 blocks above HIGH. */
2030 if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
2031 return -1;
2032
2033 /* Set CRUX to the index of the entry that should be nonzero.
2034 If the top block is zero then the next lowest block (if any)
2035 must have the high bit set. */
2036 unsigned int crux = x.len - 1;
2037 if (crux > 0 && x.val[crux] == 0)
2038 crux -= 1;
2039
2040 /* Check that all lower blocks are zero. */
2041 for (unsigned int i = 0; i < crux; ++i)
2042 if (x.val[i] != 0)
2043 return -1;
2044
2045 /* Get a zero-extended form of block CRUX. */
2046 unsigned HOST_WIDE_INT hwi = x.val[crux];
2047 if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
2048 hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
2049
2050 /* Now it's down to whether HWI is a power of 2. */
2051 int res = ::exact_log2 (hwi);
2052 if (res >= 0)
2053 res += crux * HOST_BITS_PER_WIDE_INT;
2054 return res;
2055 }
2056
2057 /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
2058 int
2059 wi::floor_log2 (const wide_int_ref &x)
2060 {
2061 return x.precision - 1 - clz (x);
2062 }
2063
2064 /* Return the index of the first (lowest) set bit in X, counting from 1.
2065 Return 0 if X is 0. */
2066 int
2067 wi::ffs (const wide_int_ref &x)
2068 {
2069 return eq_p (x, 0) ? 0 : ctz (x) + 1;
2070 }
2071
2072 /* Return true if sign-extending X to have precision PRECISION would give
2073 the minimum signed value at that precision. */
2074 bool
2075 wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2076 {
2077 return ctz (x) + 1 == int (precision);
2078 }
2079
2080 /* Return true if X represents the minimum signed value. */
2081 bool
2082 wi::only_sign_bit_p (const wide_int_ref &x)
2083 {
2084 return only_sign_bit_p (x, x.precision);
2085 }
2086
2087 /*
2088 * Private utilities.
2089 */
2090
2091 void gt_ggc_mx (widest_int *) { }
2092 void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
2093 void gt_pch_nx (widest_int *) { }
2094
2095 template void wide_int::dump () const;
2096 template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2097 template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2098 template void offset_int::dump () const;
2099 template void widest_int::dump () const;