1 /* Operations with very long integers.
2 Copyright (C) 2012-2015 Free Software Foundation, Inc.
3 Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "hwint.h"
26 #include "vec.h"
27 #include "alias.h"
28 #include "symtab.h"
29 #include "inchash.h"
30 #include "tree.h"
31 #include "dumpfile.h"
32
33
34 #define HOST_BITS_PER_HALF_WIDE_INT 32
35 #if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
36 # define HOST_HALF_WIDE_INT long
37 #elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
38 # define HOST_HALF_WIDE_INT int
39 #else
40 #error Please add support for HOST_HALF_WIDE_INT
41 #endif
42
43 #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
44 /* Do not include longlong.h when the compiler is clang-based. See PR61146. */
45 #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
46 typedef unsigned HOST_HALF_WIDE_INT UHWtype;
47 typedef unsigned HOST_WIDE_INT UWtype;
48 typedef unsigned int UQItype __attribute__ ((mode (QI)));
49 typedef unsigned int USItype __attribute__ ((mode (SI)));
50 typedef unsigned int UDItype __attribute__ ((mode (DI)));
51 #if W_TYPE_SIZE == 32
52 typedef unsigned int UDWtype __attribute__ ((mode (DI)));
53 #else
54 typedef unsigned int UDWtype __attribute__ ((mode (TI)));
55 #endif
56 #include "longlong.h"
57 #endif
58
59 static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
60
61 /*
62 * Internal utilities.
63 */
64
65 /* Quantities to deal with values that hold half of a wide int. Used
66 in multiply and divide. */
67 #define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
68
69 #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
70 #define BLOCKS_NEEDED(PREC) \
71 (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
72 #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
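     /* A couple of worked examples, assuming a 64-bit HOST_WIDE_INT:
	BLOCKS_NEEDED (64) == 1, BLOCKS_NEEDED (65) == 2 and
	BLOCKS_NEEDED (0) == 1, while SIGN_MASK (X) is -1 when the top bit
	of X is set and 0 otherwise.  */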
73
74 /* Return the value of VAL[I] if I < LEN, otherwise return 0 or -1
75 based on the sign of the topmost existing block of VAL. */
76
77 static unsigned HOST_WIDE_INT
78 safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
79 {
80 return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
81 }
82
83 /* Convert the integer in VAL to canonical form, returning its new length.
84 LEN is the number of blocks currently in VAL and PRECISION is the number
85 of bits in the integer it represents.
86
87 This function only changes the representation, not the value. */
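     /* A worked example, assuming a 64-bit HOST_WIDE_INT and a precision of
	128: the value 1 stored as the blocks { 1, 0 } canonizes to the
	single block { 1 }, because the upper block is just the sign
	extension of the lower one; the value 0xffffffffffffffff stored as
	{ -1, 0 } keeps both blocks, because dropping the zero block would
	sign-extend the -1 and change the value.  */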
88 static unsigned int
89 canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
90 {
91 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
92 HOST_WIDE_INT top;
93 int i;
94
95 if (len > blocks_needed)
96 len = blocks_needed;
97
98 if (len == 1)
99 return len;
100
101 top = val[len - 1];
102 if (len * HOST_BITS_PER_WIDE_INT > precision)
103 val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
104 if (top != 0 && top != (HOST_WIDE_INT)-1)
105 return len;
106
107 /* At this point we know that the top is either 0 or -1. Find the
108 first block that is not a copy of this. */
109 for (i = len - 2; i >= 0; i--)
110 {
111 HOST_WIDE_INT x = val[i];
112 if (x != top)
113 {
114 if (SIGN_MASK (x) == top)
115 return i + 1;
116
117 /* We need an extra block because the top bit of block I does
118 not match the extension. */
119 return i + 2;
120 }
121 }
122
123 /* The number is 0 or -1. */
124 return 1;
125 }
126
127 /*
128 * Conversion routines in and out of wide_int.
129 */
130
131 /* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
132 result for an integer with precision PRECISION. Return the length
133 of VAL (after any canonization). */
134 unsigned int
135 wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
136 unsigned int xlen, unsigned int precision, bool need_canon)
137 {
138 for (unsigned i = 0; i < xlen; i++)
139 val[i] = xval[i];
140 return need_canon ? canonize (val, xlen, precision) : xlen;
141 }
142
143 /* Construct a wide int from a buffer of length BUFFER_LEN. BUFFER will be
144 read according to the byte endianness and word endianness of the target.
145 Only the lower BUFFER_LEN bytes of the result are set; the remaining
146 high bytes are cleared. */
147 wide_int
148 wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
149 {
150 unsigned int precision = buffer_len * BITS_PER_UNIT;
151 wide_int result = wide_int::create (precision);
152 unsigned int words = buffer_len / UNITS_PER_WORD;
153
154 /* We have to clear all the bits ourselves, as we merely or in values
155 below. */
156 unsigned int len = BLOCKS_NEEDED (precision);
157 HOST_WIDE_INT *val = result.write_val ();
158 for (unsigned int i = 0; i < len; ++i)
159 val[i] = 0;
160
161 for (unsigned int byte = 0; byte < buffer_len; byte++)
162 {
163 unsigned int offset;
164 unsigned int index;
165 unsigned int bitpos = byte * BITS_PER_UNIT;
166 unsigned HOST_WIDE_INT value;
167
168 if (buffer_len > UNITS_PER_WORD)
169 {
170 unsigned int word = byte / UNITS_PER_WORD;
171
172 if (WORDS_BIG_ENDIAN)
173 word = (words - 1) - word;
174
175 offset = word * UNITS_PER_WORD;
176
177 if (BYTES_BIG_ENDIAN)
178 offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
179 else
180 offset += byte % UNITS_PER_WORD;
181 }
182 else
183 offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;
184
185 value = (unsigned HOST_WIDE_INT) buffer[offset];
186
187 index = bitpos / HOST_BITS_PER_WIDE_INT;
188 val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
189 }
190
191 result.set_len (canonize (val, len, precision));
192
193 return result;
194 }
195
196 /* Sets RESULT from X; the sign is taken according to SGN. */
197 void
198 wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
199 {
200 int len = x.get_len ();
201 const HOST_WIDE_INT *v = x.get_val ();
202 int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();
203
204 if (wi::neg_p (x, sgn))
205 {
206 /* We use ones' complement to avoid the -0x80..0 edge case that
207 negation won't work on. */
208 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
209 for (int i = 0; i < len; i++)
210 t[i] = ~v[i];
211 if (excess > 0)
212 t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
213 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
214 mpz_com (result, result);
215 }
216 else if (excess > 0)
217 {
218 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
219 for (int i = 0; i < len - 1; i++)
220 t[i] = v[i];
221 t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
222 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
223 }
224 else
225 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
226 }
227
228 /* Returns X converted to TYPE. If WRAP is true, then out-of-range
229 values of X will be wrapped; otherwise, they will be set to the
230 appropriate minimum or maximum TYPE bound. */
231 wide_int
232 wi::from_mpz (const_tree type, mpz_t x, bool wrap)
233 {
234 size_t count, numb;
235 unsigned int prec = TYPE_PRECISION (type);
236 wide_int res = wide_int::create (prec);
237
238 if (!wrap)
239 {
240 mpz_t min, max;
241
242 mpz_init (min);
243 mpz_init (max);
244 get_type_static_bounds (type, min, max);
245
246 if (mpz_cmp (x, min) < 0)
247 mpz_set (x, min);
248 else if (mpz_cmp (x, max) > 0)
249 mpz_set (x, max);
250
251 mpz_clear (min);
252 mpz_clear (max);
253 }
254
255 /* Determine the number of unsigned HOST_WIDE_INTs that are required
256 for representing the value. The code to calculate count is
257 extracted from the GMP manual, section "Integer Import and Export":
258 http://gmplib.org/manual/Integer-Import-and-Export.html */
259 numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
260 count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
261 HOST_WIDE_INT *val = res.write_val ();
262 /* Write directly to the wide_int storage if possible, otherwise leave
263 GMP to allocate the memory for us. It might be slightly more efficient
264 to use mpz_tdiv_r_2exp for the latter case, but the situation is
265 pathological and it seems safer to operate on the original mpz value
266 in all cases. */
267 void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
268 &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
269 if (count < 1)
270 {
271 val[0] = 0;
272 count = 1;
273 }
274 count = MIN (count, BLOCKS_NEEDED (prec));
275 if (valres != val)
276 {
277 memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
278 free (valres);
279 }
280 res.set_len (canonize (val, count, prec));
281
282 if (mpz_sgn (x) < 0)
283 res = -res;
284
285 return res;
286 }
287
288 /*
289 * Largest and smallest values in a mode.
290 */
291
292 /* Return the largest SGNed number that is representable in PRECISION bits.
293
294 TODO: There is still code from the double_int era that tries to
295 make up for the fact that double_ints could not represent the
296 min and max values of all types. This code should be removed
297 because the min and max values can always be represented in
298 wide_ints and int-csts. */
299 wide_int
300 wi::max_value (unsigned int precision, signop sgn)
301 {
302 gcc_checking_assert (precision != 0);
303 if (sgn == UNSIGNED)
304 /* The unsigned max is just all ones. */
305 return shwi (-1, precision);
306 else
307 /* The signed max is all ones except the top bit. This must be
308 explicitly represented. */
309 return mask (precision - 1, false, precision);
310 }
311
312 /* Return the smallest SGNed number that is representable in PRECISION bits. */
313 wide_int
314 wi::min_value (unsigned int precision, signop sgn)
315 {
316 gcc_checking_assert (precision != 0);
317 if (sgn == UNSIGNED)
318 return uhwi (0, precision);
319 else
320 /* The signed min is all zeros except the top bit. This must be
321 explicitly represented. */
322 return wi::set_bit_in_zero (precision - 1, precision);
323 }
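     /* For example, wi::max_value (8, SIGNED) is 127 and
	wi::max_value (8, UNSIGNED) is 255, while wi::min_value (8, SIGNED)
	is -128 and wi::min_value (8, UNSIGNED) is 0.  */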
324
325 /*
326 * Public utilities.
327 */
328
329 /* Convert the number represented by XVAL, XLEN and XPRECISION, which has
330 signedness SGN, to an integer that has PRECISION bits. Store the blocks
331 in VAL and return the number of blocks used.
332
333 This function can handle both extension (PRECISION > XPRECISION)
334 and truncation (PRECISION < XPRECISION). */
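     /* For example, extending the 8-bit value 0xff (stored as the single
	block -1) to 32 bits yields 255 when SGN is UNSIGNED but stays -1
	when SGN is SIGNED; truncation simply drops the blocks beyond the
	new precision and recanonizes what is left.  */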
335 unsigned int
336 wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
337 unsigned int xlen, unsigned int xprecision,
338 unsigned int precision, signop sgn)
339 {
340 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
341 unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
342 for (unsigned i = 0; i < len; i++)
343 val[i] = xval[i];
344
345 if (precision > xprecision)
346 {
347 unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
348
349 /* Expanding. */
350 if (sgn == UNSIGNED)
351 {
352 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
353 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
354 else if (val[len - 1] < 0)
355 {
356 while (len < BLOCKS_NEEDED (xprecision))
357 val[len++] = -1;
358 if (small_xprecision)
359 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
360 else
361 val[len++] = 0;
362 }
363 }
364 else
365 {
366 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
367 val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
368 }
369 }
370 len = canonize (val, len, precision);
371
372 return len;
373 }
374
375 /* This function hides the fact that we cannot rely on the bits beyond
376 the precision. This issue comes up in the relational comparisons
377 where we do allow comparisons of values of different precisions. */
378 static inline HOST_WIDE_INT
379 selt (const HOST_WIDE_INT *a, unsigned int len,
380 unsigned int blocks_needed, unsigned int small_prec,
381 unsigned int index, signop sgn)
382 {
383 HOST_WIDE_INT val;
384 if (index < len)
385 val = a[index];
386 else if (index < blocks_needed || sgn == SIGNED)
387 /* Signed or within the precision. */
388 val = SIGN_MASK (a[len - 1]);
389 else
390 /* Unsigned extension beyond the precision. */
391 val = 0;
392
393 if (small_prec && index == blocks_needed - 1)
394 return (sgn == SIGNED
395 ? sext_hwi (val, small_prec)
396 : zext_hwi (val, small_prec));
397 else
398 return val;
399 }
400
401 /* Find the highest bit represented in a wide int. This will in
402 general have the same value as the sign bit. */
403 static inline HOST_WIDE_INT
404 top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
405 {
406 int excess = len * HOST_BITS_PER_WIDE_INT - prec;
407 unsigned HOST_WIDE_INT val = a[len - 1];
408 if (excess > 0)
409 val <<= excess;
410 return val >> (HOST_BITS_PER_WIDE_INT - 1);
411 }
412
413 /*
414 * Comparisons, note that only equality is an operator. The other
415 * comparisons cannot be operators since they are inherently signed or
416 * unsigned and C++ has no such operators.
417 */
418
419 /* Return true if OP0 == OP1. */
420 bool
421 wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
422 const HOST_WIDE_INT *op1, unsigned int op1len,
423 unsigned int prec)
424 {
425 int l0 = op0len - 1;
426 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
427
428 if (op0len != op1len)
429 return false;
430
431 if (op0len == BLOCKS_NEEDED (prec) && small_prec)
432 {
433 /* It does not matter if we zext or sext here, we just have to
434 do both the same way. */
435 if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
436 return false;
437 l0--;
438 }
439
440 while (l0 >= 0)
441 if (op0[l0] != op1[l0])
442 return false;
443 else
444 l0--;
445
446 return true;
447 }
448
449 /* Return true if OP0 < OP1 using signed comparisons. */
450 bool
451 wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
452 unsigned int precision,
453 const HOST_WIDE_INT *op1, unsigned int op1len)
454 {
455 HOST_WIDE_INT s0, s1;
456 unsigned HOST_WIDE_INT u0, u1;
457 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
458 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
459 int l = MAX (op0len - 1, op1len - 1);
460
461 /* Only the top block is compared as signed. The rest are unsigned
462 comparisons. */
463 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
464 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
465 if (s0 < s1)
466 return true;
467 if (s0 > s1)
468 return false;
469
470 l--;
471 while (l >= 0)
472 {
473 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
474 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
475
476 if (u0 < u1)
477 return true;
478 if (u0 > u1)
479 return false;
480 l--;
481 }
482
483 return false;
484 }
485
486 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
487 signed compares. */
488 int
489 wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
490 unsigned int precision,
491 const HOST_WIDE_INT *op1, unsigned int op1len)
492 {
493 HOST_WIDE_INT s0, s1;
494 unsigned HOST_WIDE_INT u0, u1;
495 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
496 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
497 int l = MAX (op0len - 1, op1len - 1);
498
499 /* Only the top block is compared as signed. The rest are unsigned
500 comparisons. */
501 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
502 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
503 if (s0 < s1)
504 return -1;
505 if (s0 > s1)
506 return 1;
507
508 l--;
509 while (l >= 0)
510 {
511 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
512 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
513
514 if (u0 < u1)
515 return -1;
516 if (u0 > u1)
517 return 1;
518 l--;
519 }
520
521 return 0;
522 }
523
524 /* Return true if OP0 < OP1 using unsigned comparisons. */
525 bool
526 wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
527 unsigned int precision,
528 const HOST_WIDE_INT *op1, unsigned int op1len)
529 {
530 unsigned HOST_WIDE_INT x0;
531 unsigned HOST_WIDE_INT x1;
532 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
533 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
534 int l = MAX (op0len - 1, op1len - 1);
535
536 while (l >= 0)
537 {
538 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
539 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
540 if (x0 < x1)
541 return true;
542 if (x0 > x1)
543 return false;
544 l--;
545 }
546
547 return false;
548 }
549
550 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
551 unsigned compares. */
552 int
553 wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
554 unsigned int precision,
555 const HOST_WIDE_INT *op1, unsigned int op1len)
556 {
557 unsigned HOST_WIDE_INT x0;
558 unsigned HOST_WIDE_INT x1;
559 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
560 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
561 int l = MAX (op0len - 1, op1len - 1);
562
563 while (l >= 0)
564 {
565 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
566 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
567 if (x0 < x1)
568 return -1;
569 if (x0 > x1)
570 return 1;
571 l--;
572 }
573
574 return 0;
575 }
576
577 /*
578 * Extension.
579 */
580
581 /* Sign-extend the number represented by XVAL and XLEN into VAL,
582 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
583 and VAL have PRECISION bits. */
584 unsigned int
585 wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
586 unsigned int xlen, unsigned int precision, unsigned int offset)
587 {
588 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
589 /* Extending beyond the precision is a no-op. If we have only stored
590 OFFSET bits or fewer, the rest are already signs. */
591 if (offset >= precision || len >= xlen)
592 {
593 for (unsigned i = 0; i < xlen; ++i)
594 val[i] = xval[i];
595 return xlen;
596 }
597 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
598 for (unsigned int i = 0; i < len; i++)
599 val[i] = xval[i];
600 if (suboffset > 0)
601 {
602 val[len] = sext_hwi (xval[len], suboffset);
603 len += 1;
604 }
605 return canonize (val, len, precision);
606 }
607
608 /* Zero-extend the number represented by XVAL and XLEN into VAL,
609 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
610 and VAL have PRECISION bits. */
611 unsigned int
612 wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
613 unsigned int xlen, unsigned int precision, unsigned int offset)
614 {
615 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
616 /* Extending beyond the precision is a no-op. If we have only stored
617 OFFSET bits or fewer, and the upper stored bit is zero, then there
618 is nothing to do. */
619 if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
620 {
621 for (unsigned i = 0; i < xlen; ++i)
622 val[i] = xval[i];
623 return xlen;
624 }
625 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
626 for (unsigned int i = 0; i < len; i++)
627 val[i] = i < xlen ? xval[i] : -1;
628 if (suboffset > 0)
629 val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
630 else
631 val[len] = 0;
632 return canonize (val, len + 1, precision);
633 }
634
635 /*
636 * Masking, inserting, shifting, rotating.
637 */
638
639 /* Insert WIDTH bits from Y into X starting at START. */
640 wide_int
641 wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
642 unsigned int width)
643 {
644 wide_int result;
645 wide_int mask;
646 wide_int tmp;
647
648 unsigned int precision = x.get_precision ();
649 if (start >= precision)
650 return x;
651
652 gcc_checking_assert (precision >= width);
653
654 if (start + width >= precision)
655 width = precision - start;
656
657 mask = wi::shifted_mask (start, width, false, precision);
658 tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
659 result = tmp & mask;
660
661 tmp = wi::bit_and_not (x, mask);
662 result = result | tmp;
663
664 return result;
665 }
666
667 /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
668 Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
669 bits. */
670 unsigned int
671 wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
672 unsigned int xlen, unsigned int precision, unsigned int bit)
673 {
674 unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
675 unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;
676
677 if (block + 1 >= xlen)
678 {
679 /* The operation either affects the last current block or needs
680 a new block. */
681 unsigned int len = block + 1;
682 for (unsigned int i = 0; i < len; i++)
683 val[i] = safe_uhwi (xval, xlen, i);
684 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
685
686 /* If the bit we just set is at the msb of the block, make sure
687 that any higher bits are zeros. */
688 if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
689 val[len++] = 0;
690 return len;
691 }
692 else
693 {
694 for (unsigned int i = 0; i < xlen; i++)
695 val[i] = xval[i];
696 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
697 return canonize (val, xlen, precision);
698 }
699 }
700
701 /* bswap THIS. */
702 wide_int
703 wide_int_storage::bswap () const
704 {
705 wide_int result = wide_int::create (precision);
706 unsigned int i, s;
707 unsigned int len = BLOCKS_NEEDED (precision);
708 unsigned int xlen = get_len ();
709 const HOST_WIDE_INT *xval = get_val ();
710 HOST_WIDE_INT *val = result.write_val ();
711
712 /* This is not a well defined operation if the precision is not a
713 multiple of 8. */
714 gcc_assert ((precision & 0x7) == 0);
715
716 for (i = 0; i < len; i++)
717 val[i] = 0;
718
719 /* Only swap the bytes that are not the padding. */
720 for (s = 0; s < precision; s += 8)
721 {
722 unsigned int d = precision - s - 8;
723 unsigned HOST_WIDE_INT byte;
724
725 unsigned int block = s / HOST_BITS_PER_WIDE_INT;
726 unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
727
728 byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;
729
730 block = d / HOST_BITS_PER_WIDE_INT;
731 offset = d & (HOST_BITS_PER_WIDE_INT - 1);
732
733 val[block] |= byte << offset;
734 }
735
736 result.set_len (canonize (val, len, precision));
737 return result;
738 }
739
740 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
741 above that up to PREC are zeros. The result is inverted if NEGATE
742 is true. Return the number of blocks in VAL. */
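     /* For example, wi::mask (val, 4, false, 16) produces the single block
	0xf (the low four bits of the 16-bit value), while passing true for
	NEGATE produces its complement within that precision, 0xfff0.  */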
743 unsigned int
744 wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
745 unsigned int prec)
746 {
747 if (width >= prec)
748 {
749 val[0] = negate ? 0 : -1;
750 return 1;
751 }
752 else if (width == 0)
753 {
754 val[0] = negate ? -1 : 0;
755 return 1;
756 }
757
758 unsigned int i = 0;
759 while (i < width / HOST_BITS_PER_WIDE_INT)
760 val[i++] = negate ? 0 : -1;
761
762 unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
763 if (shift != 0)
764 {
765 HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
766 val[i++] = negate ? ~last : last;
767 }
768 else
769 val[i++] = negate ? -1 : 0;
770
771 return i;
772 }
773
774 /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
775 bits are ones, and the bits above that up to PREC are zeros. The result
776 is inverted if NEGATE is true. Return the number of blocks in VAL. */
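     /* For example, wi::shifted_mask (val, 4, 4, false, 16) produces the
	single block 0xf0 (bits 4 to 7 of the 16-bit value set), and with
	NEGATE true it produces the complement 0xff0f within that
	precision.  */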
777 unsigned int
778 wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
779 bool negate, unsigned int prec)
780 {
781 if (start >= prec || width == 0)
782 {
783 val[0] = negate ? -1 : 0;
784 return 1;
785 }
786
787 if (width > prec - start)
788 width = prec - start;
789 unsigned int end = start + width;
790
791 unsigned int i = 0;
792 while (i < start / HOST_BITS_PER_WIDE_INT)
793 val[i++] = negate ? -1 : 0;
794
795 unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
796 if (shift)
797 {
798 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
799 shift += width;
800 if (shift < HOST_BITS_PER_WIDE_INT)
801 {
802 /* case 000111000 */
803 block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
804 val[i++] = negate ? ~block : block;
805 return i;
806 }
807 else
808 /* ...111000 */
809 val[i++] = negate ? block : ~block;
810 }
811
812 while (i < end / HOST_BITS_PER_WIDE_INT)
813 /* 1111111 */
814 val[i++] = negate ? 0 : -1;
815
816 shift = end & (HOST_BITS_PER_WIDE_INT - 1);
817 if (shift != 0)
818 {
819 /* 000011111 */
820 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
821 val[i++] = negate ? ~block : block;
822 }
823 else if (end < prec)
824 val[i++] = negate ? -1 : 0;
825
826 return i;
827 }
828
829 /*
830 * Logical operations.
831 */
832
833 /* Set VAL to OP0 & OP1. Return the number of blocks used. */
834 unsigned int
835 wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
836 unsigned int op0len, const HOST_WIDE_INT *op1,
837 unsigned int op1len, unsigned int prec)
838 {
839 int l0 = op0len - 1;
840 int l1 = op1len - 1;
841 bool need_canon = true;
842
843 unsigned int len = MAX (op0len, op1len);
844 if (l0 > l1)
845 {
846 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
847 if (op1mask == 0)
848 {
849 l0 = l1;
850 len = l1 + 1;
851 }
852 else
853 {
854 need_canon = false;
855 while (l0 > l1)
856 {
857 val[l0] = op0[l0];
858 l0--;
859 }
860 }
861 }
862 else if (l1 > l0)
863 {
864 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
865 if (op0mask == 0)
866 len = l0 + 1;
867 else
868 {
869 need_canon = false;
870 while (l1 > l0)
871 {
872 val[l1] = op1[l1];
873 l1--;
874 }
875 }
876 }
877
878 while (l0 >= 0)
879 {
880 val[l0] = op0[l0] & op1[l0];
881 l0--;
882 }
883
884 if (need_canon)
885 len = canonize (val, len, prec);
886
887 return len;
888 }
889
890 /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
891 unsigned int
892 wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
893 unsigned int op0len, const HOST_WIDE_INT *op1,
894 unsigned int op1len, unsigned int prec)
895 {
896 wide_int result;
897 int l0 = op0len - 1;
898 int l1 = op1len - 1;
899 bool need_canon = true;
900
901 unsigned int len = MAX (op0len, op1len);
902 if (l0 > l1)
903 {
904 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
905 if (op1mask != 0)
906 {
907 l0 = l1;
908 len = l1 + 1;
909 }
910 else
911 {
912 need_canon = false;
913 while (l0 > l1)
914 {
915 val[l0] = op0[l0];
916 l0--;
917 }
918 }
919 }
920 else if (l1 > l0)
921 {
922 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
923 if (op0mask == 0)
924 len = l0 + 1;
925 else
926 {
927 need_canon = false;
928 while (l1 > l0)
929 {
930 val[l1] = ~op1[l1];
931 l1--;
932 }
933 }
934 }
935
936 while (l0 >= 0)
937 {
938 val[l0] = op0[l0] & ~op1[l0];
939 l0--;
940 }
941
942 if (need_canon)
943 len = canonize (val, len, prec);
944
945 return len;
946 }
947
948 /* Set VAL to OP0 | OP1. Return the number of blocks used. */
949 unsigned int
950 wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
951 unsigned int op0len, const HOST_WIDE_INT *op1,
952 unsigned int op1len, unsigned int prec)
953 {
954 wide_int result;
955 int l0 = op0len - 1;
956 int l1 = op1len - 1;
957 bool need_canon = true;
958
959 unsigned int len = MAX (op0len, op1len);
960 if (l0 > l1)
961 {
962 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
963 if (op1mask != 0)
964 {
965 l0 = l1;
966 len = l1 + 1;
967 }
968 else
969 {
970 need_canon = false;
971 while (l0 > l1)
972 {
973 val[l0] = op0[l0];
974 l0--;
975 }
976 }
977 }
978 else if (l1 > l0)
979 {
980 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
981 if (op0mask != 0)
982 len = l0 + 1;
983 else
984 {
985 need_canon = false;
986 while (l1 > l0)
987 {
988 val[l1] = op1[l1];
989 l1--;
990 }
991 }
992 }
993
994 while (l0 >= 0)
995 {
996 val[l0] = op0[l0] | op1[l0];
997 l0--;
998 }
999
1000 if (need_canon)
1001 len = canonize (val, len, prec);
1002
1003 return len;
1004 }
1005
1006 /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
1007 unsigned int
1008 wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1009 unsigned int op0len, const HOST_WIDE_INT *op1,
1010 unsigned int op1len, unsigned int prec)
1011 {
1012 wide_int result;
1013 int l0 = op0len - 1;
1014 int l1 = op1len - 1;
1015 bool need_canon = true;
1016
1017 unsigned int len = MAX (op0len, op1len);
1018 if (l0 > l1)
1019 {
1020 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1021 if (op1mask == 0)
1022 {
1023 l0 = l1;
1024 len = l1 + 1;
1025 }
1026 else
1027 {
1028 need_canon = false;
1029 while (l0 > l1)
1030 {
1031 val[l0] = op0[l0];
1032 l0--;
1033 }
1034 }
1035 }
1036 else if (l1 > l0)
1037 {
1038 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1039 if (op0mask != 0)
1040 len = l0 + 1;
1041 else
1042 {
1043 need_canon = false;
1044 while (l1 > l0)
1045 {
1046 val[l1] = ~op1[l1];
1047 l1--;
1048 }
1049 }
1050 }
1051
1052 while (l0 >= 0)
1053 {
1054 val[l0] = op0[l0] | ~op1[l0];
1055 l0--;
1056 }
1057
1058 if (need_canon)
1059 len = canonize (val, len, prec);
1060
1061 return len;
1062 }
1063
1064 /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1065 unsigned int
1066 wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1067 unsigned int op0len, const HOST_WIDE_INT *op1,
1068 unsigned int op1len, unsigned int prec)
1069 {
1070 wide_int result;
1071 int l0 = op0len - 1;
1072 int l1 = op1len - 1;
1073
1074 unsigned int len = MAX (op0len, op1len);
1075 if (l0 > l1)
1076 {
1077 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1078 while (l0 > l1)
1079 {
1080 val[l0] = op0[l0] ^ op1mask;
1081 l0--;
1082 }
1083 }
1084
1085 if (l1 > l0)
1086 {
1087 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1088 while (l1 > l0)
1089 {
1090 val[l1] = op0mask ^ op1[l1];
1091 l1--;
1092 }
1093 }
1094
1095 while (l0 >= 0)
1096 {
1097 val[l0] = op0[l0] ^ op1[l0];
1098 l0--;
1099 }
1100
1101 return canonize (val, len, prec);
1102 }
1103
1104 /*
1105 * Math.
1106 */
1107
1108 /* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1109 whether the result overflows when OP0 and OP1 are treated as having
1110 signedness SGN. Return the number of blocks in VAL. */
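     /* For example, with an 8-bit precision and SGN == SIGNED, adding 100
	and 100 sets *OVERFLOW, because the sign bit of the 8-bit result
	differs from the sign bits of both operands.  */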
1111 unsigned int
1112 wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1113 unsigned int op0len, const HOST_WIDE_INT *op1,
1114 unsigned int op1len, unsigned int prec,
1115 signop sgn, bool *overflow)
1116 {
1117 unsigned HOST_WIDE_INT o0 = 0;
1118 unsigned HOST_WIDE_INT o1 = 0;
1119 unsigned HOST_WIDE_INT x = 0;
1120 unsigned HOST_WIDE_INT carry = 0;
1121 unsigned HOST_WIDE_INT old_carry = 0;
1122 unsigned HOST_WIDE_INT mask0, mask1;
1123 unsigned int i;
1124
1125 unsigned int len = MAX (op0len, op1len);
1126 mask0 = -top_bit_of (op0, op0len, prec);
1127 mask1 = -top_bit_of (op1, op1len, prec);
1128 /* Add all of the explicitly defined elements. */
1129
1130 for (i = 0; i < len; i++)
1131 {
1132 o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
1133 o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
1134 x = o0 + o1 + carry;
1135 val[i] = x;
1136 old_carry = carry;
1137 carry = carry == 0 ? x < o0 : x <= o0;
1138 }
1139
1140 if (len * HOST_BITS_PER_WIDE_INT < prec)
1141 {
1142 val[len] = mask0 + mask1 + carry;
1143 len++;
1144 if (overflow)
1145 *overflow = false;
1146 }
1147 else if (overflow)
1148 {
1149 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1150 if (sgn == SIGNED)
1151 {
1152 unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
1153 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1154 }
1155 else
1156 {
1157 /* Put the MSBs of X and O0 in the top of the HWI. */
1158 x <<= shift;
1159 o0 <<= shift;
1160 if (old_carry)
1161 *overflow = (x <= o0);
1162 else
1163 *overflow = (x < o0);
1164 }
1165 }
1166
1167 return canonize (val, len, prec);
1168 }
1169
1170 /* Subroutines of the multiplication and division operations. Unpack
1171 the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1172 HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1173 uncompressing the top bit of INPUT[IN_LEN - 1]. */
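     /* For example, with a 64-bit HOST_WIDE_INT and 32-bit halves, the
	single input block 0x0000000100000002 unpacks into the half-blocks
	{ 0x2, 0x1 }, low half first, and any remaining output half-blocks
	are filled with the sign (or zero) mask.  */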
1174 static void
1175 wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
1176 unsigned int in_len, unsigned int out_len,
1177 unsigned int prec, signop sgn)
1178 {
1179 unsigned int i;
1180 unsigned int j = 0;
1181 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
1182 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1183 HOST_WIDE_INT mask;
1184
1185 if (sgn == SIGNED)
1186 {
1187 mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
1188 mask &= HALF_INT_MASK;
1189 }
1190 else
1191 mask = 0;
1192
1193 for (i = 0; i < blocks_needed - 1; i++)
1194 {
1195 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1196 result[j++] = x;
1197 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1198 }
1199
1200 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1201 if (small_prec)
1202 {
1203 if (sgn == SIGNED)
1204 x = sext_hwi (x, small_prec);
1205 else
1206 x = zext_hwi (x, small_prec);
1207 }
1208 result[j++] = x;
1209 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1210
1211 /* Smear the sign bit. */
1212 while (j < out_len)
1213 result[j++] = mask;
1214 }
1215
1216 /* The inverse of wi_unpack. IN_LEN is the number of input
1217 blocks. The number of output blocks will be half this amount. */
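     /* For example, with 32-bit halves, the input half-blocks { 0x2, 0x1 }
	pack back into the single 64-bit block 0x0000000100000002.  */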
1218 static void
1219 wi_pack (unsigned HOST_WIDE_INT *result,
1220 const unsigned HOST_HALF_WIDE_INT *input,
1221 unsigned int in_len)
1222 {
1223 unsigned int i = 0;
1224 unsigned int j = 0;
1225
1226 while (i + 2 < in_len)
1227 {
1228 result[j++] = (unsigned HOST_WIDE_INT)input[i]
1229 | ((unsigned HOST_WIDE_INT)input[i + 1]
1230 << HOST_BITS_PER_HALF_WIDE_INT);
1231 i += 2;
1232 }
1233
1234 /* Handle the case where in_len is odd. For this we zero extend. */
1235 if (in_len & 1)
1236 result[j++] = (unsigned HOST_WIDE_INT)input[i];
1237 else
1238 result[j++] = (unsigned HOST_WIDE_INT)input[i]
1239 | ((unsigned HOST_WIDE_INT)input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT);
1240 }
1241
1242 /* Multiply Op1 by Op2. If HIGH is set, only the upper half of the
1243 result is returned.
1244
1245 If HIGH is not set, throw away the upper half after the check is
1246 made to see if it overflows. Unfortunately there is no better way
1247 to check for overflow than to do this. If OVERFLOW is nonnull,
1248 record in *OVERFLOW whether the result overflowed. SGN controls
1249 the signedness and is used to check overflow or if HIGH is set. */
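     /* Apart from the special cases handled below, this is the classic
	schoolbook algorithm: the operands are split into half-wide "digits"
	by wi_unpack, multiplied digit by digit with the carries propagated
	in the inner loop, repacked by wi_pack, and, for SIGNED operands,
	the unsigned product is then corrected.  */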
1250 unsigned int
1251 wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
1252 unsigned int op1len, const HOST_WIDE_INT *op2val,
1253 unsigned int op2len, unsigned int prec, signop sgn,
1254 bool *overflow, bool high)
1255 {
1256 unsigned HOST_WIDE_INT o0, o1, k, t;
1257 unsigned int i;
1258 unsigned int j;
1259 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1260 unsigned int half_blocks_needed = blocks_needed * 2;
1261 /* The sizes here are scaled to support a 2x largest mode by 2x
1262 largest mode yielding a 4x largest mode result. This is what is
1263 needed by vpn. */
1264
1265 unsigned HOST_HALF_WIDE_INT
1266 u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1267 unsigned HOST_HALF_WIDE_INT
1268 v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1269 /* The factor of 2 in the size of 'r' is because we are internally
1270 doing a full multiply. */
1271 unsigned HOST_HALF_WIDE_INT
1272 r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1273 HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
1274
1275 /* If the top level routine did not really pass in an overflow, then
1276 just make sure that we never attempt to set it. */
1277 bool needs_overflow = (overflow != 0);
1278 if (needs_overflow)
1279 *overflow = false;
1280
1281 wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
1282 wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
1283
1284 /* This is a surprisingly common case, so do it first. */
1285 if (op1 == 0 || op2 == 0)
1286 {
1287 val[0] = 0;
1288 return 1;
1289 }
1290
1291 #ifdef umul_ppmm
1292 if (sgn == UNSIGNED)
1293 {
1294 /* If the inputs are single HWIs and the output has room for at
1295 least two HWIs, we can use umul_ppmm directly. */
1296 if (prec >= HOST_BITS_PER_WIDE_INT * 2
1297 && wi::fits_uhwi_p (op1)
1298 && wi::fits_uhwi_p (op2))
1299 {
1300 /* This case never overflows. */
1301 if (high)
1302 {
1303 val[0] = 0;
1304 return 1;
1305 }
1306 umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
1307 if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
1308 {
1309 val[2] = 0;
1310 return 3;
1311 }
1312 return 1 + (val[1] != 0 || val[0] < 0);
1313 }
1314 /* Likewise if the output is a full single HWI, except that the
1315 upper HWI of the result is only used for determining overflow.
1316 (We handle this case inline when overflow isn't needed.) */
1317 else if (prec == HOST_BITS_PER_WIDE_INT)
1318 {
1319 unsigned HOST_WIDE_INT upper;
1320 umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
1321 if (needs_overflow)
1322 *overflow = (upper != 0);
1323 if (high)
1324 val[0] = upper;
1325 return 1;
1326 }
1327 }
1328 #endif
1329
1330 /* Handle multiplications by 1. */
1331 if (op1 == 1)
1332 {
1333 if (high)
1334 {
1335 val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
1336 return 1;
1337 }
1338 for (i = 0; i < op2len; i++)
1339 val[i] = op2val[i];
1340 return op2len;
1341 }
1342 if (op2 == 1)
1343 {
1344 if (high)
1345 {
1346 val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
1347 return 1;
1348 }
1349 for (i = 0; i < op1len; i++)
1350 val[i] = op1val[i];
1351 return op1len;
1352 }
1353
1354 /* If we need to check for overflow, we can only do half wide
1355 multiplies quickly because we need to look at the top bits to
1356 check for the overflow. */
1357 if ((high || needs_overflow)
1358 && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
1359 {
1360 unsigned HOST_WIDE_INT r;
1361
1362 if (sgn == SIGNED)
1363 {
1364 o0 = op1.to_shwi ();
1365 o1 = op2.to_shwi ();
1366 }
1367 else
1368 {
1369 o0 = op1.to_uhwi ();
1370 o1 = op2.to_uhwi ();
1371 }
1372
1373 r = o0 * o1;
1374 if (needs_overflow)
1375 {
1376 if (sgn == SIGNED)
1377 {
1378 if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
1379 *overflow = true;
1380 }
1381 else
1382 {
1383 if ((r >> prec) != 0)
1384 *overflow = true;
1385 }
1386 }
1387 val[0] = high ? r >> prec : r;
1388 return 1;
1389 }
1390
1391 /* We do unsigned mul and then correct it. */
1392 wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
1393 wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);
1394
1395 /* The 2 is for a full mult. */
1396 memset (r, 0, half_blocks_needed * 2
1397 * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);
1398
1399 for (j = 0; j < half_blocks_needed; j++)
1400 {
1401 k = 0;
1402 for (i = 0; i < half_blocks_needed; i++)
1403 {
1404 t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
1405 + r[i + j] + k);
1406 r[i + j] = t & HALF_INT_MASK;
1407 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1408 }
1409 r[j + half_blocks_needed] = k;
1410 }
1411
1412 /* We did unsigned math above. For signed we must adjust the
1413 product (assuming we need to see that). */
1414 if (sgn == SIGNED && (high || needs_overflow))
1415 {
1416 unsigned HOST_WIDE_INT b;
1417 if (wi::neg_p (op1))
1418 {
1419 b = 0;
1420 for (i = 0; i < half_blocks_needed; i++)
1421 {
1422 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1423 - (unsigned HOST_WIDE_INT)v[i] - b;
1424 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1425 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1426 }
1427 }
1428 if (wi::neg_p (op2))
1429 {
1430 b = 0;
1431 for (i = 0; i < half_blocks_needed; i++)
1432 {
1433 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1434 - (unsigned HOST_WIDE_INT)u[i] - b;
1435 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1436 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1437 }
1438 }
1439 }
1440
1441 if (needs_overflow)
1442 {
1443 HOST_WIDE_INT top;
1444
1445 /* For unsigned, overflow is true if any of the top bits are set.
1446 For signed, overflow is true if any of the top bits are not equal
1447 to the sign bit. */
1448 if (sgn == UNSIGNED)
1449 top = 0;
1450 else
1451 {
1452 top = r[(half_blocks_needed) - 1];
1453 top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
1454 top &= mask;
1455 }
1456
1457 for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
1458 if (((HOST_WIDE_INT)(r[i] & mask)) != top)
1459 *overflow = true;
1460 }
1461
1462 if (high)
1463 {
1464 /* compute [prec] <- ([prec] * [prec]) >> [prec] */
1465 wi_pack ((unsigned HOST_WIDE_INT *) val,
1466 &r[half_blocks_needed], half_blocks_needed);
1467 return canonize (val, blocks_needed, prec);
1468 }
1469 else
1470 {
1471 /* compute [prec] <- ([prec] * [prec]) & ((1 << [prec]) - 1) */
1472 wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed);
1473 return canonize (val, blocks_needed, prec);
1474 }
1475 }
1476
1477 /* Compute the population count of X. */
1478 int
1479 wi::popcount (const wide_int_ref &x)
1480 {
1481 unsigned int i;
1482 int count;
1483
1484 /* The high order block is special if it is the last block and the
1485 precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1486 have to clear out any ones above the precision before doing
1487 popcount on this block. */
1488 count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1489 unsigned int stop = x.len;
1490 if (count < 0)
1491 {
1492 count = popcount_hwi (x.uhigh () << -count);
1493 stop -= 1;
1494 }
1495 else
1496 {
1497 if (x.sign_mask () >= 0)
1498 count = 0;
1499 }
1500
1501 for (i = 0; i < stop; ++i)
1502 count += popcount_hwi (x.val[i]);
1503
1504 return count;
1505 }
1506
1507 /* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1508 whether the result overflows when OP0 and OP1 are treated as having
1509 signedness SGN. Return the number of blocks in VAL. */
1510 unsigned int
1511 wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1512 unsigned int op0len, const HOST_WIDE_INT *op1,
1513 unsigned int op1len, unsigned int prec,
1514 signop sgn, bool *overflow)
1515 {
1516 unsigned HOST_WIDE_INT o0 = 0;
1517 unsigned HOST_WIDE_INT o1 = 0;
1518 unsigned HOST_WIDE_INT x = 0;
1519 /* We implement subtraction directly, element by element, propagating
1520 a borrow from each element into the next; the borrow into the
1521 lowest element starts at 0. */
1522 unsigned HOST_WIDE_INT borrow = 0;
1523 unsigned HOST_WIDE_INT old_borrow = 0;
1524
1525 unsigned HOST_WIDE_INT mask0, mask1;
1526 unsigned int i;
1527
1528 unsigned int len = MAX (op0len, op1len);
1529 mask0 = -top_bit_of (op0, op0len, prec);
1530 mask1 = -top_bit_of (op1, op1len, prec);
1531
1532 /* Subtract all of the explicitly defined elements. */
1533 for (i = 0; i < len; i++)
1534 {
1535 o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
1536 o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
1537 x = o0 - o1 - borrow;
1538 val[i] = x;
1539 old_borrow = borrow;
1540 borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
1541 }
1542
1543 if (len * HOST_BITS_PER_WIDE_INT < prec)
1544 {
1545 val[len] = mask0 - mask1 - borrow;
1546 len++;
1547 if (overflow)
1548 *overflow = false;
1549 }
1550 else if (overflow)
1551 {
1552 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1553 if (sgn == SIGNED)
1554 {
1555 unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
1556 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1557 }
1558 else
1559 {
1560 /* Put the MSBs of X and O0 in the top of the HWI. */
1561 x <<= shift;
1562 o0 <<= shift;
1563 if (old_borrow)
1564 *overflow = (x >= o0);
1565 else
1566 *overflow = (x > o0);
1567 }
1568 }
1569
1570 return canonize (val, len, prec);
1571 }
1572
1573
1574 /*
1575 * Division and Mod
1576 */
1577
1578 /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The
1579 algorithm is a small modification of the algorithm in Hacker's
1580 Delight by Warren, which itself is a small modification of Knuth's
1581 algorithm. M is the number of significant elements of B_DIVIDEND;
1582 however, there needs to be at least one extra element of B_DIVIDEND
1583 allocated. N is the number of elements of B_DIVISOR. */
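     /* This is essentially Knuth's Algorithm D working in base
	2^HOST_BITS_PER_HALF_WIDE_INT digits: the divisor is first
	normalized so that its top digit has its high bit set, each
	quotient digit is estimated from the leading digits of the dividend
	and divisor, and the rare case where the estimate is one too large
	is fixed up by the add-back step.  */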
1584 static void
1585 divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
1586 unsigned HOST_HALF_WIDE_INT *b_remainder,
1587 unsigned HOST_HALF_WIDE_INT *b_dividend,
1588 unsigned HOST_HALF_WIDE_INT *b_divisor,
1589 int m, int n)
1590 {
1591 /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
1592 HOST_WIDE_INT and stored in the lower bits of each word. This
1593 algorithm should work properly on both 32 and 64 bit
1594 machines. */
1595 unsigned HOST_WIDE_INT b
1596 = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
1597 unsigned HOST_WIDE_INT qhat; /* Estimate of quotient digit. */
1598 unsigned HOST_WIDE_INT rhat; /* A remainder. */
1599 unsigned HOST_WIDE_INT p; /* Product of two digits. */
1600 HOST_WIDE_INT t, k;
1601 int i, j, s;
1602
1603 /* Single digit divisor. */
1604 if (n == 1)
1605 {
1606 k = 0;
1607 for (j = m - 1; j >= 0; j--)
1608 {
1609 b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
1610 k = ((k * b + b_dividend[j])
1611 - ((unsigned HOST_WIDE_INT)b_quotient[j]
1612 * (unsigned HOST_WIDE_INT)b_divisor[0]));
1613 }
1614 b_remainder[0] = k;
1615 return;
1616 }
1617
1618 s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
1619
1620 if (s)
1621 {
1622 /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1623 algorithm, we can overwrite b_dividend and b_divisor, so we do
1624 that. */
1625 for (i = n - 1; i > 0; i--)
1626 b_divisor[i] = (b_divisor[i] << s)
1627 | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1628 b_divisor[0] = b_divisor[0] << s;
1629
1630 b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
1631 for (i = m - 1; i > 0; i--)
1632 b_dividend[i] = (b_dividend[i] << s)
1633 | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1634 b_dividend[0] = b_dividend[0] << s;
1635 }
1636
1637 /* Main loop. */
1638 for (j = m - n; j >= 0; j--)
1639 {
1640 qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
1641 rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
1642 again:
1643 if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
1644 {
1645 qhat -= 1;
1646 rhat += b_divisor[n-1];
1647 if (rhat < b)
1648 goto again;
1649 }
1650
1651 /* Multiply and subtract. */
1652 k = 0;
1653 for (i = 0; i < n; i++)
1654 {
1655 p = qhat * b_divisor[i];
1656 t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
1657 b_dividend[i + j] = t;
1658 k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
1659 - (t >> HOST_BITS_PER_HALF_WIDE_INT));
1660 }
1661 t = b_dividend[j+n] - k;
1662 b_dividend[j+n] = t;
1663
1664 b_quotient[j] = qhat;
1665 if (t < 0)
1666 {
1667 b_quotient[j] -= 1;
1668 k = 0;
1669 for (i = 0; i < n; i++)
1670 {
1671 t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
1672 b_dividend[i+j] = t;
1673 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1674 }
1675 b_dividend[j+n] += k;
1676 }
1677 }
1678 if (s)
1679 for (i = 0; i < n; i++)
1680 b_remainder[i] = (b_dividend[i] >> s)
1681 | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
1682 else
1683 for (i = 0; i < n; i++)
1684 b_remainder[i] = b_dividend[i];
1685 }
1686
1687
1688 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1689 the result. If QUOTIENT is nonnull, store the value of the quotient
1690 there and return the number of blocks in it. The return value is
1691 not defined otherwise. If REMAINDER is nonnull, store the value
1692 of the remainder there and store the number of blocks in
1693 *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1694 the division overflowed. */
1695 unsigned int
1696 wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
1697 HOST_WIDE_INT *remainder,
1698 const HOST_WIDE_INT *dividend_val,
1699 unsigned int dividend_len, unsigned int dividend_prec,
1700 const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
1701 unsigned int divisor_prec, signop sgn,
1702 bool *oflow)
1703 {
1704 unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1705 unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
1706 unsigned HOST_HALF_WIDE_INT
1707 b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1708 unsigned HOST_HALF_WIDE_INT
1709 b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1710 unsigned HOST_HALF_WIDE_INT
1711 b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1712 unsigned HOST_HALF_WIDE_INT
1713 b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1714 unsigned int m, n;
1715 bool dividend_neg = false;
1716 bool divisor_neg = false;
1717 bool overflow = false;
1718 wide_int neg_dividend, neg_divisor;
1719
1720 wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1721 dividend_prec);
1722 wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1723 divisor_prec);
1724 if (divisor == 0)
1725 overflow = true;
1726
1727 /* The smallest signed number / -1 causes overflow. The dividend_len
1728 check is for speed rather than correctness. */
1729 if (sgn == SIGNED
1730 && dividend_len == BLOCKS_NEEDED (dividend_prec)
1731 && divisor == -1
1732 && wi::only_sign_bit_p (dividend))
1733 overflow = true;
1734
1735 /* Handle the overflow cases. Viewed as an unsigned value, the quotient of
1736 (signed min / -1) has the same representation as the original dividend.
1737 We have traditionally made division by zero act as division by one,
1738 so there too we use the original dividend. */
1739 if (overflow)
1740 {
1741 if (remainder)
1742 {
1743 *remainder_len = 1;
1744 remainder[0] = 0;
1745 }
1746 if (oflow != 0)
1747 *oflow = true;
1748 if (quotient)
1749 for (unsigned int i = 0; i < dividend_len; ++i)
1750 quotient[i] = dividend_val[i];
1751 return dividend_len;
1752 }
1753
1754 if (oflow)
1755 *oflow = false;
1756
1757 /* Do it on the host if you can. */
1758 if (sgn == SIGNED
1759 && wi::fits_shwi_p (dividend)
1760 && wi::fits_shwi_p (divisor))
1761 {
1762 HOST_WIDE_INT o0 = dividend.to_shwi ();
1763 HOST_WIDE_INT o1 = divisor.to_shwi ();
1764
1765 if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1766 {
1767 gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
1768 if (quotient)
1769 {
1770 quotient[0] = HOST_WIDE_INT_MIN;
1771 quotient[1] = 0;
1772 }
1773 if (remainder)
1774 {
1775 remainder[0] = 0;
1776 *remainder_len = 1;
1777 }
1778 return 2;
1779 }
1780 else
1781 {
1782 if (quotient)
1783 quotient[0] = o0 / o1;
1784 if (remainder)
1785 {
1786 remainder[0] = o0 % o1;
1787 *remainder_len = 1;
1788 }
1789 return 1;
1790 }
1791 }
1792
1793 if (sgn == UNSIGNED
1794 && wi::fits_uhwi_p (dividend)
1795 && wi::fits_uhwi_p (divisor))
1796 {
1797 unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1798 unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
1799
1800 if (quotient)
1801 quotient[0] = o0 / o1;
1802 if (remainder)
1803 {
1804 remainder[0] = o0 % o1;
1805 *remainder_len = 1;
1806 }
1807 return 1;
1808 }
1809
1810 /* Make the divisor and dividend positive and remember what we
1811 did. */
1812 if (sgn == SIGNED)
1813 {
1814 if (wi::neg_p (dividend))
1815 {
1816 neg_dividend = -dividend;
1817 dividend = neg_dividend;
1818 dividend_neg = true;
1819 }
1820 if (wi::neg_p (divisor))
1821 {
1822 neg_divisor = -divisor;
1823 divisor = neg_divisor;
1824 divisor_neg = true;
1825 }
1826 }
1827
1828 wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
1829 dividend_blocks_needed, dividend_prec, sgn);
1830 wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
1831 divisor_blocks_needed, divisor_prec, sgn);
1832
1833 m = dividend_blocks_needed;
1834 b_dividend[m] = 0;
1835 while (m > 1 && b_dividend[m - 1] == 0)
1836 m--;
1837
1838 n = divisor_blocks_needed;
1839 while (n > 1 && b_divisor[n - 1] == 0)
1840 n--;
1841
1842 memset (b_quotient, 0, sizeof (b_quotient));
1843
1844 divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
1845
1846 unsigned int quotient_len = 0;
1847 if (quotient)
1848 {
1849 wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m);
1850 quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec);
1851 /* The quotient is neg if exactly one of the divisor or dividend is
1852 neg. */
1853 if (dividend_neg != divisor_neg)
1854 quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
1855 quotient_len, dividend_prec,
1856 UNSIGNED, 0);
1857 }
1858
1859 if (remainder)
1860 {
1861 wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n);
1862 *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec);
1863 /* The remainder is always the same sign as the dividend. */
1864 if (dividend_neg)
1865 *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
1866 *remainder_len, dividend_prec,
1867 UNSIGNED, 0);
1868 }
1869
1870 return quotient_len;
1871 }
1872
1873 /*
1874 * Shifting, rotating and extraction.
1875 */
1876
1877 /* Left shift XVAL by SHIFT and store the result in VAL. Return the
1878 number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
1879 unsigned int
1880 wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1881 unsigned int xlen, unsigned int precision,
1882 unsigned int shift)
1883 {
1884 /* Split the shift into a whole-block shift and a subblock shift. */
1885 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1886 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1887
1888 /* The whole-block shift fills with zeros. */
1889 unsigned int len = BLOCKS_NEEDED (precision);
1890 for (unsigned int i = 0; i < skip; ++i)
1891 val[i] = 0;
1892
1893 /* It's easier to handle the simple block case specially. */
1894 if (small_shift == 0)
1895 for (unsigned int i = skip; i < len; ++i)
1896 val[i] = safe_uhwi (xval, xlen, i - skip);
1897 else
1898 {
1899 /* The first unfilled output block is a left shift of the first
1900 block in XVAL. The other output blocks contain bits from two
1901 consecutive input blocks. */
1902 unsigned HOST_WIDE_INT carry = 0;
1903 for (unsigned int i = skip; i < len; ++i)
1904 {
1905 unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
1906 val[i] = (x << small_shift) | carry;
1907 carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
1908 }
1909 }
1910 return canonize (val, len, precision);
1911 }
1912
1913 /* Right shift XVAL by SHIFT and store the result in VAL. Return the
1914 number of blocks in VAL. The input has XPRECISION bits and the
1915 output has XPRECISION - SHIFT bits. */
1916 static unsigned int
1917 rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1918 unsigned int xlen, unsigned int xprecision,
1919 unsigned int shift)
1920 {
1921 /* Split the shift into a whole-block shift and a subblock shift. */
1922 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1923 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1924
1925 /* Work out how many blocks are needed to store the significant bits
1926 (excluding the upper zeros or signs). */
1927 unsigned int len = BLOCKS_NEEDED (xprecision - shift);
1928
1929 /* It's easier to handle the simple block case specially. */
1930 if (small_shift == 0)
1931 for (unsigned int i = 0; i < len; ++i)
1932 val[i] = safe_uhwi (xval, xlen, i + skip);
1933 else
1934 {
1935 /* Each output block but the last is a combination of two input blocks.
1936 The last block is a right shift of the last block in XVAL. */
1937 unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
1938 for (unsigned int i = 0; i < len; ++i)
1939 {
1940 val[i] = curr >> small_shift;
1941 curr = safe_uhwi (xval, xlen, i + skip + 1);
1942 val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
1943 }
1944 }
1945 return len;
1946 }
1947
1948 /* Logically right shift XVAL by SHIFT and store the result in VAL.
1949 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1950 VAL has PRECISION bits. */
1951 unsigned int
1952 wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1953 unsigned int xlen, unsigned int xprecision,
1954 unsigned int precision, unsigned int shift)
1955 {
1956 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1957
1958 /* The value we just created has precision XPRECISION - SHIFT.
1959 Zero-extend it to wider precisions. */
1960 if (precision > xprecision - shift)
1961 {
1962 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1963 if (small_prec)
1964 val[len - 1] = zext_hwi (val[len - 1], small_prec);
1965 else if (val[len - 1] < 0)
1966 {
1967 /* Add a new block with a zero. */
1968 val[len++] = 0;
1969 return len;
1970 }
1971 }
1972 return canonize (val, len, precision);
1973 }
1974
1975 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
1976 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1977 VAL has PRECISION bits. */
1978 unsigned int
1979 wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1980 unsigned int xlen, unsigned int xprecision,
1981 unsigned int precision, unsigned int shift)
1982 {
1983 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1984
1985 /* The value we just created has precision XPRECISION - SHIFT.
1986 Sign-extend it to wider types. */
1987 if (precision > xprecision - shift)
1988 {
1989 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1990 if (small_prec)
1991 val[len - 1] = sext_hwi (val[len - 1], small_prec);
1992 }
1993 return canonize (val, len, precision);
1994 }
1995
1996 /* Return the number of leading (upper) zeros in X. */
1997 int
1998 wi::clz (const wide_int_ref &x)
1999 {
2000 /* Calculate how many bits there are above the highest represented block. */
2001 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2002
2003 unsigned HOST_WIDE_INT high = x.uhigh ();
2004 if (count < 0)
2005 /* The upper -COUNT bits of HIGH are not part of the value.
2006 Clear them out. */
2007 high = (high << -count) >> -count;
2008 else if (x.sign_mask () < 0)
2009 /* The upper bit is set, so there are no leading zeros. */
2010 return 0;
2011
2012 /* We don't need to look below HIGH. Either HIGH is nonzero,
2013 or the top bit of the block below is nonzero; clz_hwi is
2014 HOST_BITS_PER_WIDE_INT in the latter case. */
2015 return count + clz_hwi (high);
2016 }
2017
2018 /* Return the number of redundant sign bits in X. (That is, the number
2019 of bits immediately below the sign bit that have the same value as
2020 the sign bit.) */
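     /* For example, for a 16-bit value of 3 there are 13 redundant sign
	bits (the sign bit is 0, and bits 14 down to 2 are also 0); for 0
	or -1 the result is precision - 1.  */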
2021 int
2022 wi::clrsb (const wide_int_ref &x)
2023 {
2024 /* Calculate how many bits there are above the highest represented block. */
2025 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2026
2027 unsigned HOST_WIDE_INT high = x.uhigh ();
2028 unsigned HOST_WIDE_INT mask = -1;
2029 if (count < 0)
2030 {
2031 /* The upper -COUNT bits of HIGH are not part of the value.
2032 Clear them from both MASK and HIGH. */
2033 mask >>= -count;
2034 high &= mask;
2035 }
2036
2037 /* If the top bit is 1, count the number of leading 1s. If the top
2038 bit is zero, count the number of leading zeros. */
2039 if (high > mask / 2)
2040 high ^= mask;
2041
2042 /* There are no sign bits below the top block, so we don't need to look
2043 beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2044 HIGH is 0. */
2045 return count + clz_hwi (high) - 1;
2046 }
2047
2048 /* Return the number of trailing (lower) zeros in X. */
2049 int
2050 wi::ctz (const wide_int_ref &x)
2051 {
2052 if (x.len == 1 && x.ulow () == 0)
2053 return x.precision;
2054
2055 /* Having dealt with the zero case, there must be a block with a
2056 nonzero bit. We don't care about the bits above the first 1. */
2057 unsigned int i = 0;
2058 while (x.val[i] == 0)
2059 ++i;
2060 return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
2061 }
2062
2063 /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2064 return -1. */
2065 int
2066 wi::exact_log2 (const wide_int_ref &x)
2067 {
2068 /* Reject cases where there are implicit -1 blocks above HIGH. */
2069 if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
2070 return -1;
2071
2072 /* Set CRUX to the index of the entry that should be nonzero.
2073 If the top block is zero then the next lowest block (if any)
2074 must have the high bit set. */
2075 unsigned int crux = x.len - 1;
2076 if (crux > 0 && x.val[crux] == 0)
2077 crux -= 1;
2078
2079 /* Check that all lower blocks are zero. */
2080 for (unsigned int i = 0; i < crux; ++i)
2081 if (x.val[i] != 0)
2082 return -1;
2083
2084 /* Get a zero-extended form of block CRUX. */
2085 unsigned HOST_WIDE_INT hwi = x.val[crux];
2086 if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
2087 hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
2088
2089 /* Now it's down to whether HWI is a power of 2. */
2090 int res = ::exact_log2 (hwi);
2091 if (res >= 0)
2092 res += crux * HOST_BITS_PER_WIDE_INT;
2093 return res;
2094 }
2095
2096 /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
2097 int
2098 wi::floor_log2 (const wide_int_ref &x)
2099 {
2100 return x.precision - 1 - clz (x);
2101 }
2102
2103 /* Return the index of the first (lowest) set bit in X, counting from 1.
2104 Return 0 if X is 0. */
2105 int
2106 wi::ffs (const wide_int_ref &x)
2107 {
2108 return eq_p (x, 0) ? 0 : ctz (x) + 1;
2109 }
2110
2111 /* Return true if sign-extending X to have precision PRECISION would give
2112 the minimum signed value at that precision. */
2113 bool
2114 wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2115 {
2116 return ctz (x) + 1 == int (precision);
2117 }
2118
2119 /* Return true if X represents the minimum signed value. */
2120 bool
2121 wi::only_sign_bit_p (const wide_int_ref &x)
2122 {
2123 return only_sign_bit_p (x, x.precision);
2124 }
2125
2126 /*
2127 * Private utilities.
2128 */
2129
2130 void gt_ggc_mx (widest_int *) { }
2131 void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
2132 void gt_pch_nx (widest_int *) { }
2133
2134 template void wide_int::dump () const;
2135 template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2136 template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2137 template void offset_int::dump () const;
2138 template void widest_int::dump () const;