simplify-rtx.c (simplify_binary_operation): Constant fold DIV, MOD, UDIV and UMOD...
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
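
/* As an illustration of the (low, high) convention above, assuming a
   64-bit HOST_WIDE_INT: HWI_SIGN_EXTEND applied to a low word whose sign
   bit is set (say 0xffffffff00000000) yields (HOST_WIDE_INT) -1 for the
   high word, while a nonnegative low word such as 0x7 yields 0.  */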
51
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
55 rtx, int);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 \f
61 /* Negate a CONST_INT rtx, truncating (because a conversion from a
62 maximally negative number can overflow). */
63 static rtx
64 neg_const_int (enum machine_mode mode, rtx i)
65 {
66 return gen_int_mode (- INTVAL (i), mode);
67 }
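
/* For example, negating (const_int -128) in QImode gives (const_int -128)
   back again: 128 does not fit in QImode, so gen_int_mode truncates it to
   the mode and re-sign-extends, which is exactly the overflow case the
   comment above guards against.  */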
68
69 \f
70 /* Make a binary operation by properly ordering the operands and
71 seeing if the expression folds. */
72
73 rtx
74 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
75 rtx op1)
76 {
77 rtx tem;
78
79 /* Put complex operands first and constants second if commutative. */
80 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
81 && swap_commutative_operands_p (op0, op1))
82 tem = op0, op0 = op1, op1 = tem;
83
84 /* If this simplifies, do it. */
85 tem = simplify_binary_operation (code, mode, op0, op1);
86 if (tem)
87 return tem;
88
89 /* Handle addition and subtraction specially. Otherwise, just form
90 the operation. */
91
92 if (code == PLUS || code == MINUS)
93 {
94 tem = simplify_plus_minus (code, mode, op0, op1, 1);
95 if (tem)
96 return tem;
97 }
98
99 return gen_rtx_fmt_ee (code, mode, op0, op1);
100 }
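
/* A small usage sketch (reg_x is a hypothetical SImode pseudo):

     simplify_gen_binary (PLUS, SImode, reg_x, const0_rtx)

   leaves the operand order alone (the constant is already second), and
   simplify_binary_operation folds x + 0 to x for integer modes, so the
   call returns reg_x itself instead of building a new (plus ...) rtx.  */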
101 \f
102 /* If X is a MEM referencing the constant pool, return the real value.
103 Otherwise return X. */
104 rtx
105 avoid_constant_pool_reference (rtx x)
106 {
107 rtx c, tmp, addr;
108 enum machine_mode cmode;
109
110 switch (GET_CODE (x))
111 {
112 case MEM:
113 break;
114
115 case FLOAT_EXTEND:
116 /* Handle float extensions of constant pool references. */
117 tmp = XEXP (x, 0);
118 c = avoid_constant_pool_reference (tmp);
119 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
120 {
121 REAL_VALUE_TYPE d;
122
123 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
124 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
125 }
126 return x;
127
128 default:
129 return x;
130 }
131
132 addr = XEXP (x, 0);
133
134 /* Call target hook to avoid the effects of -fpic etc.... */
135 addr = targetm.delegitimize_address (addr);
136
137 if (GET_CODE (addr) == LO_SUM)
138 addr = XEXP (addr, 1);
139
140 if (GET_CODE (addr) != SYMBOL_REF
141 || ! CONSTANT_POOL_ADDRESS_P (addr))
142 return x;
143
144 c = get_pool_constant (addr);
145 cmode = get_pool_mode (addr);
146
147 /* If we're accessing the constant in a different mode than it was
148 originally stored, attempt to fix that up via subreg simplifications.
149 If that fails we have no choice but to return the original memory. */
150 if (cmode != GET_MODE (x))
151 {
152 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
153 return c ? c : x;
154 }
155
156 return c;
157 }
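
/* For instance, if X is an SFmode (mem (symbol_ref ...)) whose address is
   a constant pool entry holding an SFmode CONST_DOUBLE, that CONST_DOUBLE
   is returned directly; if X reads the entry in some other mode, the
   simplify_subreg call above tries to reinterpret the constant, and the
   original MEM is returned when that fails.  */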
158 \f
159 /* Make a unary operation by first seeing if it folds and otherwise making
160 the specified operation. */
161
162 rtx
163 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
164 enum machine_mode op_mode)
165 {
166 rtx tem;
167
168 /* If this simplifies, use it. */
169 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
170 return tem;
171
172 return gen_rtx_fmt_e (code, mode, op);
173 }
174
175 /* Likewise for ternary operations. */
176
177 rtx
178 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
179 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
180 {
181 rtx tem;
182
183 /* If this simplifies, use it. */
184 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
185 op0, op1, op2)))
186 return tem;
187
188 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
189 }
190 \f
191 /* Likewise, for relational operations.
192 CMP_MODE specifies the mode in which the comparison is done.
193 */
194
195 rtx
196 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
197 enum machine_mode cmp_mode, rtx op0, rtx op1)
198 {
199 rtx tem;
200
201 if (cmp_mode == VOIDmode)
202 cmp_mode = GET_MODE (op0);
203 if (cmp_mode == VOIDmode)
204 cmp_mode = GET_MODE (op1);
205
206 if (cmp_mode != VOIDmode)
207 {
208 tem = simplify_relational_operation (code, mode, cmp_mode, op0, op1);
209 if (tem)
210 return tem;
211 }
212
213 /* For the following tests, ensure const0_rtx is op1. */
214 if (swap_commutative_operands_p (op0, op1)
215 || (op0 == const0_rtx && op1 != const0_rtx))
216 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
217
218 /* If op0 is a compare, extract the comparison arguments from it. */
219 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
220 return simplify_gen_relational (code, mode, VOIDmode,
221 XEXP (op0, 0), XEXP (op0, 1));
222
223 /* If op0 is a comparison, extract the comparison arguments from it. */
224 if (COMPARISON_P (op0) && op1 == const0_rtx)
225 {
226 if (code == NE)
227 {
228 if (GET_MODE (op0) == mode)
229 return op0;
230 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
231 XEXP (op0, 0), XEXP (op0, 1));
232 }
233 else if (code == EQ)
234 {
235 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
236 if (new != UNKNOWN)
237 return simplify_gen_relational (new, mode, VOIDmode,
238 XEXP (op0, 0), XEXP (op0, 1));
239 }
240 }
241
242 return gen_rtx_fmt_ee (code, mode, op0, op1);
243 }
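
/* Two examples of the comparison-of-comparison handling above:
   (ne (eq X Y) (const_int 0)) in the requested mode collapses to the inner
   (eq X Y), and (eq (ltu X Y) (const_int 0)) becomes (geu X Y) by way of
   reversed_comparison_code.  */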
244 \f
245 /* Replace all occurrences of OLD in X with NEW and try to simplify the
246 resulting RTX. Return a new RTX which is as simplified as possible. */
247
248 rtx
249 simplify_replace_rtx (rtx x, rtx old, rtx new)
250 {
251 enum rtx_code code = GET_CODE (x);
252 enum machine_mode mode = GET_MODE (x);
253 enum machine_mode op_mode;
254 rtx op0, op1, op2;
255
256 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
257 to build a new expression substituting recursively. If we can't do
258 anything, return our input. */
259
260 if (x == old)
261 return new;
262
263 switch (GET_RTX_CLASS (code))
264 {
265 case RTX_UNARY:
266 op0 = XEXP (x, 0);
267 op_mode = GET_MODE (op0);
268 op0 = simplify_replace_rtx (op0, old, new);
269 if (op0 == XEXP (x, 0))
270 return x;
271 return simplify_gen_unary (code, mode, op0, op_mode);
272
273 case RTX_BIN_ARITH:
274 case RTX_COMM_ARITH:
275 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
276 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
277 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
278 return x;
279 return simplify_gen_binary (code, mode, op0, op1);
280
281 case RTX_COMPARE:
282 case RTX_COMM_COMPARE:
283 op0 = XEXP (x, 0);
284 op1 = XEXP (x, 1);
285 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
286 op0 = simplify_replace_rtx (op0, old, new);
287 op1 = simplify_replace_rtx (op1, old, new);
288 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
289 return x;
290 return simplify_gen_relational (code, mode, op_mode, op0, op1);
291
292 case RTX_TERNARY:
293 case RTX_BITFIELD_OPS:
294 op0 = XEXP (x, 0);
295 op_mode = GET_MODE (op0);
296 op0 = simplify_replace_rtx (op0, old, new);
297 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
298 op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
299 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
300 return x;
301 if (op_mode == VOIDmode)
302 op_mode = GET_MODE (op0);
303 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
304
305 case RTX_EXTRA:
306 /* The only case we try to handle is a SUBREG. */
307 if (code == SUBREG)
308 {
309 op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
310 if (op0 == SUBREG_REG (x))
311 return x;
312 op0 = simplify_gen_subreg (GET_MODE (x), op0,
313 GET_MODE (SUBREG_REG (x)),
314 SUBREG_BYTE (x));
315 return op0 ? op0 : x;
316 }
317 break;
318
319 case RTX_OBJ:
320 if (code == MEM)
321 {
322 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
323 if (op0 == XEXP (x, 0))
324 return x;
325 return replace_equiv_address_nv (x, op0);
326 }
327 else if (code == LO_SUM)
328 {
329 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
330 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
331
332 /* (lo_sum (high x) x) -> x */
333 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
334 return op1;
335
336 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
337 return x;
338 return gen_rtx_LO_SUM (mode, op0, op1);
339 }
340 else if (code == REG)
341 {
342 if (REG_P (old) && REGNO (x) == REGNO (old))
343 return new;
344 }
345 break;
346
347 default:
348 break;
349 }
350 return x;
351 }
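
/* Usage sketch (the operands are purely illustrative): replacing (reg A)
   with (const_int 5) in (neg:SI (reg A)) takes the RTX_UNARY path above,
   and simplify_gen_unary then constant-folds the negation, so the result
   is (const_int -5) rather than (neg (const_int 5)).  */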
352 \f
353 /* Try to simplify a unary operation CODE whose output mode is to be
354 MODE with input operand OP whose mode was originally OP_MODE.
355 Return zero if no simplification can be made. */
356 rtx
357 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
358 rtx op, enum machine_mode op_mode)
359 {
360 unsigned int width = GET_MODE_BITSIZE (mode);
361 rtx trueop = avoid_constant_pool_reference (op);
362
363 if (code == VEC_DUPLICATE)
364 {
365 if (!VECTOR_MODE_P (mode))
366 abort ();
367 if (GET_MODE (trueop) != VOIDmode
368 && !VECTOR_MODE_P (GET_MODE (trueop))
369 && GET_MODE_INNER (mode) != GET_MODE (trueop))
370 abort ();
371 if (GET_MODE (trueop) != VOIDmode
372 && VECTOR_MODE_P (GET_MODE (trueop))
373 && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
374 abort ();
375 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
376 || GET_CODE (trueop) == CONST_VECTOR)
377 {
378 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
379 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
380 rtvec v = rtvec_alloc (n_elts);
381 unsigned int i;
382
383 if (GET_CODE (trueop) != CONST_VECTOR)
384 for (i = 0; i < n_elts; i++)
385 RTVEC_ELT (v, i) = trueop;
386 else
387 {
388 enum machine_mode inmode = GET_MODE (trueop);
389 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
390 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
391
392 if (in_n_elts >= n_elts || n_elts % in_n_elts)
393 abort ();
394 for (i = 0; i < n_elts; i++)
395 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
396 }
397 return gen_rtx_CONST_VECTOR (mode, v);
398 }
399 }
400 else if (GET_CODE (op) == CONST)
401 return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
402
403 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
404 {
405 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
406 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
407 enum machine_mode opmode = GET_MODE (trueop);
408 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
409 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
410 rtvec v = rtvec_alloc (n_elts);
411 unsigned int i;
412
413 if (op_n_elts != n_elts)
414 abort ();
415
416 for (i = 0; i < n_elts; i++)
417 {
418 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
419 CONST_VECTOR_ELT (trueop, i),
420 GET_MODE_INNER (opmode));
421 if (!x)
422 return 0;
423 RTVEC_ELT (v, i) = x;
424 }
425 return gen_rtx_CONST_VECTOR (mode, v);
426 }
427
428 /* The order of these tests is critical so that, for example, we don't
429 check the wrong mode (input vs. output) for a conversion operation,
430 such as FIX. At some point, this should be simplified. */
431
432 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
433 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
434 {
435 HOST_WIDE_INT hv, lv;
436 REAL_VALUE_TYPE d;
437
438 if (GET_CODE (trueop) == CONST_INT)
439 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
440 else
441 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
442
443 REAL_VALUE_FROM_INT (d, lv, hv, mode);
444 d = real_value_truncate (mode, d);
445 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
446 }
447 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
448 && (GET_CODE (trueop) == CONST_DOUBLE
449 || GET_CODE (trueop) == CONST_INT))
450 {
451 HOST_WIDE_INT hv, lv;
452 REAL_VALUE_TYPE d;
453
454 if (GET_CODE (trueop) == CONST_INT)
455 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
456 else
457 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
458
459 if (op_mode == VOIDmode)
460 {
461 /* We don't know how to interpret negative-looking numbers in
462 this case, so don't try to fold those. */
463 if (hv < 0)
464 return 0;
465 }
466 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
467 ;
468 else
469 hv = 0, lv &= GET_MODE_MASK (op_mode);
470
471 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
472 d = real_value_truncate (mode, d);
473 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
474 }
475
476 if (GET_CODE (trueop) == CONST_INT
477 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
478 {
479 HOST_WIDE_INT arg0 = INTVAL (trueop);
480 HOST_WIDE_INT val;
481
482 switch (code)
483 {
484 case NOT:
485 val = ~ arg0;
486 break;
487
488 case NEG:
489 val = - arg0;
490 break;
491
492 case ABS:
493 val = (arg0 >= 0 ? arg0 : - arg0);
494 break;
495
496 case FFS:
497 /* Don't use ffs here. Instead, get low order bit and then its
498 number. If arg0 is zero, this will return 0, as desired. */
499 arg0 &= GET_MODE_MASK (mode);
500 val = exact_log2 (arg0 & (- arg0)) + 1;
501 break;
502
503 case CLZ:
504 arg0 &= GET_MODE_MASK (mode);
505 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
506 ;
507 else
508 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
509 break;
510
511 case CTZ:
512 arg0 &= GET_MODE_MASK (mode);
513 if (arg0 == 0)
514 {
515 /* Even if the value at zero is undefined, we have to come
516 up with some replacement. Seems good enough. */
517 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
518 val = GET_MODE_BITSIZE (mode);
519 }
520 else
521 val = exact_log2 (arg0 & -arg0);
522 break;
523
524 case POPCOUNT:
525 arg0 &= GET_MODE_MASK (mode);
526 val = 0;
527 while (arg0)
528 val++, arg0 &= arg0 - 1;
529 break;
530
531 case PARITY:
532 arg0 &= GET_MODE_MASK (mode);
533 val = 0;
534 while (arg0)
535 val++, arg0 &= arg0 - 1;
536 val &= 1;
537 break;
538
539 case TRUNCATE:
540 val = arg0;
541 break;
542
543 case ZERO_EXTEND:
544 /* When zero-extending a CONST_INT, we need to know its
545 original mode. */
546 if (op_mode == VOIDmode)
547 abort ();
548 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
549 {
550 /* If we were really extending the mode,
551 we would have to distinguish between zero-extension
552 and sign-extension. */
553 if (width != GET_MODE_BITSIZE (op_mode))
554 abort ();
555 val = arg0;
556 }
557 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
558 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
559 else
560 return 0;
561 break;
562
563 case SIGN_EXTEND:
564 if (op_mode == VOIDmode)
565 op_mode = mode;
566 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
567 {
568 /* If we were really extending the mode,
569 we would have to distinguish between zero-extension
570 and sign-extension. */
571 if (width != GET_MODE_BITSIZE (op_mode))
572 abort ();
573 val = arg0;
574 }
575 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
576 {
577 val
578 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
579 if (val
580 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
581 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
582 }
583 else
584 return 0;
585 break;
586
587 case SQRT:
588 case FLOAT_EXTEND:
589 case FLOAT_TRUNCATE:
590 case SS_TRUNCATE:
591 case US_TRUNCATE:
592 return 0;
593
594 default:
595 abort ();
596 }
597
598 val = trunc_int_for_mode (val, mode);
599
600 return GEN_INT (val);
601 }
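
/* A few worked cases of the CONST_INT folding above, taking arg0 = 12
   (binary 1100): FFS computes exact_log2 (12 & -12) + 1 = exact_log2 (4)
   + 1 = 3, the 1-based index of the lowest set bit; POPCOUNT clears the
   lowest set bit twice before arg0 reaches zero, giving 2; and PARITY is
   that count masked with 1, i.e. 0.  */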
602
603 /* We can do some operations on integer CONST_DOUBLEs. Also allow
604 for a DImode operation on a CONST_INT. */
605 else if (GET_MODE (trueop) == VOIDmode
606 && width <= HOST_BITS_PER_WIDE_INT * 2
607 && (GET_CODE (trueop) == CONST_DOUBLE
608 || GET_CODE (trueop) == CONST_INT))
609 {
610 unsigned HOST_WIDE_INT l1, lv;
611 HOST_WIDE_INT h1, hv;
612
613 if (GET_CODE (trueop) == CONST_DOUBLE)
614 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
615 else
616 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
617
618 switch (code)
619 {
620 case NOT:
621 lv = ~ l1;
622 hv = ~ h1;
623 break;
624
625 case NEG:
626 neg_double (l1, h1, &lv, &hv);
627 break;
628
629 case ABS:
630 if (h1 < 0)
631 neg_double (l1, h1, &lv, &hv);
632 else
633 lv = l1, hv = h1;
634 break;
635
636 case FFS:
637 hv = 0;
638 if (l1 == 0)
639 {
640 if (h1 == 0)
641 lv = 0;
642 else
643 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
644 }
645 else
646 lv = exact_log2 (l1 & -l1) + 1;
647 break;
648
649 case CLZ:
650 hv = 0;
651 if (h1 != 0)
652 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
653 - HOST_BITS_PER_WIDE_INT;
654 else if (l1 != 0)
655 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
656 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
657 lv = GET_MODE_BITSIZE (mode);
658 break;
659
660 case CTZ:
661 hv = 0;
662 if (l1 != 0)
663 lv = exact_log2 (l1 & -l1);
664 else if (h1 != 0)
665 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
666 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
667 lv = GET_MODE_BITSIZE (mode);
668 break;
669
670 case POPCOUNT:
671 hv = 0;
672 lv = 0;
673 while (l1)
674 lv++, l1 &= l1 - 1;
675 while (h1)
676 lv++, h1 &= h1 - 1;
677 break;
678
679 case PARITY:
680 hv = 0;
681 lv = 0;
682 while (l1)
683 lv++, l1 &= l1 - 1;
684 while (h1)
685 lv++, h1 &= h1 - 1;
686 lv &= 1;
687 break;
688
689 case TRUNCATE:
690 /* This is just a change-of-mode, so do nothing. */
691 lv = l1, hv = h1;
692 break;
693
694 case ZERO_EXTEND:
695 if (op_mode == VOIDmode)
696 abort ();
697
698 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
699 return 0;
700
701 hv = 0;
702 lv = l1 & GET_MODE_MASK (op_mode);
703 break;
704
705 case SIGN_EXTEND:
706 if (op_mode == VOIDmode
707 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
708 return 0;
709 else
710 {
711 lv = l1 & GET_MODE_MASK (op_mode);
712 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
713 && (lv & ((HOST_WIDE_INT) 1
714 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
715 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
716
717 hv = HWI_SIGN_EXTEND (lv);
718 }
719 break;
720
721 case SQRT:
722 return 0;
723
724 default:
725 return 0;
726 }
727
728 return immed_double_const (lv, hv, mode);
729 }
730
731 else if (GET_CODE (trueop) == CONST_DOUBLE
732 && GET_MODE_CLASS (mode) == MODE_FLOAT)
733 {
734 REAL_VALUE_TYPE d, t;
735 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
736
737 switch (code)
738 {
739 case SQRT:
740 if (HONOR_SNANS (mode) && real_isnan (&d))
741 return 0;
742 real_sqrt (&t, mode, &d);
743 d = t;
744 break;
745 case ABS:
746 d = REAL_VALUE_ABS (d);
747 break;
748 case NEG:
749 d = REAL_VALUE_NEGATE (d);
750 break;
751 case FLOAT_TRUNCATE:
752 d = real_value_truncate (mode, d);
753 break;
754 case FLOAT_EXTEND:
755 /* All this does is change the mode. */
756 break;
757 case FIX:
758 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
759 break;
760 case NOT:
761 {
762 long tmp[4];
763 int i;
764
765 real_to_target (tmp, &d, GET_MODE (trueop));
766 for (i = 0; i < 4; i++)
767 tmp[i] = ~tmp[i];
768 real_from_target (&d, tmp, mode);
769 }
break;
770 default:
771 abort ();
772 }
773 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
774 }
775
776 else if (GET_CODE (trueop) == CONST_DOUBLE
777 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
778 && GET_MODE_CLASS (mode) == MODE_INT
779 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
780 {
781 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
782 operators are intentionally left unspecified (to ease implementation
783 by target backends), for consistency, this routine implements the
784 same semantics for constant folding as used by the middle-end. */
785
786 HOST_WIDE_INT xh, xl, th, tl;
787 REAL_VALUE_TYPE x, t;
788 REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
789 switch (code)
790 {
791 case FIX:
792 if (REAL_VALUE_ISNAN (x))
793 return const0_rtx;
794
795 /* Test against the signed upper bound. */
796 if (width > HOST_BITS_PER_WIDE_INT)
797 {
798 th = ((unsigned HOST_WIDE_INT) 1
799 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
800 tl = -1;
801 }
802 else
803 {
804 th = 0;
805 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
806 }
807 real_from_integer (&t, VOIDmode, tl, th, 0);
808 if (REAL_VALUES_LESS (t, x))
809 {
810 xh = th;
811 xl = tl;
812 break;
813 }
814
815 /* Test against the signed lower bound. */
816 if (width > HOST_BITS_PER_WIDE_INT)
817 {
818 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
819 tl = 0;
820 }
821 else
822 {
823 th = -1;
824 tl = (HOST_WIDE_INT) -1 << (width - 1);
825 }
826 real_from_integer (&t, VOIDmode, tl, th, 0);
827 if (REAL_VALUES_LESS (x, t))
828 {
829 xh = th;
830 xl = tl;
831 break;
832 }
833 REAL_VALUE_TO_INT (&xl, &xh, x);
834 break;
835
836 case UNSIGNED_FIX:
837 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
838 return const0_rtx;
839
840 /* Test against the unsigned upper bound. */
841 if (width == 2*HOST_BITS_PER_WIDE_INT)
842 {
843 th = -1;
844 tl = -1;
845 }
846 else if (width >= HOST_BITS_PER_WIDE_INT)
847 {
848 th = ((unsigned HOST_WIDE_INT) 1
849 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
850 tl = -1;
851 }
852 else
853 {
854 th = 0;
855 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
856 }
857 real_from_integer (&t, VOIDmode, tl, th, 1);
858 if (REAL_VALUES_LESS (t, x))
859 {
860 xh = th;
861 xl = tl;
862 break;
863 }
864
865 REAL_VALUE_TO_INT (&xl, &xh, x);
866 break;
867
868 default:
869 abort ();
870 }
871 return immed_double_const (xl, xh, mode);
872 }
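
/* Concretely, with SImode as the target mode: FIX of a CONST_DOUBLE equal
   to 3.0e9 exceeds the signed upper bound tested above, so the result
   saturates to 2147483647 rather than wrapping; FIX of a NaN folds to
   (const_int 0), as does UNSIGNED_FIX of any negative value.  */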
873
874 /* This was formerly used only for non-IEEE float.
875 eggert@twinsun.com says it is safe for IEEE also. */
876 else
877 {
878 enum rtx_code reversed;
879 rtx temp;
880
881 /* There are some simplifications we can do even if the operands
882 aren't constant. */
883 switch (code)
884 {
885 case NOT:
886 /* (not (not X)) == X. */
887 if (GET_CODE (op) == NOT)
888 return XEXP (op, 0);
889
890 /* (not (eq X Y)) == (ne X Y), etc. */
891 if (COMPARISON_P (op)
892 && (mode == BImode || STORE_FLAG_VALUE == -1)
893 && ((reversed = reversed_comparison_code (op, NULL_RTX))
894 != UNKNOWN))
895 return simplify_gen_relational (reversed, mode, VOIDmode,
896 XEXP (op, 0), XEXP (op, 1));
897
898 /* (not (plus X -1)) can become (neg X). */
899 if (GET_CODE (op) == PLUS
900 && XEXP (op, 1) == constm1_rtx)
901 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
902
903 /* Similarly, (not (neg X)) is (plus X -1). */
904 if (GET_CODE (op) == NEG)
905 return plus_constant (XEXP (op, 0), -1);
906
907 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
908 if (GET_CODE (op) == XOR
909 && GET_CODE (XEXP (op, 1)) == CONST_INT
910 && (temp = simplify_unary_operation (NOT, mode,
911 XEXP (op, 1),
912 mode)) != 0)
913 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
914
915
916 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
917 operands other than 1, but that is not valid. We could do a
918 similar simplification for (not (lshiftrt C X)) where C is
919 just the sign bit, but this doesn't seem common enough to
920 bother with. */
921 if (GET_CODE (op) == ASHIFT
922 && XEXP (op, 0) == const1_rtx)
923 {
924 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
925 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
926 }
927
928 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
929 by reversing the comparison code if valid. */
930 if (STORE_FLAG_VALUE == -1
931 && COMPARISON_P (op)
932 && (reversed = reversed_comparison_code (op, NULL_RTX))
933 != UNKNOWN)
934 return simplify_gen_relational (reversed, mode, VOIDmode,
935 XEXP (op, 0), XEXP (op, 1));
936
937 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
938 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
939 so we can perform the above simplification. */
940
941 if (STORE_FLAG_VALUE == -1
942 && GET_CODE (op) == ASHIFTRT
943 && GET_CODE (XEXP (op, 1)) == CONST_INT
944 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
945 return simplify_gen_relational (GE, mode, VOIDmode,
946 XEXP (op, 0), const0_rtx);
947
948 break;
949
950 case NEG:
951 /* (neg (neg X)) == X. */
952 if (GET_CODE (op) == NEG)
953 return XEXP (op, 0);
954
955 /* (neg (plus X 1)) can become (not X). */
956 if (GET_CODE (op) == PLUS
957 && XEXP (op, 1) == const1_rtx)
958 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
959
960 /* Similarly, (neg (not X)) is (plus X 1). */
961 if (GET_CODE (op) == NOT)
962 return plus_constant (XEXP (op, 0), 1);
963
964 /* (neg (minus X Y)) can become (minus Y X). This transformation
965 isn't safe for modes with signed zeros, since if X and Y are
966 both +0, (minus Y X) is the same as (minus X Y). If the
967 rounding mode is towards +infinity (or -infinity) then the two
968 expressions will be rounded differently. */
969 if (GET_CODE (op) == MINUS
970 && !HONOR_SIGNED_ZEROS (mode)
971 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
972 return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
973 XEXP (op, 0));
974
975 if (GET_CODE (op) == PLUS
976 && !HONOR_SIGNED_ZEROS (mode)
977 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
978 {
979 /* (neg (plus A C)) is simplified to (minus -C A). */
980 if (GET_CODE (XEXP (op, 1)) == CONST_INT
981 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
982 {
983 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
984 mode);
985 if (temp)
986 return simplify_gen_binary (MINUS, mode, temp,
987 XEXP (op, 0));
988 }
989
990 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
991 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
992 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
993 }
994
995 /* (neg (mult A B)) becomes (mult (neg A) B).
996 This works even for floating-point values. */
997 if (GET_CODE (op) == MULT
998 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
999 {
1000 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1001 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
1002 }
1003
1004 /* NEG commutes with ASHIFT since it is multiplication. Only do
1005 this if we can then eliminate the NEG (e.g., if the operand
1006 is a constant). */
1007 if (GET_CODE (op) == ASHIFT)
1008 {
1009 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
1010 mode);
1011 if (temp)
1012 return simplify_gen_binary (ASHIFT, mode, temp,
1013 XEXP (op, 1));
1014 }
1015
1016 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1017 C is equal to the width of MODE minus 1. */
1018 if (GET_CODE (op) == ASHIFTRT
1019 && GET_CODE (XEXP (op, 1)) == CONST_INT
1020 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
1021 return simplify_gen_binary (LSHIFTRT, mode,
1022 XEXP (op, 0), XEXP (op, 1));
1023
1024 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1025 C is equal to the width of MODE minus 1. */
1026 if (GET_CODE (op) == LSHIFTRT
1027 && GET_CODE (XEXP (op, 1)) == CONST_INT
1028 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
1029 return simplify_gen_binary (ASHIFTRT, mode,
1030 XEXP (op, 0), XEXP (op, 1));
1031
1032 break;
1033
1034 case SIGN_EXTEND:
1035 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1036 becomes just the MINUS if its mode is MODE. This allows
1037 folding switch statements on machines using casesi (such as
1038 the VAX). */
1039 if (GET_CODE (op) == TRUNCATE
1040 && GET_MODE (XEXP (op, 0)) == mode
1041 && GET_CODE (XEXP (op, 0)) == MINUS
1042 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1043 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1044 return XEXP (op, 0);
1045
1046 /* Check for a sign extension of a subreg of a promoted
1047 variable, where the promotion is sign-extended, and the
1048 target mode is the same as the variable's promotion. */
1049 if (GET_CODE (op) == SUBREG
1050 && SUBREG_PROMOTED_VAR_P (op)
1051 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1052 && GET_MODE (XEXP (op, 0)) == mode)
1053 return XEXP (op, 0);
1054
1055 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1056 if (! POINTERS_EXTEND_UNSIGNED
1057 && mode == Pmode && GET_MODE (op) == ptr_mode
1058 && (CONSTANT_P (op)
1059 || (GET_CODE (op) == SUBREG
1060 && GET_CODE (SUBREG_REG (op)) == REG
1061 && REG_POINTER (SUBREG_REG (op))
1062 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1063 return convert_memory_address (Pmode, op);
1064 #endif
1065 break;
1066
1067 case ZERO_EXTEND:
1068 /* Check for a zero extension of a subreg of a promoted
1069 variable, where the promotion is zero-extended, and the
1070 target mode is the same as the variable's promotion. */
1071 if (GET_CODE (op) == SUBREG
1072 && SUBREG_PROMOTED_VAR_P (op)
1073 && SUBREG_PROMOTED_UNSIGNED_P (op)
1074 && GET_MODE (XEXP (op, 0)) == mode)
1075 return XEXP (op, 0);
1076
1077 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1078 if (POINTERS_EXTEND_UNSIGNED > 0
1079 && mode == Pmode && GET_MODE (op) == ptr_mode
1080 && (CONSTANT_P (op)
1081 || (GET_CODE (op) == SUBREG
1082 && GET_CODE (SUBREG_REG (op)) == REG
1083 && REG_POINTER (SUBREG_REG (op))
1084 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1085 return convert_memory_address (Pmode, op);
1086 #endif
1087 break;
1088
1089 default:
1090 break;
1091 }
1092
1093 return 0;
1094 }
1095 }
1096 \f
1097 /* Subroutine of simplify_binary_operation to simplify a commutative,
1098 associative binary operation CODE with result mode MODE, operating
1099 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1100 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1101 canonicalization is possible. */
1102
1103 static rtx
1104 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1105 rtx op0, rtx op1)
1106 {
1107 rtx tem;
1108
1109 /* Linearize the operator to the left. */
1110 if (GET_CODE (op1) == code)
1111 {
1112 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1113 if (GET_CODE (op0) == code)
1114 {
1115 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1116 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1117 }
1118
1119 /* "a op (b op c)" becomes "(b op c) op a". */
1120 if (! swap_commutative_operands_p (op1, op0))
1121 return simplify_gen_binary (code, mode, op1, op0);
1122
1123 tem = op0;
1124 op0 = op1;
1125 op1 = tem;
1126 }
1127
1128 if (GET_CODE (op0) == code)
1129 {
1130 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1131 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1132 {
1133 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1134 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1135 }
1136
1137 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1138 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1139 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1140 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1141 if (tem != 0)
1142 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1143
1144 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1145 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1146 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1147 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1148 if (tem != 0)
1149 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1150 }
1151
1152 return 0;
1153 }
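
/* For instance, with op0 = (plus (reg A) (const_int 2)) and op1 = (reg B),
   the "(x op c) op y" rule above rewrites the expression as
   (plus (plus (reg A) (reg B)) (const_int 2)), pushing the constant to the
   outermost position; nested forms such as "(a op b) op (c op d)" are
   first linearized to the left as described at the top of the function.  */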
1154
1155 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1156 and OP1. Return 0 if no simplification is possible.
1157
1158 Don't use this for relational operations such as EQ or LT.
1159 Use simplify_relational_operation instead. */
1160
1161 rtx
1162 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1163 rtx op0, rtx op1)
1164 {
1165 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
1166 HOST_WIDE_INT val;
1167 unsigned int width = GET_MODE_BITSIZE (mode);
1168 rtx trueop0, trueop1;
1169 rtx tem;
1170
1171 #ifdef ENABLE_CHECKING
1172 /* Relational operations don't work here. We must know the mode
1173 of the operands in order to do the comparison correctly.
1174 Assuming a full word can give incorrect results.
1175 Consider comparing 128 with -128 in QImode. */
1176
1177 if (GET_RTX_CLASS (code) == RTX_COMPARE
1178 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
1179 abort ();
1180 #endif
1181
1182 /* Make sure the constant is second. */
1183 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1184 && swap_commutative_operands_p (op0, op1))
1185 {
1186 tem = op0, op0 = op1, op1 = tem;
1187 }
1188
1189 trueop0 = avoid_constant_pool_reference (op0);
1190 trueop1 = avoid_constant_pool_reference (op1);
1191
1192 if (VECTOR_MODE_P (mode)
1193 && GET_CODE (trueop0) == CONST_VECTOR
1194 && GET_CODE (trueop1) == CONST_VECTOR)
1195 {
1196 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1197 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1198 enum machine_mode op0mode = GET_MODE (trueop0);
1199 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
1200 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
1201 enum machine_mode op1mode = GET_MODE (trueop1);
1202 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
1203 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
1204 rtvec v = rtvec_alloc (n_elts);
1205 unsigned int i;
1206
1207 if (op0_n_elts != n_elts || op1_n_elts != n_elts)
1208 abort ();
1209
1210 for (i = 0; i < n_elts; i++)
1211 {
1212 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
1213 CONST_VECTOR_ELT (trueop0, i),
1214 CONST_VECTOR_ELT (trueop1, i));
1215 if (!x)
1216 return 0;
1217 RTVEC_ELT (v, i) = x;
1218 }
1219
1220 return gen_rtx_CONST_VECTOR (mode, v);
1221 }
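
/* For example (the vector mode is illustrative), adding two V2SImode
   constants folds element by element:
   (plus:V2SI (const_vector [1 2]) (const_vector [3 4])) becomes
   (const_vector [4 6]); if any element fails to fold, the whole operation
   is left untouched.  */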
1222
1223 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1224 && GET_CODE (trueop0) == CONST_DOUBLE
1225 && GET_CODE (trueop1) == CONST_DOUBLE
1226 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
1227 {
1228 if (code == AND
1229 || code == IOR
1230 || code == XOR)
1231 {
1232 long tmp0[4];
1233 long tmp1[4];
1234 REAL_VALUE_TYPE r;
1235 int i;
1236
1237 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
1238 GET_MODE (op0));
1239 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
1240 GET_MODE (op1));
1241 for (i = 0; i < 4; i++)
1242 {
1243 if (code == AND)
1244 tmp0[i] &= tmp1[i];
1245 else if (code == IOR)
1246 tmp0[i] |= tmp1[i];
1247 else if (code == XOR)
1248 tmp0[i] ^= tmp1[i];
1249 else
1250 abort ();
1251 }
1252 real_from_target (&r, tmp0, mode);
1253 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
1254 }
1255 else
1256 {
1257 REAL_VALUE_TYPE f0, f1, value;
1258
1259 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
1260 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
1261 f0 = real_value_truncate (mode, f0);
1262 f1 = real_value_truncate (mode, f1);
1263
1264 if (HONOR_SNANS (mode)
1265 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
1266 return 0;
1267
1268 if (code == DIV
1269 && REAL_VALUES_EQUAL (f1, dconst0)
1270 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1271 return 0;
1272
1273 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
1274
1275 value = real_value_truncate (mode, value);
1276 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
1277 }
1278 }
1279
1280 /* We can fold some multi-word operations. */
1281 if (GET_MODE_CLASS (mode) == MODE_INT
1282 && width == HOST_BITS_PER_WIDE_INT * 2
1283 && (GET_CODE (trueop0) == CONST_DOUBLE
1284 || GET_CODE (trueop0) == CONST_INT)
1285 && (GET_CODE (trueop1) == CONST_DOUBLE
1286 || GET_CODE (trueop1) == CONST_INT))
1287 {
1288 unsigned HOST_WIDE_INT l1, l2, lv, lt;
1289 HOST_WIDE_INT h1, h2, hv, ht;
1290
1291 if (GET_CODE (trueop0) == CONST_DOUBLE)
1292 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
1293 else
1294 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
1295
1296 if (GET_CODE (trueop1) == CONST_DOUBLE)
1297 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
1298 else
1299 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
1300
1301 switch (code)
1302 {
1303 case MINUS:
1304 /* A - B == A + (-B). */
1305 neg_double (l2, h2, &lv, &hv);
1306 l2 = lv, h2 = hv;
1307
1308 /* Fall through.... */
1309
1310 case PLUS:
1311 add_double (l1, h1, l2, h2, &lv, &hv);
1312 break;
1313
1314 case MULT:
1315 mul_double (l1, h1, l2, h2, &lv, &hv);
1316 break;
1317
1318 case DIV:
1319 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
1320 &lv, &hv, &lt, &ht))
1321 return 0;
1322 break;
1323
1324 case MOD:
1325 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
1326 &lt, &ht, &lv, &hv))
1327 return 0;
1328 break;
1329
1330 case UDIV:
1331 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
1332 &lv, &hv, &lt, &ht))
1333 return 0;
1334 break;
1335
1336 case UMOD:
1337 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
1338 &lt, &ht, &lv, &hv))
1339 return 0;
1340 break;
1341
1342 case AND:
1343 lv = l1 & l2, hv = h1 & h2;
1344 break;
1345
1346 case IOR:
1347 lv = l1 | l2, hv = h1 | h2;
1348 break;
1349
1350 case XOR:
1351 lv = l1 ^ l2, hv = h1 ^ h2;
1352 break;
1353
1354 case SMIN:
1355 if (h1 < h2
1356 || (h1 == h2
1357 && ((unsigned HOST_WIDE_INT) l1
1358 < (unsigned HOST_WIDE_INT) l2)))
1359 lv = l1, hv = h1;
1360 else
1361 lv = l2, hv = h2;
1362 break;
1363
1364 case SMAX:
1365 if (h1 > h2
1366 || (h1 == h2
1367 && ((unsigned HOST_WIDE_INT) l1
1368 > (unsigned HOST_WIDE_INT) l2)))
1369 lv = l1, hv = h1;
1370 else
1371 lv = l2, hv = h2;
1372 break;
1373
1374 case UMIN:
1375 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1376 || (h1 == h2
1377 && ((unsigned HOST_WIDE_INT) l1
1378 < (unsigned HOST_WIDE_INT) l2)))
1379 lv = l1, hv = h1;
1380 else
1381 lv = l2, hv = h2;
1382 break;
1383
1384 case UMAX:
1385 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1386 || (h1 == h2
1387 && ((unsigned HOST_WIDE_INT) l1
1388 > (unsigned HOST_WIDE_INT) l2)))
1389 lv = l1, hv = h1;
1390 else
1391 lv = l2, hv = h2;
1392 break;
1393
1394 case LSHIFTRT: case ASHIFTRT:
1395 case ASHIFT:
1396 case ROTATE: case ROTATERT:
1397 if (SHIFT_COUNT_TRUNCATED)
1398 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1399
1400 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1401 return 0;
1402
1403 if (code == LSHIFTRT || code == ASHIFTRT)
1404 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1405 code == ASHIFTRT);
1406 else if (code == ASHIFT)
1407 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1408 else if (code == ROTATE)
1409 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1410 else /* code == ROTATERT */
1411 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1412 break;
1413
1414 default:
1415 return 0;
1416 }
1417
1418 return immed_double_const (lv, hv, mode);
1419 }
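
/* This block is what constant folds double-word DIV, MOD, UDIV and UMOD:
   both operands are split into (low, high) word pairs, div_and_round_double
   computes the truncating quotient and remainder, and the requested half
   is rebuilt with immed_double_const.  If div_and_round_double reports
   overflow, no fold is attempted.  */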
1420
1421 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1422 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1423 {
1424 /* Even if we can't compute a constant result,
1425 there are some cases worth simplifying. */
1426
1427 switch (code)
1428 {
1429 case PLUS:
1430 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1431 when x is NaN, infinite, or finite and nonzero. They aren't
1432 when x is -0 and the rounding mode is not towards -infinity,
1433 since (-0) + 0 is then 0. */
1434 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1435 return op0;
1436
1437 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1438 transformations are safe even for IEEE. */
1439 if (GET_CODE (op0) == NEG)
1440 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1441 else if (GET_CODE (op1) == NEG)
1442 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1443
1444 /* (~a) + 1 -> -a */
1445 if (INTEGRAL_MODE_P (mode)
1446 && GET_CODE (op0) == NOT
1447 && trueop1 == const1_rtx)
1448 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1449
1450 /* Handle both-operands-constant cases. We can only add
1451 CONST_INTs to constants since the sum of relocatable symbols
1452 can't be handled by most assemblers. Don't add CONST_INT
1453 to CONST_INT since overflow won't be computed properly if wider
1454 than HOST_BITS_PER_WIDE_INT. */
1455
1456 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1457 && GET_CODE (op1) == CONST_INT)
1458 return plus_constant (op0, INTVAL (op1));
1459 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1460 && GET_CODE (op0) == CONST_INT)
1461 return plus_constant (op1, INTVAL (op0));
1462
1463 /* See if this is something like X * C + X or vice versa, or
1464 if the multiplication is written as a shift. If so, we can
1465 distribute and make a new multiply or shift, or maybe just
1466 have X (if the coefficients happen to sum to 1). But don't make a
1467 real multiply if we didn't have one before. */
1468
1469 if (! FLOAT_MODE_P (mode))
1470 {
1471 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1472 rtx lhs = op0, rhs = op1;
1473 int had_mult = 0;
1474
1475 if (GET_CODE (lhs) == NEG)
1476 coeff0 = -1, lhs = XEXP (lhs, 0);
1477 else if (GET_CODE (lhs) == MULT
1478 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1479 {
1480 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1481 had_mult = 1;
1482 }
1483 else if (GET_CODE (lhs) == ASHIFT
1484 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1485 && INTVAL (XEXP (lhs, 1)) >= 0
1486 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1487 {
1488 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1489 lhs = XEXP (lhs, 0);
1490 }
1491
1492 if (GET_CODE (rhs) == NEG)
1493 coeff1 = -1, rhs = XEXP (rhs, 0);
1494 else if (GET_CODE (rhs) == MULT
1495 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1496 {
1497 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1498 had_mult = 1;
1499 }
1500 else if (GET_CODE (rhs) == ASHIFT
1501 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1502 && INTVAL (XEXP (rhs, 1)) >= 0
1503 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1504 {
1505 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1506 rhs = XEXP (rhs, 0);
1507 }
1508
1509 if (rtx_equal_p (lhs, rhs))
1510 {
1511 tem = simplify_gen_binary (MULT, mode, lhs,
1512 GEN_INT (coeff0 + coeff1));
1513 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1514 }
1515 }
1516
1517 /* If one of the operands is a PLUS or a MINUS, see if we can
1518 simplify this by the associative law.
1519 Don't use the associative law for floating point.
1520 The inaccuracy makes it nonassociative,
1521 and subtle programs can break if operations are associated. */
1522
1523 if (INTEGRAL_MODE_P (mode)
1524 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1525 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1526 || (GET_CODE (op0) == CONST
1527 && GET_CODE (XEXP (op0, 0)) == PLUS)
1528 || (GET_CODE (op1) == CONST
1529 && GET_CODE (XEXP (op1, 0)) == PLUS))
1530 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1531 return tem;
1532
1533 /* Reassociate floating point addition only when the user
1534 specifies unsafe math optimizations. */
1535 if (FLOAT_MODE_P (mode)
1536 && flag_unsafe_math_optimizations)
1537 {
1538 tem = simplify_associative_operation (code, mode, op0, op1);
1539 if (tem)
1540 return tem;
1541 }
1542 break;
1543
1544 case COMPARE:
1545 #ifdef HAVE_cc0
1546 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1547 using cc0, in which case we want to leave it as a COMPARE
1548 so we can distinguish it from a register-register-copy.
1549
1550 In IEEE floating point, x-0 is not the same as x. */
1551
1552 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1553 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1554 && trueop1 == CONST0_RTX (mode))
1555 return op0;
1556 #endif
1557
1558 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1559 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1560 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1561 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1562 {
1563 rtx xop00 = XEXP (op0, 0);
1564 rtx xop10 = XEXP (op1, 0);
1565
1566 #ifdef HAVE_cc0
1567 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1568 #else
1569 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1570 && GET_MODE (xop00) == GET_MODE (xop10)
1571 && REGNO (xop00) == REGNO (xop10)
1572 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1573 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1574 #endif
1575 return xop00;
1576 }
1577 break;
1578
1579 case MINUS:
1580 /* We can't assume x-x is 0 even with non-IEEE floating point,
1581 but since it is zero except in very strange circumstances, we
1582 will treat it as zero with -funsafe-math-optimizations. */
1583 if (rtx_equal_p (trueop0, trueop1)
1584 && ! side_effects_p (op0)
1585 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1586 return CONST0_RTX (mode);
1587
1588 /* Change subtraction from zero into negation. (0 - x) is the
1589 same as -x when x is NaN, infinite, or finite and nonzero.
1590 But if the mode has signed zeros, and does not round towards
1591 -infinity, then 0 - 0 is 0, not -0. */
1592 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1593 return simplify_gen_unary (NEG, mode, op1, mode);
1594
1595 /* (-1 - a) is ~a. */
1596 if (trueop0 == constm1_rtx)
1597 return simplify_gen_unary (NOT, mode, op1, mode);
1598
1599 /* Subtracting 0 has no effect unless the mode has signed zeros
1600 and supports rounding towards -infinity. In such a case,
1601 0 - 0 is -0. */
1602 if (!(HONOR_SIGNED_ZEROS (mode)
1603 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1604 && trueop1 == CONST0_RTX (mode))
1605 return op0;
1606
1607 /* See if this is something like X * C - X or vice versa or
1608 if the multiplication is written as a shift. If so, we can
1609 distribute and make a new multiply, shift, or maybe just
1610 have X (if C is 2 in the example above). But don't make a
1611 real multiply if we didn't have one before. */
1612
1613 if (! FLOAT_MODE_P (mode))
1614 {
1615 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1616 rtx lhs = op0, rhs = op1;
1617 int had_mult = 0;
1618
1619 if (GET_CODE (lhs) == NEG)
1620 coeff0 = -1, lhs = XEXP (lhs, 0);
1621 else if (GET_CODE (lhs) == MULT
1622 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1623 {
1624 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1625 had_mult = 1;
1626 }
1627 else if (GET_CODE (lhs) == ASHIFT
1628 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1629 && INTVAL (XEXP (lhs, 1)) >= 0
1630 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1631 {
1632 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1633 lhs = XEXP (lhs, 0);
1634 }
1635
1636 if (GET_CODE (rhs) == NEG)
1637 coeff1 = - 1, rhs = XEXP (rhs, 0);
1638 else if (GET_CODE (rhs) == MULT
1639 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1640 {
1641 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1642 had_mult = 1;
1643 }
1644 else if (GET_CODE (rhs) == ASHIFT
1645 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1646 && INTVAL (XEXP (rhs, 1)) >= 0
1647 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1648 {
1649 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1650 rhs = XEXP (rhs, 0);
1651 }
1652
1653 if (rtx_equal_p (lhs, rhs))
1654 {
1655 tem = simplify_gen_binary (MULT, mode, lhs,
1656 GEN_INT (coeff0 - coeff1));
1657 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1658 }
1659 }
1660
1661 /* (a - (-b)) -> (a + b). True even for IEEE. */
1662 if (GET_CODE (op1) == NEG)
1663 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1664
1665 /* (-x - c) may be simplified as (-c - x). */
1666 if (GET_CODE (op0) == NEG
1667 && (GET_CODE (op1) == CONST_INT
1668 || GET_CODE (op1) == CONST_DOUBLE))
1669 {
1670 tem = simplify_unary_operation (NEG, mode, op1, mode);
1671 if (tem)
1672 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1673 }
1674
1675 /* If one of the operands is a PLUS or a MINUS, see if we can
1676 simplify this by the associative law.
1677 Don't use the associative law for floating point.
1678 The inaccuracy makes it nonassociative,
1679 and subtle programs can break if operations are associated. */
1680
1681 if (INTEGRAL_MODE_P (mode)
1682 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1683 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1684 || (GET_CODE (op0) == CONST
1685 && GET_CODE (XEXP (op0, 0)) == PLUS)
1686 || (GET_CODE (op1) == CONST
1687 && GET_CODE (XEXP (op1, 0)) == PLUS))
1688 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1689 return tem;
1690
1691 /* Don't let a relocatable value get a negative coeff. */
1692 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1693 return simplify_gen_binary (PLUS, mode,
1694 op0,
1695 neg_const_int (mode, op1));
1696
1697 /* (x - (x & y)) -> (x & ~y) */
1698 if (GET_CODE (op1) == AND)
1699 {
1700 if (rtx_equal_p (op0, XEXP (op1, 0)))
1701 {
1702 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1703 GET_MODE (XEXP (op1, 1)));
1704 return simplify_gen_binary (AND, mode, op0, tem);
1705 }
1706 if (rtx_equal_p (op0, XEXP (op1, 1)))
1707 {
1708 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1709 GET_MODE (XEXP (op1, 0)));
1710 return simplify_gen_binary (AND, mode, op0, tem);
1711 }
1712 }
1713 break;
1714
1715 case MULT:
1716 if (trueop1 == constm1_rtx)
1717 return simplify_gen_unary (NEG, mode, op0, mode);
1718
1719 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1720 x is NaN, since x * 0 is then also NaN. Nor is it valid
1721 when the mode has signed zeros, since multiplying a negative
1722 number by 0 will give -0, not 0. */
1723 if (!HONOR_NANS (mode)
1724 && !HONOR_SIGNED_ZEROS (mode)
1725 && trueop1 == CONST0_RTX (mode)
1726 && ! side_effects_p (op0))
1727 return op1;
1728
1729 /* In IEEE floating point, x*1 is not equivalent to x for
1730 signalling NaNs. */
1731 if (!HONOR_SNANS (mode)
1732 && trueop1 == CONST1_RTX (mode))
1733 return op0;
1734
1735 /* Convert multiply by constant power of two into shift unless
1736 we are still generating RTL. This test is a kludge. */
1737 if (GET_CODE (trueop1) == CONST_INT
1738 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1739 /* If the mode is larger than the host word size, and the
1740 uppermost bit is set, then this isn't a power of two due
1741 to implicit sign extension. */
1742 && (width <= HOST_BITS_PER_WIDE_INT
1743 || val != HOST_BITS_PER_WIDE_INT - 1)
1744 && ! rtx_equal_function_value_matters)
1745 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1746
1747 /* x*2 is x+x and x*(-1) is -x */
1748 if (GET_CODE (trueop1) == CONST_DOUBLE
1749 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1750 && GET_MODE (op0) == mode)
1751 {
1752 REAL_VALUE_TYPE d;
1753 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1754
1755 if (REAL_VALUES_EQUAL (d, dconst2))
1756 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1757
1758 if (REAL_VALUES_EQUAL (d, dconstm1))
1759 return simplify_gen_unary (NEG, mode, op0, mode);
1760 }
1761
1762 /* Reassociate multiplication, but for floating point MULTs
1763 only when the user specifies unsafe math optimizations. */
1764 if (! FLOAT_MODE_P (mode)
1765 || flag_unsafe_math_optimizations)
1766 {
1767 tem = simplify_associative_operation (code, mode, op0, op1);
1768 if (tem)
1769 return tem;
1770 }
1771 break;
1772
1773 case IOR:
1774 if (trueop1 == const0_rtx)
1775 return op0;
1776 if (GET_CODE (trueop1) == CONST_INT
1777 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1778 == GET_MODE_MASK (mode)))
1779 return op1;
1780 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1781 return op0;
1782 /* A | (~A) -> -1 */
1783 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1784 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1785 && ! side_effects_p (op0)
1786 && GET_MODE_CLASS (mode) != MODE_CC)
1787 return constm1_rtx;
1788 tem = simplify_associative_operation (code, mode, op0, op1);
1789 if (tem)
1790 return tem;
1791 break;
1792
1793 case XOR:
1794 if (trueop1 == const0_rtx)
1795 return op0;
1796 if (GET_CODE (trueop1) == CONST_INT
1797 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1798 == GET_MODE_MASK (mode)))
1799 return simplify_gen_unary (NOT, mode, op0, mode);
1800 if (trueop0 == trueop1 && ! side_effects_p (op0)
1801 && GET_MODE_CLASS (mode) != MODE_CC)
1802 return const0_rtx;
1803 tem = simplify_associative_operation (code, mode, op0, op1);
1804 if (tem)
1805 return tem;
1806 break;
1807
1808 case AND:
1809 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1810 return const0_rtx;
1811 if (GET_CODE (trueop1) == CONST_INT
1812 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1813 == GET_MODE_MASK (mode)))
1814 return op0;
1815 if (trueop0 == trueop1 && ! side_effects_p (op0)
1816 && GET_MODE_CLASS (mode) != MODE_CC)
1817 return op0;
1818 /* A & (~A) -> 0 */
1819 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1820 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1821 && ! side_effects_p (op0)
1822 && GET_MODE_CLASS (mode) != MODE_CC)
1823 return const0_rtx;
1824 tem = simplify_associative_operation (code, mode, op0, op1);
1825 if (tem)
1826 return tem;
1827 break;
1828
1829 case UDIV:
1830 /* Convert divide by power of two into shift (divide by 1 handled
1831 below). */
1832 if (GET_CODE (trueop1) == CONST_INT
1833 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1834 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
1835
1836 /* Fall through.... */
1837
1838 case DIV:
1839 if (trueop1 == CONST1_RTX (mode))
1840 {
1841 /* On some platforms DIV uses a narrower mode than its
1842 operands. */
1843 rtx x = gen_lowpart_common (mode, op0);
1844 if (x)
1845 return x;
1846 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1847 return gen_lowpart_SUBREG (mode, op0);
1848 else
1849 return op0;
1850 }
1851
1852 /* Maybe change 0 / x to 0. This transformation isn't safe for
1853 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1854 Nor is it safe for modes with signed zeros, since dividing
1855 0 by a negative number gives -0, not 0. */
1856 if (!HONOR_NANS (mode)
1857 && !HONOR_SIGNED_ZEROS (mode)
1858 && trueop0 == CONST0_RTX (mode)
1859 && ! side_effects_p (op1))
1860 return op0;
1861
1862 /* Change division by a constant into multiplication. Only do
1863 this with -funsafe-math-optimizations. */
1864 else if (GET_CODE (trueop1) == CONST_DOUBLE
1865 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1866 && trueop1 != CONST0_RTX (mode)
1867 && flag_unsafe_math_optimizations)
1868 {
1869 REAL_VALUE_TYPE d;
1870 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1871
1872 if (! REAL_VALUES_EQUAL (d, dconst0))
1873 {
1874 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1875 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1876 return simplify_gen_binary (MULT, mode, op0, tem);
1877 }
1878 }
1879 break;
1880
1881 case UMOD:
1882 /* Handle modulus by power of two (mod with 1 handled below). */
1883 if (GET_CODE (trueop1) == CONST_INT
1884 && exact_log2 (INTVAL (trueop1)) > 0)
1885 return simplify_gen_binary (AND, mode, op0,
1886 GEN_INT (INTVAL (op1) - 1));
1887
1888 /* Fall through.... */
1889
1890 case MOD:
1891 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1892 && ! side_effects_p (op0) && ! side_effects_p (op1))
1893 return const0_rtx;
1894 break;
1895
1896 case ROTATERT:
1897 case ROTATE:
1898 case ASHIFTRT:
1899 /* Rotating ~0 always results in ~0. */
1900 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1901 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1902 && ! side_effects_p (op1))
1903 return op0;
1904
1905 /* Fall through.... */
1906
1907 case ASHIFT:
1908 case LSHIFTRT:
1909 if (trueop1 == const0_rtx)
1910 return op0;
1911 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1912 return op0;
1913 break;
1914
1915 case SMIN:
1916 if (width <= HOST_BITS_PER_WIDE_INT
1917 && GET_CODE (trueop1) == CONST_INT
1918 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
1919 && ! side_effects_p (op0))
1920 return op1;
1921 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1922 return op0;
1923 tem = simplify_associative_operation (code, mode, op0, op1);
1924 if (tem)
1925 return tem;
1926 break;
1927
1928 case SMAX:
1929 if (width <= HOST_BITS_PER_WIDE_INT
1930 && GET_CODE (trueop1) == CONST_INT
1931 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1932 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1933 && ! side_effects_p (op0))
1934 return op1;
1935 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1936 return op0;
1937 tem = simplify_associative_operation (code, mode, op0, op1);
1938 if (tem)
1939 return tem;
1940 break;
1941
1942 case UMIN:
1943 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1944 return op1;
1945 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1946 return op0;
1947 tem = simplify_associative_operation (code, mode, op0, op1);
1948 if (tem)
1949 return tem;
1950 break;
1951
1952 case UMAX:
1953 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1954 return op1;
1955 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1956 return op0;
1957 tem = simplify_associative_operation (code, mode, op0, op1);
1958 if (tem)
1959 return tem;
1960 break;
1961
1962 case SS_PLUS:
1963 case US_PLUS:
1964 case SS_MINUS:
1965 case US_MINUS:
1966 /* ??? There are simplifications that can be done. */
1967 return 0;
1968
1969 case VEC_SELECT:
1970 if (!VECTOR_MODE_P (mode))
1971 {
1972 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1973 || (mode
1974 != GET_MODE_INNER (GET_MODE (trueop0)))
1975 || GET_CODE (trueop1) != PARALLEL
1976 || XVECLEN (trueop1, 0) != 1
1977 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
1978 abort ();
1979
1980 if (GET_CODE (trueop0) == CONST_VECTOR)
1981 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
1982 }
1983 else
1984 {
1985 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1986 || (GET_MODE_INNER (mode)
1987 != GET_MODE_INNER (GET_MODE (trueop0)))
1988 || GET_CODE (trueop1) != PARALLEL)
1989 abort ();
1990
1991 if (GET_CODE (trueop0) == CONST_VECTOR)
1992 {
1993 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1994 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1995 rtvec v = rtvec_alloc (n_elts);
1996 unsigned int i;
1997
1998 if (XVECLEN (trueop1, 0) != (int) n_elts)
1999 abort ();
2000 for (i = 0; i < n_elts; i++)
2001 {
2002 rtx x = XVECEXP (trueop1, 0, i);
2003
2004 if (GET_CODE (x) != CONST_INT)
2005 abort ();
2006 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
2007 }
2008
2009 return gen_rtx_CONST_VECTOR (mode, v);
2010 }
2011 }
2012 return 0;
2013 case VEC_CONCAT:
2014 {
2015 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2016 ? GET_MODE (trueop0)
2017 : GET_MODE_INNER (mode));
2018 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2019 ? GET_MODE (trueop1)
2020 : GET_MODE_INNER (mode));
2021
2022 if (!VECTOR_MODE_P (mode)
2023 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2024 != GET_MODE_SIZE (mode)))
2025 abort ();
2026
2027 if ((VECTOR_MODE_P (op0_mode)
2028 && (GET_MODE_INNER (mode)
2029 != GET_MODE_INNER (op0_mode)))
2030 || (!VECTOR_MODE_P (op0_mode)
2031 && GET_MODE_INNER (mode) != op0_mode))
2032 abort ();
2033
2034 if ((VECTOR_MODE_P (op1_mode)
2035 && (GET_MODE_INNER (mode)
2036 != GET_MODE_INNER (op1_mode)))
2037 || (!VECTOR_MODE_P (op1_mode)
2038 && GET_MODE_INNER (mode) != op1_mode))
2039 abort ();
2040
2041 if ((GET_CODE (trueop0) == CONST_VECTOR
2042 || GET_CODE (trueop0) == CONST_INT
2043 || GET_CODE (trueop0) == CONST_DOUBLE)
2044 && (GET_CODE (trueop1) == CONST_VECTOR
2045 || GET_CODE (trueop1) == CONST_INT
2046 || GET_CODE (trueop1) == CONST_DOUBLE))
2047 {
2048 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2049 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2050 rtvec v = rtvec_alloc (n_elts);
2051 unsigned int i;
2052 unsigned in_n_elts = 1;
2053
2054 if (VECTOR_MODE_P (op0_mode))
2055 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2056 for (i = 0; i < n_elts; i++)
2057 {
2058 if (i < in_n_elts)
2059 {
2060 if (!VECTOR_MODE_P (op0_mode))
2061 RTVEC_ELT (v, i) = trueop0;
2062 else
2063 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2064 }
2065 else
2066 {
2067 if (!VECTOR_MODE_P (op1_mode))
2068 RTVEC_ELT (v, i) = trueop1;
2069 else
2070 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2071 i - in_n_elts);
2072 }
2073 }
2074
2075 return gen_rtx_CONST_VECTOR (mode, v);
2076 }
2077 }
2078 return 0;
2079
2080 default:
2081 abort ();
2082 }
2083
2084 return 0;
2085 }
2086
2087 /* Get the integer argument values in two forms:
2088 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
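/* The zero-extended forms feed the bitwise and unsigned cases below;
   the sign-extended forms feed the signed arithmetic cases.  */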
2089
2090 arg0 = INTVAL (trueop0);
2091 arg1 = INTVAL (trueop1);
2092
2093 if (width < HOST_BITS_PER_WIDE_INT)
2094 {
2095 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2096 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2097
2098 arg0s = arg0;
2099 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2100 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2101
2102 arg1s = arg1;
2103 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2104 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2105 }
2106 else
2107 {
2108 arg0s = arg0;
2109 arg1s = arg1;
2110 }
2111
2112 /* Compute the value of the arithmetic. */
2113
2114 switch (code)
2115 {
2116 case PLUS:
2117 val = arg0s + arg1s;
2118 break;
2119
2120 case MINUS:
2121 val = arg0s - arg1s;
2122 break;
2123
2124 case MULT:
2125 val = arg0s * arg1s;
2126 break;
2127
2128 case DIV:
2129 if (arg1s == 0
2130 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2131 && arg1s == -1))
2132 return 0;
2133 val = arg0s / arg1s;
2134 break;
2135
2136 case MOD:
2137 if (arg1s == 0
2138 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2139 && arg1s == -1))
2140 return 0;
2141 val = arg0s % arg1s;
2142 break;
2143
2144 case UDIV:
2145 if (arg1 == 0
2146 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2147 && arg1s == -1))
2148 return 0;
2149 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2150 break;
2151
2152 case UMOD:
2153 if (arg1 == 0
2154 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2155 && arg1s == -1))
2156 return 0;
2157 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2158 break;
2159
2160 case AND:
2161 val = arg0 & arg1;
2162 break;
2163
2164 case IOR:
2165 val = arg0 | arg1;
2166 break;
2167
2168 case XOR:
2169 val = arg0 ^ arg1;
2170 break;
2171
2172 case LSHIFTRT:
2173 /* If shift count is undefined, don't fold it; let the machine do
2174 what it wants. But truncate it if the machine will do that. */
2175 if (arg1 < 0)
2176 return 0;
2177
2178 if (SHIFT_COUNT_TRUNCATED)
2179 arg1 %= width;
2180
2181 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2182 break;
2183
2184 case ASHIFT:
2185 if (arg1 < 0)
2186 return 0;
2187
2188 if (SHIFT_COUNT_TRUNCATED)
2189 arg1 %= width;
2190
2191 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2192 break;
2193
2194 case ASHIFTRT:
2195 if (arg1 < 0)
2196 return 0;
2197
2198 if (SHIFT_COUNT_TRUNCATED)
2199 arg1 %= width;
2200
2201 val = arg0s >> arg1;
2202
2203 /* Bootstrap compiler may not have sign extended the right shift.
2204 Manually extend the sign to ensure bootstrap cc matches gcc. */
2205 if (arg0s < 0 && arg1 > 0)
2206 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2207
2208 break;
2209
2210 case ROTATERT:
2211 if (arg1 < 0)
2212 return 0;
2213
2214 arg1 %= width;
2215 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2216 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2217 break;
2218
2219 case ROTATE:
2220 if (arg1 < 0)
2221 return 0;
2222
2223 arg1 %= width;
2224 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2225 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2226 break;
2227
2228 case COMPARE:
2229 /* Do nothing here. */
2230 return 0;
2231
2232 case SMIN:
2233 val = arg0s <= arg1s ? arg0s : arg1s;
2234 break;
2235
2236 case UMIN:
2237 val = ((unsigned HOST_WIDE_INT) arg0
2238 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2239 break;
2240
2241 case SMAX:
2242 val = arg0s > arg1s ? arg0s : arg1s;
2243 break;
2244
2245 case UMAX:
2246 val = ((unsigned HOST_WIDE_INT) arg0
2247 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2248 break;
2249
2250 case SS_PLUS:
2251 case US_PLUS:
2252 case SS_MINUS:
2253 case US_MINUS:
2254 /* ??? There are simplifications that can be done. */
2255 return 0;
2256
2257 default:
2258 abort ();
2259 }
2260
2261 val = trunc_int_for_mode (val, mode);
2262
2263 return GEN_INT (val);
2264 }
2265 \f
2266 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2267 PLUS or MINUS.
2268
2269 Rather than test for specific cases, we do this by a brute-force method
2270 and do all possible simplifications until no more changes occur. Then
2271 we rebuild the operation.
2272
2273 If FORCE is true, then always generate the rtx. This is used to
2274 canonicalize stuff emitted from simplify_gen_binary. Note that this
2275 can still fail if the rtx is too complex. It won't fail just because
2276 the result is not 'simpler' than the input, however. */
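/* For example, (plus (minus A B) C) is first flattened into the
   operand list { A, -B, C }; the pairwise loop below then tries to
   combine any two of those entries.  */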
2277
2278 struct simplify_plus_minus_op_data
2279 {
2280 rtx op;
2281 int neg;
2282 };
2283
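/* qsort comparison function: order operands by decreasing
   commutative_operand_precedence, so that complex operands sort
   first and constants end up last in the array.  */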
2284 static int
2285 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2286 {
2287 const struct simplify_plus_minus_op_data *d1 = p1;
2288 const struct simplify_plus_minus_op_data *d2 = p2;
2289
2290 return (commutative_operand_precedence (d2->op)
2291 - commutative_operand_precedence (d1->op));
2292 }
2293
2294 static rtx
2295 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2296 rtx op1, int force)
2297 {
2298 struct simplify_plus_minus_op_data ops[8];
2299 rtx result, tem;
2300 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2301 int first, changed;
2302 int i, j;
2303
2304 memset (ops, 0, sizeof ops);
2305
2306 /* Set up the two operands and then expand them until nothing has been
2307 changed. If we run out of room in our array, give up; this should
2308 almost never happen. */
2309
2310 ops[0].op = op0;
2311 ops[0].neg = 0;
2312 ops[1].op = op1;
2313 ops[1].neg = (code == MINUS);
2314
2315 do
2316 {
2317 changed = 0;
2318
2319 for (i = 0; i < n_ops; i++)
2320 {
2321 rtx this_op = ops[i].op;
2322 int this_neg = ops[i].neg;
2323 enum rtx_code this_code = GET_CODE (this_op);
2324
2325 switch (this_code)
2326 {
2327 case PLUS:
2328 case MINUS:
2329 if (n_ops == 7)
2330 return NULL_RTX;
2331
2332 ops[n_ops].op = XEXP (this_op, 1);
2333 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2334 n_ops++;
2335
2336 ops[i].op = XEXP (this_op, 0);
2337 input_ops++;
2338 changed = 1;
2339 break;
2340
2341 case NEG:
2342 ops[i].op = XEXP (this_op, 0);
2343 ops[i].neg = ! this_neg;
2344 changed = 1;
2345 break;
2346
2347 case CONST:
2348 if (n_ops < 7
2349 && GET_CODE (XEXP (this_op, 0)) == PLUS
2350 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2351 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2352 {
2353 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2354 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2355 ops[n_ops].neg = this_neg;
2356 n_ops++;
2357 input_consts++;
2358 changed = 1;
2359 }
2360 break;
2361
2362 case NOT:
2363 /* ~a -> (-a - 1) */
2364 if (n_ops != 7)
2365 {
2366 ops[n_ops].op = constm1_rtx;
2367 ops[n_ops++].neg = this_neg;
2368 ops[i].op = XEXP (this_op, 0);
2369 ops[i].neg = !this_neg;
2370 changed = 1;
2371 }
2372 break;
2373
2374 case CONST_INT:
2375 if (this_neg)
2376 {
2377 ops[i].op = neg_const_int (mode, this_op);
2378 ops[i].neg = 0;
2379 changed = 1;
2380 }
2381 break;
2382
2383 default:
2384 break;
2385 }
2386 }
2387 }
2388 while (changed);
2389
2390 /* If we only have two operands, we can't do anything. */
2391 if (n_ops <= 2 && !force)
2392 return NULL_RTX;
2393
2394 /* Count the number of CONSTs we didn't split above. */
2395 for (i = 0; i < n_ops; i++)
2396 if (GET_CODE (ops[i].op) == CONST)
2397 input_consts++;
2398
2399 /* Now simplify each pair of operands until nothing changes. The first
2400 time through just simplify constants against each other. */
2401
2402 first = 1;
2403 do
2404 {
2405 changed = first;
2406
2407 for (i = 0; i < n_ops - 1; i++)
2408 for (j = i + 1; j < n_ops; j++)
2409 {
2410 rtx lhs = ops[i].op, rhs = ops[j].op;
2411 int lneg = ops[i].neg, rneg = ops[j].neg;
2412
2413 if (lhs != 0 && rhs != 0
2414 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2415 {
2416 enum rtx_code ncode = PLUS;
2417
2418 if (lneg != rneg)
2419 {
2420 ncode = MINUS;
2421 if (lneg)
2422 tem = lhs, lhs = rhs, rhs = tem;
2423 }
2424 else if (swap_commutative_operands_p (lhs, rhs))
2425 tem = lhs, lhs = rhs, rhs = tem;
2426
2427 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2428
2429 /* Reject "simplifications" that just wrap the two
2430 arguments in a CONST. Failure to do so can result
2431 in infinite recursion with simplify_binary_operation
2432 when it calls us to simplify CONST operations. */
2433 if (tem
2434 && ! (GET_CODE (tem) == CONST
2435 && GET_CODE (XEXP (tem, 0)) == ncode
2436 && XEXP (XEXP (tem, 0), 0) == lhs
2437 && XEXP (XEXP (tem, 0), 1) == rhs)
2438 /* Don't allow -x + -1 -> ~x simplifications in the
2439 first pass. This allows us the chance to combine
2440 the -1 with other constants. */
2441 && ! (first
2442 && GET_CODE (tem) == NOT
2443 && XEXP (tem, 0) == rhs))
2444 {
2445 lneg &= rneg;
2446 if (GET_CODE (tem) == NEG)
2447 tem = XEXP (tem, 0), lneg = !lneg;
2448 if (GET_CODE (tem) == CONST_INT && lneg)
2449 tem = neg_const_int (mode, tem), lneg = 0;
2450
2451 ops[i].op = tem;
2452 ops[i].neg = lneg;
2453 ops[j].op = NULL_RTX;
2454 changed = 1;
2455 }
2456 }
2457 }
2458
2459 first = 0;
2460 }
2461 while (changed);
2462
2463 /* Pack all the operands to the lower-numbered entries. */
2464 for (i = 0, j = 0; j < n_ops; j++)
2465 if (ops[j].op)
2466 ops[i++] = ops[j];
2467 n_ops = i;
2468
2469 /* Sort the operations based on swap_commutative_operands_p. */
2470 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2471
2472 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2473 if (n_ops == 2
2474 && GET_CODE (ops[1].op) == CONST_INT
2475 && CONSTANT_P (ops[0].op)
2476 && ops[0].neg)
2477 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2478
2479 /* We suppressed creation of trivial CONST expressions in the
2480 combination loop to avoid recursion. Create one manually now.
2481 The combination loop should have ensured that there is exactly
2482 one CONST_INT, and the sort will have ensured that it is last
2483 in the array and that any other constant will be next-to-last. */
2484
2485 if (n_ops > 1
2486 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2487 && CONSTANT_P (ops[n_ops - 2].op))
2488 {
2489 rtx value = ops[n_ops - 1].op;
2490 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2491 value = neg_const_int (mode, value);
2492 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2493 n_ops--;
2494 }
2495
2496 /* Count the number of CONSTs that we generated. */
2497 n_consts = 0;
2498 for (i = 0; i < n_ops; i++)
2499 if (GET_CODE (ops[i].op) == CONST)
2500 n_consts++;
2501
2502 /* Give up if we didn't reduce the number of operands we had. Make
2503 sure we count a CONST as two operands. If we have the same
2504 number of operands, but have made more CONSTs than before, this
2505 is also an improvement, so accept it. */
2506 if (!force
2507 && (n_ops + n_consts > input_ops
2508 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2509 return NULL_RTX;
2510
2511 /* Put a non-negated operand first, if possible. */
2512
2513 for (i = 0; i < n_ops && ops[i].neg; i++)
2514 continue;
2515 if (i == n_ops)
2516 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2517 else if (i != 0)
2518 {
2519 tem = ops[0].op;
2520 ops[0] = ops[i];
2521 ops[i].op = tem;
2522 ops[i].neg = 1;
2523 }
2524
2525 /* Now make the result by performing the requested operations. */
2526 result = ops[0].op;
2527 for (i = 1; i < n_ops; i++)
2528 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2529 mode, result, ops[i].op);
2530
2531 return result;
2532 }
2533
2534 /* Like simplify_binary_operation except used for relational operators.
2535 MODE is the mode of the operands, not that of the result. If MODE
2536 is VOIDmode, both operands must also be VOIDmode and we compare the
2537 operands in "infinite precision".
2538
2539 If no simplification is possible, this function returns zero.
2540 Otherwise, it returns either const_true_rtx or const0_rtx. */
2541
2542 rtx
2543 simplify_const_relational_operation (enum rtx_code code,
2544 enum machine_mode mode,
2545 rtx op0, rtx op1)
2546 {
2547 int equal, op0lt, op0ltu, op1lt, op1ltu;
2548 rtx tem;
2549 rtx trueop0;
2550 rtx trueop1;
2551
2552 if (mode == VOIDmode
2553 && (GET_MODE (op0) != VOIDmode
2554 || GET_MODE (op1) != VOIDmode))
2555 abort ();
2556
2557 /* If op0 is a compare, extract the comparison arguments from it. */
2558 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2559 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2560
2561 /* We can't simplify MODE_CC values since we don't know what the
2562 actual comparison is. */
2563 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2564 return 0;
2565
2566 /* Make sure the constant is second. */
2567 if (swap_commutative_operands_p (op0, op1))
2568 {
2569 tem = op0, op0 = op1, op1 = tem;
2570 code = swap_condition (code);
2571 }
2572
2573 trueop0 = avoid_constant_pool_reference (op0);
2574 trueop1 = avoid_constant_pool_reference (op1);
2575
2576 /* For integer comparisons of A and B maybe we can simplify A - B and can
2577 then simplify a comparison of that with zero. If A and B are both either
2578 a register or a CONST_INT, this can't help; testing for these cases will
2579 prevent infinite recursion here and speed things up.
2580
2581 If CODE is an unsigned comparison, then we can never do this optimization,
2582 because it gives an incorrect result if the subtraction wraps around zero.
2583 ANSI C defines unsigned operations such that they never overflow, and
2584 thus such cases cannot be ignored; but we cannot do it even for
2585 signed comparisons for languages such as Java, so test flag_wrapv. */
2586
2587 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2588 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2589 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2590 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2591 /* We cannot do this for == or != if tem is a nonzero address. */
2592 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2593 && code != GTU && code != GEU && code != LTU && code != LEU)
2594 return simplify_const_relational_operation (signed_condition (code),
2595 mode, tem, const0_rtx);
2596
2597 if (flag_unsafe_math_optimizations && code == ORDERED)
2598 return const_true_rtx;
2599
2600 if (flag_unsafe_math_optimizations && code == UNORDERED)
2601 return const0_rtx;
2602
2603 /* For modes without NaNs, if the two operands are equal, we know the
2604 result except if they have side-effects. */
2605 if (! HONOR_NANS (GET_MODE (trueop0))
2606 && rtx_equal_p (trueop0, trueop1)
2607 && ! side_effects_p (trueop0))
2608 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2609
2610 /* If the operands are floating-point constants, see if we can fold
2611 the result. */
2612 else if (GET_CODE (trueop0) == CONST_DOUBLE
2613 && GET_CODE (trueop1) == CONST_DOUBLE
2614 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2615 {
2616 REAL_VALUE_TYPE d0, d1;
2617
2618 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2619 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2620
2621 /* Comparisons are unordered iff at least one of the values is NaN. */
2622 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2623 switch (code)
2624 {
2625 case UNEQ:
2626 case UNLT:
2627 case UNGT:
2628 case UNLE:
2629 case UNGE:
2630 case NE:
2631 case UNORDERED:
2632 return const_true_rtx;
2633 case EQ:
2634 case LT:
2635 case GT:
2636 case LE:
2637 case GE:
2638 case LTGT:
2639 case ORDERED:
2640 return const0_rtx;
2641 default:
2642 return 0;
2643 }
2644
2645 equal = REAL_VALUES_EQUAL (d0, d1);
2646 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2647 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2648 }
2649
2650 /* Otherwise, see if the operands are both integers. */
2651 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2652 && (GET_CODE (trueop0) == CONST_DOUBLE
2653 || GET_CODE (trueop0) == CONST_INT)
2654 && (GET_CODE (trueop1) == CONST_DOUBLE
2655 || GET_CODE (trueop1) == CONST_INT))
2656 {
2657 int width = GET_MODE_BITSIZE (mode);
2658 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2659 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2660
2661 /* Get the two words comprising each integer constant. */
2662 if (GET_CODE (trueop0) == CONST_DOUBLE)
2663 {
2664 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2665 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2666 }
2667 else
2668 {
2669 l0u = l0s = INTVAL (trueop0);
2670 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2671 }
2672
2673 if (GET_CODE (trueop1) == CONST_DOUBLE)
2674 {
2675 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2676 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2677 }
2678 else
2679 {
2680 l1u = l1s = INTVAL (trueop1);
2681 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2682 }
2683
2684 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2685 we have to sign or zero-extend the values. */
2686 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2687 {
2688 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2689 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2690
2691 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2692 l0s |= ((HOST_WIDE_INT) (-1) << width);
2693
2694 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2695 l1s |= ((HOST_WIDE_INT) (-1) << width);
2696 }
2697 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2698 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2699
2700 equal = (h0u == h1u && l0u == l1u);
2701 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2702 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2703 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2704 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2705 }
2706
2707 /* Otherwise, there are some code-specific tests we can make. */
2708 else
2709 {
2710 switch (code)
2711 {
2712 case EQ:
2713 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2714 return const0_rtx;
2715 break;
2716
2717 case NE:
2718 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2719 return const_true_rtx;
2720 break;
2721
2722 case GEU:
2723 /* Unsigned values are never negative. */
2724 if (trueop1 == const0_rtx)
2725 return const_true_rtx;
2726 break;
2727
2728 case LTU:
2729 if (trueop1 == const0_rtx)
2730 return const0_rtx;
2731 break;
2732
2733 case LEU:
2734 /* Unsigned values are never greater than the largest
2735 unsigned value. */
2736 if (GET_CODE (trueop1) == CONST_INT
2737 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2738 && INTEGRAL_MODE_P (mode))
2739 return const_true_rtx;
2740 break;
2741
2742 case GTU:
2743 if (GET_CODE (trueop1) == CONST_INT
2744 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2745 && INTEGRAL_MODE_P (mode))
2746 return const0_rtx;
2747 break;
2748
2749 case LT:
2750 /* Optimize abs(x) < 0.0. */
2751 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2752 {
2753 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2754 : trueop0;
2755 if (GET_CODE (tem) == ABS)
2756 return const0_rtx;
2757 }
2758 break;
2759
2760 case GE:
2761 /* Optimize abs(x) >= 0.0. */
2762 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2763 {
2764 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2765 : trueop0;
2766 if (GET_CODE (tem) == ABS)
2767 return const_true_rtx;
2768 }
2769 break;
2770
2771 case UNGE:
2772 /* Optimize ! (abs(x) < 0.0). */
2773 if (trueop1 == CONST0_RTX (mode))
2774 {
2775 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2776 : trueop0;
2777 if (GET_CODE (tem) == ABS)
2778 return const_true_rtx;
2779 }
2780 break;
2781
2782 default:
2783 break;
2784 }
2785
2786 return 0;
2787 }
2788
2789 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2790 as appropriate. */
2791 switch (code)
2792 {
2793 case EQ:
2794 case UNEQ:
2795 return equal ? const_true_rtx : const0_rtx;
2796 case NE:
2797 case LTGT:
2798 return ! equal ? const_true_rtx : const0_rtx;
2799 case LT:
2800 case UNLT:
2801 return op0lt ? const_true_rtx : const0_rtx;
2802 case GT:
2803 case UNGT:
2804 return op1lt ? const_true_rtx : const0_rtx;
2805 case LTU:
2806 return op0ltu ? const_true_rtx : const0_rtx;
2807 case GTU:
2808 return op1ltu ? const_true_rtx : const0_rtx;
2809 case LE:
2810 case UNLE:
2811 return equal || op0lt ? const_true_rtx : const0_rtx;
2812 case GE:
2813 case UNGE:
2814 return equal || op1lt ? const_true_rtx : const0_rtx;
2815 case LEU:
2816 return equal || op0ltu ? const_true_rtx : const0_rtx;
2817 case GEU:
2818 return equal || op1ltu ? const_true_rtx : const0_rtx;
2819 case ORDERED:
2820 return const_true_rtx;
2821 case UNORDERED:
2822 return const0_rtx;
2823 default:
2824 abort ();
2825 }
2826 }
2827
2828 /* Like simplify_binary_operation except used for relational operators.
2829 MODE is the mode of the result, and CMP_MODE is the mode of the operands.
2830 If CMP_MODE is VOIDmode, both operands must also be VOIDmode and we
2831 compare the operands in "infinite precision". */
2832
2833 rtx
2834 simplify_relational_operation (enum rtx_code code,
2835 enum machine_mode mode ATTRIBUTE_UNUSED,
2836 enum machine_mode cmp_mode, rtx op0, rtx op1)
2837 {
2838 rtx tmp;
2839
2840 tmp = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2841 if (tmp)
2842 {
2843 #ifdef FLOAT_STORE_FLAG_VALUE
2844 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2845 {
2846 if (tmp == const0_rtx)
2847 return CONST0_RTX (mode);
2848 return CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
2849 mode);
2850 }
2851 #endif
2852 return tmp;
2853 }
2854
2855 return NULL_RTX;
2856 }
2857 \f
2858 /* Simplify CODE, an operation with result mode MODE and three operands,
2859 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2860 a constant. Return 0 if no simplification is possible. */
2861
2862 rtx
2863 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2864 enum machine_mode op0_mode, rtx op0, rtx op1,
2865 rtx op2)
2866 {
2867 unsigned int width = GET_MODE_BITSIZE (mode);
2868
2869 /* VOIDmode means "infinite" precision. */
2870 if (width == 0)
2871 width = HOST_BITS_PER_WIDE_INT;
2872
2873 switch (code)
2874 {
2875 case SIGN_EXTRACT:
2876 case ZERO_EXTRACT:
2877 if (GET_CODE (op0) == CONST_INT
2878 && GET_CODE (op1) == CONST_INT
2879 && GET_CODE (op2) == CONST_INT
2880 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2881 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2882 {
2883 /* Extracting a bit-field from a constant */
2884 HOST_WIDE_INT val = INTVAL (op0);
2885
2886 if (BITS_BIG_ENDIAN)
2887 val >>= (GET_MODE_BITSIZE (op0_mode)
2888 - INTVAL (op2) - INTVAL (op1));
2889 else
2890 val >>= INTVAL (op2);
2891
2892 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2893 {
2894 /* First zero-extend. */
2895 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2896 /* If desired, propagate sign bit. */
2897 if (code == SIGN_EXTRACT
2898 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2899 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2900 }
2901
2902 /* Clear the bits that don't belong in our mode,
2903 unless they and our sign bit are all one.
2904 So we get either a reasonable negative value or a reasonable
2905 unsigned value for this mode. */
2906 if (width < HOST_BITS_PER_WIDE_INT
2907 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2908 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2909 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2910
2911 return GEN_INT (val);
2912 }
2913 break;
2914
2915 case IF_THEN_ELSE:
2916 if (GET_CODE (op0) == CONST_INT)
2917 return op0 != const0_rtx ? op1 : op2;
2918
2919 /* Convert c ? a : a into "a". */
2920 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
2921 return op1;
2922
2923 /* Convert a != b ? a : b into "a". */
2924 if (GET_CODE (op0) == NE
2925 && ! side_effects_p (op0)
2926 && ! HONOR_NANS (mode)
2927 && ! HONOR_SIGNED_ZEROS (mode)
2928 && ((rtx_equal_p (XEXP (op0, 0), op1)
2929 && rtx_equal_p (XEXP (op0, 1), op2))
2930 || (rtx_equal_p (XEXP (op0, 0), op2)
2931 && rtx_equal_p (XEXP (op0, 1), op1))))
2932 return op1;
2933
2934 /* Convert a == b ? a : b into "b". */
2935 if (GET_CODE (op0) == EQ
2936 && ! side_effects_p (op0)
2937 && ! HONOR_NANS (mode)
2938 && ! HONOR_SIGNED_ZEROS (mode)
2939 && ((rtx_equal_p (XEXP (op0, 0), op1)
2940 && rtx_equal_p (XEXP (op0, 1), op2))
2941 || (rtx_equal_p (XEXP (op0, 0), op2)
2942 && rtx_equal_p (XEXP (op0, 1), op1))))
2943 return op2;
2944
2945 if (COMPARISON_P (op0) && ! side_effects_p (op0))
2946 {
2947 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2948 ? GET_MODE (XEXP (op0, 1))
2949 : GET_MODE (XEXP (op0, 0)));
2950 rtx temp;
2951 if (cmp_mode == VOIDmode)
2952 cmp_mode = op0_mode;
2953 temp = simplify_const_relational_operation (GET_CODE (op0),
2954 cmp_mode,
2955 XEXP (op0, 0),
2956 XEXP (op0, 1));
2957
2958 /* See if any simplifications were possible. */
2959 if (temp == const0_rtx)
2960 return op2;
2961 else if (temp == const_true_rtx)
2962 return op1;
2963 else if (temp)
2964 abort ();
2965
2966 /* Look for happy constants in op1 and op2. */
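/* If they are STORE_FLAG_VALUE and zero, the whole IF_THEN_ELSE
   collapses into the comparison itself, reversed when the two
   constants are swapped.  */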
2967 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2968 {
2969 HOST_WIDE_INT t = INTVAL (op1);
2970 HOST_WIDE_INT f = INTVAL (op2);
2971
2972 if (t == STORE_FLAG_VALUE && f == 0)
2973 code = GET_CODE (op0);
2974 else if (t == 0 && f == STORE_FLAG_VALUE)
2975 {
2976 enum rtx_code tmp;
2977 tmp = reversed_comparison_code (op0, NULL_RTX);
2978 if (tmp == UNKNOWN)
2979 break;
2980 code = tmp;
2981 }
2982 else
2983 break;
2984
2985 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2986 }
2987 }
2988 break;
2989
2990 case VEC_MERGE:
2991 if (GET_MODE (op0) != mode
2992 || GET_MODE (op1) != mode
2993 || !VECTOR_MODE_P (mode))
2994 abort ();
2995 op2 = avoid_constant_pool_reference (op2);
2996 if (GET_CODE (op2) == CONST_INT)
2997 {
2998 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2999 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3000 int mask = (1 << n_elts) - 1;
3001
3002 if (!(INTVAL (op2) & mask))
3003 return op1;
3004 if ((INTVAL (op2) & mask) == mask)
3005 return op0;
3006
3007 op0 = avoid_constant_pool_reference (op0);
3008 op1 = avoid_constant_pool_reference (op1);
3009 if (GET_CODE (op0) == CONST_VECTOR
3010 && GET_CODE (op1) == CONST_VECTOR)
3011 {
3012 rtvec v = rtvec_alloc (n_elts);
3013 unsigned int i;
3014
3015 for (i = 0; i < n_elts; i++)
3016 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3017 ? CONST_VECTOR_ELT (op0, i)
3018 : CONST_VECTOR_ELT (op1, i));
3019 return gen_rtx_CONST_VECTOR (mode, v);
3020 }
3021 }
3022 break;
3023
3024 default:
3025 abort ();
3026 }
3027
3028 return 0;
3029 }
3030
3031 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3032 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3033
3034 Works by unpacking OP into a collection of 8-bit values
3035 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3036 and then repacking them again for OUTERMODE. */
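/* For example, taking the low SImode word of a DImode constant
   unpacks the constant into 8-bit chunks, selects the chunks named
   by BYTE, and repacks the selected ones as an SImode constant.  */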
3037
3038 static rtx
3039 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3040 enum machine_mode innermode, unsigned int byte)
3041 {
3042 /* We support up to 512-bit values (for V8DFmode). */
3043 enum {
3044 max_bitsize = 512,
3045 value_bit = 8,
3046 value_mask = (1 << value_bit) - 1
3047 };
3048 unsigned char value[max_bitsize / value_bit];
3049 int value_start;
3050 int i;
3051 int elem;
3052
3053 int num_elem;
3054 rtx * elems;
3055 int elem_bitsize;
3056 rtx result_s;
3057 rtvec result_v = NULL;
3058 enum mode_class outer_class;
3059 enum machine_mode outer_submode;
3060
3061 /* Some ports misuse CCmode. */
3062 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3063 return op;
3064
3065 /* Unpack the value. */
3066
3067 if (GET_CODE (op) == CONST_VECTOR)
3068 {
3069 num_elem = CONST_VECTOR_NUNITS (op);
3070 elems = &CONST_VECTOR_ELT (op, 0);
3071 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3072 }
3073 else
3074 {
3075 num_elem = 1;
3076 elems = &op;
3077 elem_bitsize = max_bitsize;
3078 }
3079
3080 if (BITS_PER_UNIT % value_bit != 0)
3081 abort (); /* Too complicated; reducing value_bit may help. */
3082 if (elem_bitsize % BITS_PER_UNIT != 0)
3083 abort (); /* I don't know how to handle endianness of sub-units. */
3084
3085 for (elem = 0; elem < num_elem; elem++)
3086 {
3087 unsigned char * vp;
3088 rtx el = elems[elem];
3089
3090 /* Vectors are kept in target memory order. (This is probably
3091 a mistake.) */
3092 {
3093 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3094 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3095 / BITS_PER_UNIT);
3096 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3097 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3098 unsigned bytele = (subword_byte % UNITS_PER_WORD
3099 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3100 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3101 }
3102
3103 switch (GET_CODE (el))
3104 {
3105 case CONST_INT:
3106 for (i = 0;
3107 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3108 i += value_bit)
3109 *vp++ = INTVAL (el) >> i;
3110 /* CONST_INTs are always logically sign-extended. */
3111 for (; i < elem_bitsize; i += value_bit)
3112 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3113 break;
3114
3115 case CONST_DOUBLE:
3116 if (GET_MODE (el) == VOIDmode)
3117 {
3118 /* If this triggers, someone should have generated a
3119 CONST_INT instead. */
3120 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3121 abort ();
3122
3123 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3124 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3125 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3126 {
3127 *vp++
3128 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3129 i += value_bit;
3130 }
3131 /* It shouldn't matter what's done here, so fill it with
3132 zero. */
3133 for (; i < max_bitsize; i += value_bit)
3134 *vp++ = 0;
3135 }
3136 else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
3137 {
3138 long tmp[max_bitsize / 32];
3139 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3140
3141 if (bitsize > elem_bitsize)
3142 abort ();
3143 if (bitsize % value_bit != 0)
3144 abort ();
3145
3146 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3147 GET_MODE (el));
3148
3149 /* real_to_target produces its result in words affected by
3150 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3151 and use WORDS_BIG_ENDIAN instead; see the documentation
3152 of SUBREG in rtl.texi. */
3153 for (i = 0; i < bitsize; i += value_bit)
3154 {
3155 int ibase;
3156 if (WORDS_BIG_ENDIAN)
3157 ibase = bitsize - 1 - i;
3158 else
3159 ibase = i;
3160 *vp++ = tmp[ibase / 32] >> i % 32;
3161 }
3162
3163 /* It shouldn't matter what's done here, so fill it with
3164 zero. */
3165 for (; i < elem_bitsize; i += value_bit)
3166 *vp++ = 0;
3167 }
3168 else
3169 abort ();
3170 break;
3171
3172 default:
3173 abort ();
3174 }
3175 }
3176
3177 /* Now, pick the right byte to start with. */
3178 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3179 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3180 will already have offset 0. */
3181 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3182 {
3183 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3184 - byte);
3185 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3186 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3187 byte = (subword_byte % UNITS_PER_WORD
3188 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3189 }
3190
3191 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3192 so if it's become negative it will instead be very large.) */
3193 if (byte >= GET_MODE_SIZE (innermode))
3194 abort ();
3195
3196 /* Convert from bytes to chunks of size value_bit. */
3197 value_start = byte * (BITS_PER_UNIT / value_bit);
3198
3199 /* Re-pack the value. */
3200
3201 if (VECTOR_MODE_P (outermode))
3202 {
3203 num_elem = GET_MODE_NUNITS (outermode);
3204 result_v = rtvec_alloc (num_elem);
3205 elems = &RTVEC_ELT (result_v, 0);
3206 outer_submode = GET_MODE_INNER (outermode);
3207 }
3208 else
3209 {
3210 num_elem = 1;
3211 elems = &result_s;
3212 outer_submode = outermode;
3213 }
3214
3215 outer_class = GET_MODE_CLASS (outer_submode);
3216 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3217
3218 if (elem_bitsize % value_bit != 0)
3219 abort ();
3220 if (elem_bitsize + value_start * value_bit > max_bitsize)
3221 abort ();
3222
3223 for (elem = 0; elem < num_elem; elem++)
3224 {
3225 unsigned char *vp;
3226
3227 /* Vectors are stored in target memory order. (This is probably
3228 a mistake.) */
3229 {
3230 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3231 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3232 / BITS_PER_UNIT);
3233 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3234 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3235 unsigned bytele = (subword_byte % UNITS_PER_WORD
3236 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3237 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3238 }
3239
3240 switch (outer_class)
3241 {
3242 case MODE_INT:
3243 case MODE_PARTIAL_INT:
3244 {
3245 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3246
3247 for (i = 0;
3248 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3249 i += value_bit)
3250 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3251 for (; i < elem_bitsize; i += value_bit)
3252 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3253 << (i - HOST_BITS_PER_WIDE_INT));
3254
3255 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3256 know why. */
3257 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3258 elems[elem] = gen_int_mode (lo, outer_submode);
3259 else
3260 elems[elem] = immed_double_const (lo, hi, outer_submode);
3261 }
3262 break;
3263
3264 case MODE_FLOAT:
3265 {
3266 REAL_VALUE_TYPE r;
3267 long tmp[max_bitsize / 32];
3268
3269 /* real_from_target wants its input in words affected by
3270 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3271 and use WORDS_BIG_ENDIAN instead; see the documentation
3272 of SUBREG in rtl.texi. */
3273 for (i = 0; i < max_bitsize / 32; i++)
3274 tmp[i] = 0;
3275 for (i = 0; i < elem_bitsize; i += value_bit)
3276 {
3277 int ibase;
3278 if (WORDS_BIG_ENDIAN)
3279 ibase = elem_bitsize - 1 - i;
3280 else
3281 ibase = i;
3282 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3283 }
3284
3285 real_from_target (&r, tmp, outer_submode);
3286 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3287 }
3288 break;
3289
3290 default:
3291 abort ();
3292 }
3293 }
3294 if (VECTOR_MODE_P (outermode))
3295 return gen_rtx_CONST_VECTOR (outermode, result_v);
3296 else
3297 return result_s;
3298 }
3299
3300 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3301 Return 0 if no simplifications are possible. */
3302 rtx
3303 simplify_subreg (enum machine_mode outermode, rtx op,
3304 enum machine_mode innermode, unsigned int byte)
3305 {
3306 /* Little bit of sanity checking. */
3307 if (innermode == VOIDmode || outermode == VOIDmode
3308 || innermode == BLKmode || outermode == BLKmode)
3309 abort ();
3310
3311 if (GET_MODE (op) != innermode
3312 && GET_MODE (op) != VOIDmode)
3313 abort ();
3314
3315 if (byte % GET_MODE_SIZE (outermode)
3316 || byte >= GET_MODE_SIZE (innermode))
3317 abort ();
3318
3319 if (outermode == innermode && !byte)
3320 return op;
3321
3322 if (GET_CODE (op) == CONST_INT
3323 || GET_CODE (op) == CONST_DOUBLE
3324 || GET_CODE (op) == CONST_VECTOR)
3325 return simplify_immed_subreg (outermode, op, innermode, byte);
3326
3327 /* Changing mode twice with SUBREG => just change it once,
3328 or not at all if changing back to OP's starting mode. */
3329 if (GET_CODE (op) == SUBREG)
3330 {
3331 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3332 int final_offset = byte + SUBREG_BYTE (op);
3333 rtx new;
3334
3335 if (outermode == innermostmode
3336 && byte == 0 && SUBREG_BYTE (op) == 0)
3337 return SUBREG_REG (op);
3338
3339 /* The SUBREG_BYTE represents the offset, as if the value were stored
3340 in memory. An irritating exception is the paradoxical subreg, where
3341 we define SUBREG_BYTE to be 0. On big endian machines, this
3342 value should be negative. For a moment, undo this exception. */
3343 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3344 {
3345 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3346 if (WORDS_BIG_ENDIAN)
3347 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3348 if (BYTES_BIG_ENDIAN)
3349 final_offset += difference % UNITS_PER_WORD;
3350 }
3351 if (SUBREG_BYTE (op) == 0
3352 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3353 {
3354 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3355 if (WORDS_BIG_ENDIAN)
3356 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3357 if (BYTES_BIG_ENDIAN)
3358 final_offset += difference % UNITS_PER_WORD;
3359 }
3360
3361 /* See whether resulting subreg will be paradoxical. */
3362 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3363 {
3364 /* In nonparadoxical subregs we can't handle negative offsets. */
3365 if (final_offset < 0)
3366 return NULL_RTX;
3367 /* Bail out in case resulting subreg would be incorrect. */
3368 if (final_offset % GET_MODE_SIZE (outermode)
3369 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3370 return NULL_RTX;
3371 }
3372 else
3373 {
3374 int offset = 0;
3375 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3376
3377 /* In a paradoxical subreg, see if we are still looking at the lower
3378 part. If so, our SUBREG_BYTE will be 0. */
3379 if (WORDS_BIG_ENDIAN)
3380 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3381 if (BYTES_BIG_ENDIAN)
3382 offset += difference % UNITS_PER_WORD;
3383 if (offset == final_offset)
3384 final_offset = 0;
3385 else
3386 return NULL_RTX;
3387 }
3388
3389 /* Recurse for further possible simplifications. */
3390 new = simplify_subreg (outermode, SUBREG_REG (op),
3391 GET_MODE (SUBREG_REG (op)),
3392 final_offset);
3393 if (new)
3394 return new;
3395 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3396 }
3397
3398 /* SUBREG of a hard register => just change the register number
3399 and/or mode. If the hard register is not valid in that mode,
3400 suppress this simplification. If the hard register is the stack,
3401 frame, or argument pointer, leave this as a SUBREG. */
3402
3403 if (REG_P (op)
3404 && (! REG_FUNCTION_VALUE_P (op)
3405 || ! rtx_equal_function_value_matters)
3406 && REGNO (op) < FIRST_PSEUDO_REGISTER
3407 #ifdef CANNOT_CHANGE_MODE_CLASS
3408 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3409 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3410 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3411 #endif
3412 && ((reload_completed && !frame_pointer_needed)
3413 || (REGNO (op) != FRAME_POINTER_REGNUM
3414 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3415 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3416 #endif
3417 ))
3418 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3419 && REGNO (op) != ARG_POINTER_REGNUM
3420 #endif
3421 && REGNO (op) != STACK_POINTER_REGNUM
3422 && subreg_offset_representable_p (REGNO (op), innermode,
3423 byte, outermode))
3424 {
3425 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3426 int final_regno = subreg_hard_regno (tem, 0);
3427
3428 /* ??? We do allow it if the current REG is not valid for
3429 its mode. This is a kludge to work around how float/complex
3430 arguments are passed on 32-bit SPARC and should be fixed. */
3431 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3432 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3433 {
3434 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3435
3436 /* Propagate original regno. We don't have any way to specify
3437 the offset inside original regno, so do so only for lowpart.
3438 The information is used only by alias analysis, which cannot
3439 grok partial registers anyway. */
3440
3441 if (subreg_lowpart_offset (outermode, innermode) == byte)
3442 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3443 return x;
3444 }
3445 }
3446
3447 /* If we have a SUBREG of a register that we are replacing and we are
3448 replacing it with a MEM, make a new MEM and try replacing the
3449 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3450 or if we would be widening it. */
3451
3452 if (GET_CODE (op) == MEM
3453 && ! mode_dependent_address_p (XEXP (op, 0))
3454 /* Allow splitting of volatile memory references in case we don't
3455 have an instruction to move the whole thing. */
3456 && (! MEM_VOLATILE_P (op)
3457 || ! have_insn_for (SET, innermode))
3458 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3459 return adjust_address_nv (op, outermode, byte);
3460
3461 /* Handle complex values represented as CONCAT
3462 of real and imaginary part. */
3463 if (GET_CODE (op) == CONCAT)
3464 {
3465 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3466 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3467 unsigned int final_offset;
3468 rtx res;
3469
3470 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3471 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3472 if (res)
3473 return res;
3474 /* We can at least simplify it by referring directly to the
3475 relevant part. */
3476 return gen_rtx_SUBREG (outermode, part, final_offset);
3477 }
3478
3479 /* Optimize SUBREG truncations of zero and sign extended values. */
3480 if ((GET_CODE (op) == ZERO_EXTEND
3481 || GET_CODE (op) == SIGN_EXTEND)
3482 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3483 {
3484 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3485
3486 /* If we're requesting the lowpart of a zero or sign extension,
3487 there are three possibilities. If the outermode is the same
3488 as the origmode, we can omit both the extension and the subreg.
3489 If the outermode is not larger than the origmode, we can apply
3490 the truncation without the extension. Finally, if the outermode
3491 is larger than the origmode, but both are integer modes, we
3492 can just extend to the appropriate mode. */
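/* For example, the lowpart SUBREG:QI of (zero_extend:SI (reg:QI X))
   is simply (reg:QI X), while the lowpart SUBREG:HI of the same
   expression becomes (zero_extend:HI (reg:QI X)).  */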
3493 if (bitpos == 0)
3494 {
3495 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3496 if (outermode == origmode)
3497 return XEXP (op, 0);
3498 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3499 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3500 subreg_lowpart_offset (outermode,
3501 origmode));
3502 if (SCALAR_INT_MODE_P (outermode))
3503 return simplify_gen_unary (GET_CODE (op), outermode,
3504 XEXP (op, 0), origmode);
3505 }
3506
3507 /* A SUBREG resulting from a zero extension may fold to zero if
3508 it extracts higher bits than the ZERO_EXTEND's source provides. */
3509 if (GET_CODE (op) == ZERO_EXTEND
3510 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3511 return CONST0_RTX (outermode);
3512 }
3513
3514 return NULL_RTX;
3515 }
3516
3517 /* Make a SUBREG operation or equivalent if it folds. */
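/* Unlike simplify_subreg, when no simplification is found this
   builds the SUBREG rtx itself, except for QUEUED, SUBREG or
   VOIDmode operands, where it returns NULL_RTX.  */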
3518
3519 rtx
3520 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3521 enum machine_mode innermode, unsigned int byte)
3522 {
3523 rtx new;
3524 /* Little bit of sanity checking. */
3525 if (innermode == VOIDmode || outermode == VOIDmode
3526 || innermode == BLKmode || outermode == BLKmode)
3527 abort ();
3528
3529 if (GET_MODE (op) != innermode
3530 && GET_MODE (op) != VOIDmode)
3531 abort ();
3532
3533 if (byte % GET_MODE_SIZE (outermode)
3534 || byte >= GET_MODE_SIZE (innermode))
3535 abort ();
3536
3537 if (GET_CODE (op) == QUEUED)
3538 return NULL_RTX;
3539
3540 new = simplify_subreg (outermode, op, innermode, byte);
3541 if (new)
3542 return new;
3543
3544 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3545 return NULL_RTX;
3546
3547 return gen_rtx_SUBREG (outermode, op, byte);
3548 }
3549 /* Simplify X, an rtx expression.
3550
3551 Return the simplified expression or NULL if no simplifications
3552 were possible.
3553
3554 This is the preferred entry point into the simplification routines;
3555 however, we still allow passes to call the more specific routines.
3556
3557 Right now GCC has three (yes, three) major bodies of RTL simplification
3558 code that need to be unified.
3559
3560 1. fold_rtx in cse.c. This code uses various CSE specific
3561 information to aid in RTL simplification.
3562
3563 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3564 it uses combine specific information to aid in RTL
3565 simplification.
3566
3567 3. The routines in this file.
3568
3569
3570 Long term we want to only have one body of simplification code; to
3571 get to that state I recommend the following steps:
3572
3573 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3574 which do not depend on pass-specific state into these routines.
3575
3576 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3577 use this routine whenever possible.
3578
3579 3. Allow for pass dependent state to be provided to these
3580 routines and add simplifications based on the pass dependent
3581 state. Remove code from cse.c & combine.c that becomes
3582 redundant/dead.
3583
3584 It will take time, but ultimately the compiler will be easier to
3585 maintain and improve. It's totally silly that when we add a
3586 simplification it needs to be added to 4 places (3 for RTL
3587 simplification and 1 for tree simplification). */
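/* A typical caller (hypothetical sketch) treats a NULL return as
   "no change" rather than as an error:

	rtx tem = simplify_rtx (x);
	if (tem)
	  x = tem;  */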
3588
3589 rtx
3590 simplify_rtx (rtx x)
3591 {
3592 enum rtx_code code = GET_CODE (x);
3593 enum machine_mode mode = GET_MODE (x);
3594 rtx temp;
3595
3596 switch (GET_RTX_CLASS (code))
3597 {
3598 case RTX_UNARY:
3599 return simplify_unary_operation (code, mode,
3600 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3601 case RTX_COMM_ARITH:
3602 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3603 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3604
3605 /* Fall through.... */
3606
3607 case RTX_BIN_ARITH:
3608 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3609
3610 case RTX_TERNARY:
3611 case RTX_BITFIELD_OPS:
3612 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3613 XEXP (x, 0), XEXP (x, 1),
3614 XEXP (x, 2));
3615
3616 case RTX_COMPARE:
3617 case RTX_COMM_COMPARE:
3618 temp = simplify_relational_operation (code, mode,
3619 ((GET_MODE (XEXP (x, 0))
3620 != VOIDmode)
3621 ? GET_MODE (XEXP (x, 0))
3622 : GET_MODE (XEXP (x, 1))),
3623 XEXP (x, 0), XEXP (x, 1));
3624 return temp;
3625
3626 case RTX_EXTRA:
3627 if (code == SUBREG)
3628 return simplify_gen_subreg (mode, SUBREG_REG (x),
3629 GET_MODE (SUBREG_REG (x)),
3630 SUBREG_BYTE (x));
3631 if (code == CONSTANT_P_RTX)
3632 {
3633 if (CONSTANT_P (XEXP (x, 0)))
3634 return const1_rtx;
3635 }
3636 break;
3637
3638 case RTX_OBJ:
3639 if (code == LO_SUM)
3640 {
3641 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3642 if (GET_CODE (XEXP (x, 0)) == HIGH
3643 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3644 return XEXP (x, 1);
3645 }
3646 break;
3647
3648 default:
3649 break;
3650 }
3651 return NULL;
3652 }