re PR rtl-optimization/12260 (ICE in output_operand: invalid expression as operand)
[gcc.git] / gcc / simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
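/* For example, HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5) is
   (HOST_WIDE_INT) -1 and HWI_SIGN_EXTEND (5) is 0, i.e. exactly the
   high word that a signed widening of the low word would produce.  */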
51
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
55 rtx, int);
56 static bool associative_constant_p (rtx);
57 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
58 rtx, rtx);
59 \f
60 /* Negate a CONST_INT rtx, truncating (because a conversion from a
61 maximally negative number can overflow). */
62 static rtx
63 neg_const_int (enum machine_mode mode, rtx i)
64 {
65 return gen_int_mode (- INTVAL (i), mode);
66 }
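/* Illustrative case: in QImode, negating (const_int -128) would
   mathematically give +128, which QImode cannot hold; gen_int_mode
   truncates, so the result is (const_int -128) again rather than an
   out-of-range constant.  */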
67
68 \f
69 /* Make a binary operation by properly ordering the operands and
70 seeing if the expression folds. */
71
72 rtx
73 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
74 rtx op1)
75 {
76 rtx tem;
77
78 /* Put complex operands first and constants second if commutative. */
79 if (GET_RTX_CLASS (code) == 'c'
80 && swap_commutative_operands_p (op0, op1))
81 tem = op0, op0 = op1, op1 = tem;
82
83 /* If this simplifies, do it. */
84 tem = simplify_binary_operation (code, mode, op0, op1);
85 if (tem)
86 return tem;
87
88 /* Handle addition and subtraction specially. Otherwise, just form
89 the operation. */
90
91 if (code == PLUS || code == MINUS)
92 {
93 tem = simplify_plus_minus (code, mode, op0, op1, 1);
94 if (tem)
95 return tem;
96 }
97
98 return gen_rtx_fmt_ee (code, mode, op0, op1);
99 }
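/* Usage sketch: simplify_gen_binary (PLUS, SImode, GEN_INT (2),
   GEN_INT (3)) folds through simplify_binary_operation to
   (const_int 5); when nothing folds, the rtx is built verbatim by
   gen_rtx_fmt_ee.  */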
100 \f
101 /* If X is a MEM referencing the constant pool, return the real value.
102 Otherwise return X. */
103 rtx
104 avoid_constant_pool_reference (rtx x)
105 {
106 rtx c, tmp, addr;
107 enum machine_mode cmode;
108
109 switch (GET_CODE (x))
110 {
111 case MEM:
112 break;
113
114 case FLOAT_EXTEND:
115 /* Handle float extensions of constant pool references. */
116 tmp = XEXP (x, 0);
117 c = avoid_constant_pool_reference (tmp);
118 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
119 {
120 REAL_VALUE_TYPE d;
121
122 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
123 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
124 }
125 return x;
126
127 default:
128 return x;
129 }
130
131 addr = XEXP (x, 0);
132
133 /* Call target hook to avoid the effects of -fpic etc.... */
134 addr = (*targetm.delegitimize_address) (addr);
135
136 if (GET_CODE (addr) == LO_SUM)
137 addr = XEXP (addr, 1);
138
139 if (GET_CODE (addr) != SYMBOL_REF
140 || ! CONSTANT_POOL_ADDRESS_P (addr))
141 return x;
142
143 c = get_pool_constant (addr);
144 cmode = get_pool_mode (addr);
145
146 /* If we're accessing the constant in a different mode than it was
147 originally stored in, attempt to fix that up via subreg simplifications.
148 If that fails we have no choice but to return the original memory. */
149 if (cmode != GET_MODE (x))
150 {
151 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
152 return c ? c : x;
153 }
154
155 return c;
156 }
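/* Usage sketch: for a MEM whose address is a constant-pool
   SYMBOL_REF holding a DFmode 1.0, this returns the CONST_DOUBLE
   itself; if the MEM reads the entry in some other mode, the
   simplify_subreg call above attempts the conversion and we fall
   back to the original MEM when it fails.  */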
157 \f
158 /* Make a unary operation by first seeing if it folds and otherwise making
159 the specified operation. */
160
161 rtx
162 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
163 enum machine_mode op_mode)
164 {
165 rtx tem;
166
167 /* If this simplifies, use it. */
168 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
169 return tem;
170
171 return gen_rtx_fmt_e (code, mode, op);
172 }
173
174 /* Likewise for ternary operations. */
175
176 rtx
177 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
178 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
179 {
180 rtx tem;
181
182 /* If this simplifies, use it. */
183 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
184 op0, op1, op2)))
185 return tem;
186
187 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
188 }
189 \f
190 /* Likewise, for relational operations.
191 CMP_MODE specifies the mode in which the comparison
192 is done. */
193
194 rtx
195 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
196 enum machine_mode cmp_mode, rtx op0, rtx op1)
197 {
198 rtx tem;
199
200 if (cmp_mode == VOIDmode)
201 cmp_mode = GET_MODE (op0);
202 if (cmp_mode == VOIDmode)
203 cmp_mode = GET_MODE (op1);
204
205 if (cmp_mode != VOIDmode)
206 {
207 tem = simplify_relational_operation (code, cmp_mode, op0, op1);
208
209 if (tem)
210 {
211 #ifdef FLOAT_STORE_FLAG_VALUE
212 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
213 {
214 REAL_VALUE_TYPE val;
215 if (tem == const0_rtx)
216 return CONST0_RTX (mode);
217 if (tem != const_true_rtx)
218 abort ();
219 val = FLOAT_STORE_FLAG_VALUE (mode);
220 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
221 }
222 #endif
223 return tem;
224 }
225 }
226
227 /* For the following tests, ensure const0_rtx is op1. */
228 if (swap_commutative_operands_p (op0, op1)
229 || (op0 == const0_rtx && op1 != const0_rtx))
230 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
231
232 /* If op0 is a compare, extract the comparison arguments from it. */
233 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
234 return simplify_gen_relational (code, mode, VOIDmode,
235 XEXP (op0, 0), XEXP (op0, 1));
236
237 /* If op0 is a comparison, extract the comparison arguments from it. */
238 if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
239 {
240 if (code == NE)
241 {
242 if (GET_MODE (op0) == mode)
243 return op0;
244 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
245 XEXP (op0, 0), XEXP (op0, 1));
246 }
247 else if (code == EQ)
248 {
249 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
250 if (new != UNKNOWN)
251 return simplify_gen_relational (new, mode, VOIDmode,
252 XEXP (op0, 0), XEXP (op0, 1));
253 }
254 }
255
256 return gen_rtx_fmt_ee (code, mode, op0, op1);
257 }
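/* For instance, simplify_gen_relational (NE, SImode, VOIDmode,
   (eq:SI x y), const0_rtx) returns (eq:SI x y) directly: testing a
   comparison against zero re-uses (or, for EQ, reverses) the inner
   comparison instead of nesting two comparisons.  */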
258 \f
259 /* Replace all occurrences of OLD in X with NEW and try to simplify the
260 resulting RTX. Return a new RTX which is as simplified as possible. */
261
262 rtx
263 simplify_replace_rtx (rtx x, rtx old, rtx new)
264 {
265 enum rtx_code code = GET_CODE (x);
266 enum machine_mode mode = GET_MODE (x);
267
268 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
269 to build a new expression substituting recursively. If we can't do
270 anything, return our input. */
271
272 if (x == old)
273 return new;
274
275 switch (GET_RTX_CLASS (code))
276 {
277 case '1':
278 {
279 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
280 rtx op = (XEXP (x, 0) == old
281 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
282
283 return simplify_gen_unary (code, mode, op, op_mode);
284 }
285
286 case '2':
287 case 'c':
288 return
289 simplify_gen_binary (code, mode,
290 simplify_replace_rtx (XEXP (x, 0), old, new),
291 simplify_replace_rtx (XEXP (x, 1), old, new));
292 case '<':
293 {
294 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
295 ? GET_MODE (XEXP (x, 0))
296 : GET_MODE (XEXP (x, 1)));
297 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
298 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
299 return simplify_gen_relational (code, mode, op_mode, op0, op1);
300 }
301
302 case '3':
303 case 'b':
304 {
305 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
306 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
307
308 return
309 simplify_gen_ternary (code, mode,
310 (op_mode != VOIDmode
311 ? op_mode
312 : GET_MODE (op0)),
313 op0,
314 simplify_replace_rtx (XEXP (x, 1), old, new),
315 simplify_replace_rtx (XEXP (x, 2), old, new));
316 }
317
318 case 'x':
319 /* The only case we try to handle is a SUBREG. */
320 if (code == SUBREG)
321 {
322 rtx exp;
323 exp = simplify_gen_subreg (GET_MODE (x),
324 simplify_replace_rtx (SUBREG_REG (x),
325 old, new),
326 GET_MODE (SUBREG_REG (x)),
327 SUBREG_BYTE (x));
328 if (exp)
329 x = exp;
330 }
331 return x;
332
333 case 'o':
334 if (code == MEM)
335 return replace_equiv_address_nv (x,
336 simplify_replace_rtx (XEXP (x, 0),
337 old, new));
338 else if (code == LO_SUM)
339 {
340 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
341 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
342
343 /* (lo_sum (high x) x) -> x */
344 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
345 return op1;
346
347 return gen_rtx_LO_SUM (mode, op0, op1);
348 }
349 else if (code == REG)
350 {
351 if (REG_P (old) && REGNO (x) == REGNO (old))
352 return new;
353 }
354
355 return x;
356
357 default:
358 return x;
359 }
360 return x;
361 }
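/* Worked example: replacing (reg R) by (const_int 8) in
   (plus:SI (reg R) (const_int 4)) recurses through the '2' case and
   simplify_gen_binary folds the constants, giving (const_int 12).
   (R stands for an arbitrary register number.)  */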
362 \f
363 /* Try to simplify a unary operation CODE whose output mode is to be
364 MODE with input operand OP whose mode was originally OP_MODE.
365 Return zero if no simplification can be made. */
366 rtx
367 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
368 rtx op, enum machine_mode op_mode)
369 {
370 unsigned int width = GET_MODE_BITSIZE (mode);
371 rtx trueop = avoid_constant_pool_reference (op);
372
373 if (code == VEC_DUPLICATE)
374 {
375 if (!VECTOR_MODE_P (mode))
376 abort ();
377 if (GET_MODE (trueop) != VOIDmode
378 && !VECTOR_MODE_P (GET_MODE (trueop))
379 && GET_MODE_INNER (mode) != GET_MODE (trueop))
380 abort ();
381 if (GET_MODE (trueop) != VOIDmode
382 && VECTOR_MODE_P (GET_MODE (trueop))
383 && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
384 abort ();
385 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
386 || GET_CODE (trueop) == CONST_VECTOR)
387 {
388 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
389 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
390 rtvec v = rtvec_alloc (n_elts);
391 unsigned int i;
392
393 if (GET_CODE (trueop) != CONST_VECTOR)
394 for (i = 0; i < n_elts; i++)
395 RTVEC_ELT (v, i) = trueop;
396 else
397 {
398 enum machine_mode inmode = GET_MODE (trueop);
399 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
400 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
401
402 if (in_n_elts >= n_elts || n_elts % in_n_elts)
403 abort ();
404 for (i = 0; i < n_elts; i++)
405 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
406 }
407 return gen_rtx_CONST_VECTOR (mode, v);
408 }
409 }
410 else if (GET_CODE (op) == CONST)
411 return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
412
413 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
414 {
415 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
416 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
417 enum machine_mode opmode = GET_MODE (trueop);
418 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
419 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
420 rtvec v = rtvec_alloc (n_elts);
421 unsigned int i;
422
423 if (op_n_elts != n_elts)
424 abort ();
425
426 for (i = 0; i < n_elts; i++)
427 {
428 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
429 CONST_VECTOR_ELT (trueop, i),
430 GET_MODE_INNER (opmode));
431 if (!x)
432 return 0;
433 RTVEC_ELT (v, i) = x;
434 }
435 return gen_rtx_CONST_VECTOR (mode, v);
436 }
437
438 /* The order of these tests is critical so that, for example, we don't
439 check the wrong mode (input vs. output) for a conversion operation,
440 such as FIX. At some point, this should be simplified. */
441
442 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
443 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
444 {
445 HOST_WIDE_INT hv, lv;
446 REAL_VALUE_TYPE d;
447
448 if (GET_CODE (trueop) == CONST_INT)
449 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
450 else
451 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
452
453 REAL_VALUE_FROM_INT (d, lv, hv, mode);
454 d = real_value_truncate (mode, d);
455 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
456 }
457 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
458 && (GET_CODE (trueop) == CONST_DOUBLE
459 || GET_CODE (trueop) == CONST_INT))
460 {
461 HOST_WIDE_INT hv, lv;
462 REAL_VALUE_TYPE d;
463
464 if (GET_CODE (trueop) == CONST_INT)
465 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
466 else
467 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
468
469 if (op_mode == VOIDmode)
470 {
471 /* We don't know how to interpret negative-looking numbers in
472 this case, so don't try to fold those. */
473 if (hv < 0)
474 return 0;
475 }
476 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
477 ;
478 else
479 hv = 0, lv &= GET_MODE_MASK (op_mode);
480
481 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
482 d = real_value_truncate (mode, d);
483 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
484 }
485
486 if (GET_CODE (trueop) == CONST_INT
487 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
488 {
489 HOST_WIDE_INT arg0 = INTVAL (trueop);
490 HOST_WIDE_INT val;
491
492 switch (code)
493 {
494 case NOT:
495 val = ~ arg0;
496 break;
497
498 case NEG:
499 val = - arg0;
500 break;
501
502 case ABS:
503 val = (arg0 >= 0 ? arg0 : - arg0);
504 break;
505
506 case FFS:
507 /* Don't use ffs here. Instead, get low order bit and then its
508 number. If arg0 is zero, this will return 0, as desired. */
509 arg0 &= GET_MODE_MASK (mode);
510 val = exact_log2 (arg0 & (- arg0)) + 1;
511 break;
512
513 case CLZ:
514 arg0 &= GET_MODE_MASK (mode);
515 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
516 ;
517 else
518 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
519 break;
520
521 case CTZ:
522 arg0 &= GET_MODE_MASK (mode);
523 if (arg0 == 0)
524 {
525 /* Even if the value at zero is undefined, we have to come
526 up with some replacement; the mode's bitsize seems good enough. */
527 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
528 val = GET_MODE_BITSIZE (mode);
529 }
530 else
531 val = exact_log2 (arg0 & -arg0);
532 break;
533
534 case POPCOUNT:
535 arg0 &= GET_MODE_MASK (mode);
536 val = 0;
537 while (arg0)
538 val++, arg0 &= arg0 - 1;
539 break;
540
541 case PARITY:
542 arg0 &= GET_MODE_MASK (mode);
543 val = 0;
544 while (arg0)
545 val++, arg0 &= arg0 - 1;
546 val &= 1;
547 break;
548
549 case TRUNCATE:
550 val = arg0;
551 break;
552
553 case ZERO_EXTEND:
554 /* When zero-extending a CONST_INT, we need to know its
555 original mode. */
556 if (op_mode == VOIDmode)
557 abort ();
558 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
559 {
560 /* If we were really extending the mode,
561 we would have to distinguish between zero-extension
562 and sign-extension. */
563 if (width != GET_MODE_BITSIZE (op_mode))
564 abort ();
565 val = arg0;
566 }
567 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
568 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
569 else
570 return 0;
571 break;
572
573 case SIGN_EXTEND:
574 if (op_mode == VOIDmode)
575 op_mode = mode;
576 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
577 {
578 /* If we were really extending the mode,
579 we would have to distinguish between zero-extension
580 and sign-extension. */
581 if (width != GET_MODE_BITSIZE (op_mode))
582 abort ();
583 val = arg0;
584 }
585 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
586 {
587 val
588 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
589 if (val
590 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
591 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
592 }
593 else
594 return 0;
595 break;
596
597 case SQRT:
598 case FLOAT_EXTEND:
599 case FLOAT_TRUNCATE:
600 case SS_TRUNCATE:
601 case US_TRUNCATE:
602 return 0;
603
604 default:
605 abort ();
606 }
607
608 val = trunc_int_for_mode (val, mode);
609
610 return GEN_INT (val);
611 }
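/* For instance, (zero_extend:SI (const_int -1)) with op_mode QImode
   masks to the low 8 bits and folds to (const_int 255), while the
   corresponding SIGN_EXTEND re-extends the QImode sign bit and
   folds back to (const_int -1).  */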
612
613 /* We can do some operations on integer CONST_DOUBLEs. Also allow
614 for a DImode operation on a CONST_INT. */
615 else if (GET_MODE (trueop) == VOIDmode
616 && width <= HOST_BITS_PER_WIDE_INT * 2
617 && (GET_CODE (trueop) == CONST_DOUBLE
618 || GET_CODE (trueop) == CONST_INT))
619 {
620 unsigned HOST_WIDE_INT l1, lv;
621 HOST_WIDE_INT h1, hv;
622
623 if (GET_CODE (trueop) == CONST_DOUBLE)
624 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
625 else
626 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
627
628 switch (code)
629 {
630 case NOT:
631 lv = ~ l1;
632 hv = ~ h1;
633 break;
634
635 case NEG:
636 neg_double (l1, h1, &lv, &hv);
637 break;
638
639 case ABS:
640 if (h1 < 0)
641 neg_double (l1, h1, &lv, &hv);
642 else
643 lv = l1, hv = h1;
644 break;
645
646 case FFS:
647 hv = 0;
648 if (l1 == 0)
649 {
650 if (h1 == 0)
651 lv = 0;
652 else
653 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
654 }
655 else
656 lv = exact_log2 (l1 & -l1) + 1;
657 break;
658
659 case CLZ:
660 hv = 0;
661 if (h1 != 0)
662 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
663 - HOST_BITS_PER_WIDE_INT;
664 else if (l1 != 0)
665 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
666 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
667 lv = GET_MODE_BITSIZE (mode);
668 break;
669
670 case CTZ:
671 hv = 0;
672 if (l1 != 0)
673 lv = exact_log2 (l1 & -l1);
674 else if (h1 != 0)
675 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
676 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
677 lv = GET_MODE_BITSIZE (mode);
678 break;
679
680 case POPCOUNT:
681 hv = 0;
682 lv = 0;
683 while (l1)
684 lv++, l1 &= l1 - 1;
685 while (h1)
686 lv++, h1 &= h1 - 1;
687 break;
688
689 case PARITY:
690 hv = 0;
691 lv = 0;
692 while (l1)
693 lv++, l1 &= l1 - 1;
694 while (h1)
695 lv++, h1 &= h1 - 1;
696 lv &= 1;
697 break;
698
699 case TRUNCATE:
700 /* This is just a change-of-mode, so do nothing. */
701 lv = l1, hv = h1;
702 break;
703
704 case ZERO_EXTEND:
705 if (op_mode == VOIDmode)
706 abort ();
707
708 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
709 return 0;
710
711 hv = 0;
712 lv = l1 & GET_MODE_MASK (op_mode);
713 break;
714
715 case SIGN_EXTEND:
716 if (op_mode == VOIDmode
717 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
718 return 0;
719 else
720 {
721 lv = l1 & GET_MODE_MASK (op_mode);
722 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
723 && (lv & ((HOST_WIDE_INT) 1
724 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
725 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
726
727 hv = HWI_SIGN_EXTEND (lv);
728 }
729 break;
730
731 case SQRT:
732 return 0;
733
734 default:
735 return 0;
736 }
737
738 return immed_double_const (lv, hv, mode);
739 }
740
741 else if (GET_CODE (trueop) == CONST_DOUBLE
742 && GET_MODE_CLASS (mode) == MODE_FLOAT)
743 {
744 REAL_VALUE_TYPE d, t;
745 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
746
747 switch (code)
748 {
749 case SQRT:
750 if (HONOR_SNANS (mode) && real_isnan (&d))
751 return 0;
752 real_sqrt (&t, mode, &d);
753 d = t;
754 break;
755 case ABS:
756 d = REAL_VALUE_ABS (d);
757 break;
758 case NEG:
759 d = REAL_VALUE_NEGATE (d);
760 break;
761 case FLOAT_TRUNCATE:
762 d = real_value_truncate (mode, d);
763 break;
764 case FLOAT_EXTEND:
765 /* All this does is change the mode. */
766 break;
767 case FIX:
768 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
769 break;
770
771 default:
772 abort ();
773 }
774 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
775 }
776
777 else if (GET_CODE (trueop) == CONST_DOUBLE
778 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
779 && GET_MODE_CLASS (mode) == MODE_INT
780 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
781 {
782 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
783 operators are intentionally left unspecified (to ease implementation
784 by target backends), for consistency, this routine implements the
785 same semantics for constant folding as used by the middle-end. */
786
787 HOST_WIDE_INT xh, xl, th, tl;
788 REAL_VALUE_TYPE x, t;
789 REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
790 switch (code)
791 {
792 case FIX:
793 if (REAL_VALUE_ISNAN (x))
794 return const0_rtx;
795
796 /* Test against the signed upper bound. */
797 if (width > HOST_BITS_PER_WIDE_INT)
798 {
799 th = ((unsigned HOST_WIDE_INT) 1
800 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
801 tl = -1;
802 }
803 else
804 {
805 th = 0;
806 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
807 }
808 real_from_integer (&t, VOIDmode, tl, th, 0);
809 if (REAL_VALUES_LESS (t, x))
810 {
811 xh = th;
812 xl = tl;
813 break;
814 }
815
816 /* Test against the signed lower bound. */
817 if (width > HOST_BITS_PER_WIDE_INT)
818 {
819 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
820 tl = 0;
821 }
822 else
823 {
824 th = -1;
825 tl = (HOST_WIDE_INT) -1 << (width - 1);
826 }
827 real_from_integer (&t, VOIDmode, tl, th, 0);
828 if (REAL_VALUES_LESS (x, t))
829 {
830 xh = th;
831 xl = tl;
832 break;
833 }
834 REAL_VALUE_TO_INT (&xl, &xh, x);
835 break;
836
837 case UNSIGNED_FIX:
838 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
839 return const0_rtx;
840
841 /* Test against the unsigned upper bound. */
842 if (width == 2*HOST_BITS_PER_WIDE_INT)
843 {
844 th = -1;
845 tl = -1;
846 }
847 else if (width >= HOST_BITS_PER_WIDE_INT)
848 {
849 th = ((unsigned HOST_WIDE_INT) 1
850 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
851 tl = -1;
852 }
853 else
854 {
855 th = 0;
856 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
857 }
858 real_from_integer (&t, VOIDmode, tl, th, 1);
859 if (REAL_VALUES_LESS (t, x))
860 {
861 xh = th;
862 xl = tl;
863 break;
864 }
865
866 REAL_VALUE_TO_INT (&xl, &xh, x);
867 break;
868
869 default:
870 abort ();
871 }
872 return immed_double_const (xl, xh, mode);
873 }
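/* For example, (fix:QI) of the CONST_DOUBLE 300.0 exceeds the
   signed upper bound 127 computed above, so the fold saturates to
   (const_int 127); a NaN operand folds to const0_rtx.  */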
874
875 /* This was formerly used only for non-IEEE float.
876 eggert@twinsun.com says it is safe for IEEE also. */
877 else
878 {
879 enum rtx_code reversed;
880 rtx temp;
881
882 /* There are some simplifications we can do even if the operands
883 aren't constant. */
884 switch (code)
885 {
886 case NOT:
887 /* (not (not X)) == X. */
888 if (GET_CODE (op) == NOT)
889 return XEXP (op, 0);
890
891 /* (not (eq X Y)) == (ne X Y), etc. */
892 if (GET_RTX_CLASS (GET_CODE (op)) == '<'
893 && (mode == BImode || STORE_FLAG_VALUE == -1)
894 && ((reversed = reversed_comparison_code (op, NULL_RTX))
895 != UNKNOWN))
896 return simplify_gen_relational (reversed, mode, VOIDmode,
897 XEXP (op, 0), XEXP (op, 1));
898
899 /* (not (plus X -1)) can become (neg X). */
900 if (GET_CODE (op) == PLUS
901 && XEXP (op, 1) == constm1_rtx)
902 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
903
904 /* Similarly, (not (neg X)) is (plus X -1). */
905 if (GET_CODE (op) == NEG)
906 return plus_constant (XEXP (op, 0), -1);
907
908 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
909 if (GET_CODE (op) == XOR
910 && GET_CODE (XEXP (op, 1)) == CONST_INT
911 && (temp = simplify_unary_operation (NOT, mode,
912 XEXP (op, 1),
913 mode)) != 0)
914 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
915
915
916
917 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
918 operands other than 1, but that is not valid. We could do a
919 similar simplification for (not (lshiftrt C X)) where C is
920 just the sign bit, but this doesn't seem common enough to
921 bother with. */
922 if (GET_CODE (op) == ASHIFT
923 && XEXP (op, 0) == const1_rtx)
924 {
925 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
926 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
927 }
928
929 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
930 by reversing the comparison code if valid. */
931 if (STORE_FLAG_VALUE == -1
932 && GET_RTX_CLASS (GET_CODE (op)) == '<'
933 && (reversed = reversed_comparison_code (op, NULL_RTX))
934 != UNKNOWN)
935 return simplify_gen_relational (reversed, mode, VOIDmode,
936 XEXP (op, 0), XEXP (op, 1));
937
938 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
939 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
940 so we can perform the above simplification. */
941
942 if (STORE_FLAG_VALUE == -1
943 && GET_CODE (op) == ASHIFTRT
944 && GET_CODE (XEXP (op, 1)) == CONST_INT
945 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
946 return simplify_gen_relational (GE, mode, VOIDmode,
947 XEXP (op, 0), const0_rtx);
948
949 break;
950
951 case NEG:
952 /* (neg (neg X)) == X. */
953 if (GET_CODE (op) == NEG)
954 return XEXP (op, 0);
955
956 /* (neg (plus X 1)) can become (not X). */
957 if (GET_CODE (op) == PLUS
958 && XEXP (op, 1) == const1_rtx)
959 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
960
961 /* Similarly, (neg (not X)) is (plus X 1). */
962 if (GET_CODE (op) == NOT)
963 return plus_constant (XEXP (op, 0), 1);
964
965 /* (neg (minus X Y)) can become (minus Y X). This transformation
966 isn't safe for modes with signed zeros, since if X and Y are
967 both +0, (minus Y X) is the same as (minus X Y). If the
968 rounding mode is towards +infinity (or -infinity) then the two
969 expressions will be rounded differently. */
970 if (GET_CODE (op) == MINUS
971 && !HONOR_SIGNED_ZEROS (mode)
972 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
973 return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
974 XEXP (op, 0));
975
976 if (GET_CODE (op) == PLUS
977 && !HONOR_SIGNED_ZEROS (mode)
978 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
979 {
980 /* (neg (plus A C)) is simplified to (minus -C A). */
981 if (GET_CODE (XEXP (op, 1)) == CONST_INT
982 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
983 {
984 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
985 mode);
986 if (temp)
987 return simplify_gen_binary (MINUS, mode, temp,
988 XEXP (op, 0));
989 }
990
991 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
992 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
993 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
994 }
995
996 /* (neg (mult A B)) becomes (mult (neg A) B).
997 This works even for floating-point values. */
998 if (GET_CODE (op) == MULT
999 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1000 {
1001 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1002 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
1003 }
1004
1005 /* NEG commutes with ASHIFT since it is multiplication. Only do
1006 this if we can then eliminate the NEG (e.g., if the operand
1007 is a constant). */
1008 if (GET_CODE (op) == ASHIFT)
1009 {
1010 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
1011 mode);
1012 if (temp)
1013 return simplify_gen_binary (ASHIFT, mode, temp,
1014 XEXP (op, 1));
1015 }
1016
1017 break;
1018
1019 case SIGN_EXTEND:
1020 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1021 becomes just the MINUS if its mode is MODE. This allows
1022 folding switch statements on machines using casesi (such as
1023 the VAX). */
1024 if (GET_CODE (op) == TRUNCATE
1025 && GET_MODE (XEXP (op, 0)) == mode
1026 && GET_CODE (XEXP (op, 0)) == MINUS
1027 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1028 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1029 return XEXP (op, 0);
1030
1031 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1032 if (! POINTERS_EXTEND_UNSIGNED
1033 && mode == Pmode && GET_MODE (op) == ptr_mode
1034 && (CONSTANT_P (op)
1035 || (GET_CODE (op) == SUBREG
1036 && GET_CODE (SUBREG_REG (op)) == REG
1037 && REG_POINTER (SUBREG_REG (op))
1038 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1039 return convert_memory_address (Pmode, op);
1040 #endif
1041 break;
1042
1043 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1044 case ZERO_EXTEND:
1045 if (POINTERS_EXTEND_UNSIGNED > 0
1046 && mode == Pmode && GET_MODE (op) == ptr_mode
1047 && (CONSTANT_P (op)
1048 || (GET_CODE (op) == SUBREG
1049 && GET_CODE (SUBREG_REG (op)) == REG
1050 && REG_POINTER (SUBREG_REG (op))
1051 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1052 return convert_memory_address (Pmode, op);
1053 break;
1054 #endif
1055
1056 default:
1057 break;
1058 }
1059
1060 return 0;
1061 }
1062 }
1063 \f
1064 /* Subroutine of simplify_associative_operation. Return true if rtx OP
1065 is a suitable integer or floating point immediate constant. */
1066 static bool
1067 associative_constant_p (rtx op)
1068 {
1069 if (GET_CODE (op) == CONST_INT
1070 || GET_CODE (op) == CONST_DOUBLE)
1071 return true;
1072 op = avoid_constant_pool_reference (op);
1073 return GET_CODE (op) == CONST_INT
1074 || GET_CODE (op) == CONST_DOUBLE;
1075 }
1076
1077 /* Subroutine of simplify_binary_operation to simplify an associative
1078 binary operation CODE with result mode MODE, operating on OP0 and OP1.
1079 Return 0 if no simplification is possible. */
1080 static rtx
1081 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1082 rtx op0, rtx op1)
1083 {
1084 rtx tem;
1085
1086 /* Simplify (x op c1) op c2 as x op (c1 op c2). */
1087 if (GET_CODE (op0) == code
1088 && associative_constant_p (op1)
1089 && associative_constant_p (XEXP (op0, 1)))
1090 {
1091 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1092 if (! tem)
1093 return tem;
1094 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1095 }
1096
1097 /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2). */
1098 if (GET_CODE (op0) == code
1099 && GET_CODE (op1) == code
1100 && associative_constant_p (XEXP (op0, 1))
1101 && associative_constant_p (XEXP (op1, 1)))
1102 {
1103 rtx c = simplify_binary_operation (code, mode,
1104 XEXP (op0, 1), XEXP (op1, 1));
1105 if (! c)
1106 return 0;
1107 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1108 return simplify_gen_binary (code, mode, tem, c);
1109 }
1110
1111 /* Canonicalize (x op c) op y as (x op y) op c. */
1112 if (GET_CODE (op0) == code
1113 && associative_constant_p (XEXP (op0, 1)))
1114 {
1115 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1116 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1117 }
1118
1119 /* Canonicalize x op (y op c) as (x op y) op c. */
1120 if (GET_CODE (op1) == code
1121 && associative_constant_p (XEXP (op1, 1)))
1122 {
1123 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1124 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1125 }
1126
1127 return 0;
1128 }
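/* Worked example: given (plus:SI (plus:SI x (const_int 2))
   (const_int 3)), the first rule folds the two constants and
   returns (plus:SI x (const_int 5)); the canonicalization rules
   merely float a constant to the outermost position so a later
   fold can catch it.  */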
1129
1130 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1131 and OP1. Return 0 if no simplification is possible.
1132
1133 Don't use this for relational operations such as EQ or LT.
1134 Use simplify_relational_operation instead. */
1135 rtx
1136 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1137 rtx op0, rtx op1)
1138 {
1139 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
1140 HOST_WIDE_INT val;
1141 unsigned int width = GET_MODE_BITSIZE (mode);
1142 rtx tem;
1143 rtx trueop0 = avoid_constant_pool_reference (op0);
1144 rtx trueop1 = avoid_constant_pool_reference (op1);
1145
1146 /* Relational operations don't work here. We must know the mode
1147 of the operands in order to do the comparison correctly.
1148 Assuming a full word can give incorrect results.
1149 Consider comparing 128 with -128 in QImode. */
1150
1151 if (GET_RTX_CLASS (code) == '<')
1152 abort ();
1153
1154 /* Make sure the constant is second. */
1155 if (GET_RTX_CLASS (code) == 'c'
1156 && swap_commutative_operands_p (trueop0, trueop1))
1157 {
1158 tem = op0, op0 = op1, op1 = tem;
1159 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
1160 }
1161
1162 if (VECTOR_MODE_P (mode)
1163 && GET_CODE (trueop0) == CONST_VECTOR
1164 && GET_CODE (trueop1) == CONST_VECTOR)
1165 {
1166 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1167 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1168 enum machine_mode op0mode = GET_MODE (trueop0);
1169 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
1170 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
1171 enum machine_mode op1mode = GET_MODE (trueop1);
1172 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
1173 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
1174 rtvec v = rtvec_alloc (n_elts);
1175 unsigned int i;
1176
1177 if (op0_n_elts != n_elts || op1_n_elts != n_elts)
1178 abort ();
1179
1180 for (i = 0; i < n_elts; i++)
1181 {
1182 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
1183 CONST_VECTOR_ELT (trueop0, i),
1184 CONST_VECTOR_ELT (trueop1, i));
1185 if (!x)
1186 return 0;
1187 RTVEC_ELT (v, i) = x;
1188 }
1189
1190 return gen_rtx_CONST_VECTOR (mode, v);
1191 }
1192
1193 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1194 && GET_CODE (trueop0) == CONST_DOUBLE
1195 && GET_CODE (trueop1) == CONST_DOUBLE
1196 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
1197 {
1198 REAL_VALUE_TYPE f0, f1, value;
1199
1200 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
1201 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
1202 f0 = real_value_truncate (mode, f0);
1203 f1 = real_value_truncate (mode, f1);
1204
1205 if (HONOR_SNANS (mode)
1206 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
1207 return 0;
1208
1209 if (code == DIV
1210 && REAL_VALUES_EQUAL (f1, dconst0)
1211 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1212 return 0;
1213
1214 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
1215
1216 value = real_value_truncate (mode, value);
1217 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
1218 }
1219
1220 /* We can fold some multi-word operations. */
1221 if (GET_MODE_CLASS (mode) == MODE_INT
1222 && width == HOST_BITS_PER_WIDE_INT * 2
1223 && (GET_CODE (trueop0) == CONST_DOUBLE
1224 || GET_CODE (trueop0) == CONST_INT)
1225 && (GET_CODE (trueop1) == CONST_DOUBLE
1226 || GET_CODE (trueop1) == CONST_INT))
1227 {
1228 unsigned HOST_WIDE_INT l1, l2, lv;
1229 HOST_WIDE_INT h1, h2, hv;
1230
1231 if (GET_CODE (trueop0) == CONST_DOUBLE)
1232 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
1233 else
1234 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
1235
1236 if (GET_CODE (trueop1) == CONST_DOUBLE)
1237 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
1238 else
1239 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
1240
1241 switch (code)
1242 {
1243 case MINUS:
1244 /* A - B == A + (-B). */
1245 neg_double (l2, h2, &lv, &hv);
1246 l2 = lv, h2 = hv;
1247
1248 /* Fall through.... */
1249
1250 case PLUS:
1251 add_double (l1, h1, l2, h2, &lv, &hv);
1252 break;
1253
1254 case MULT:
1255 mul_double (l1, h1, l2, h2, &lv, &hv);
1256 break;
1257
1258 case DIV: case MOD: case UDIV: case UMOD:
1259 /* We'd need to include tree.h to do this and it doesn't seem worth
1260 it. */
1261 return 0;
1262
1263 case AND:
1264 lv = l1 & l2, hv = h1 & h2;
1265 break;
1266
1267 case IOR:
1268 lv = l1 | l2, hv = h1 | h2;
1269 break;
1270
1271 case XOR:
1272 lv = l1 ^ l2, hv = h1 ^ h2;
1273 break;
1274
1275 case SMIN:
1276 if (h1 < h2
1277 || (h1 == h2
1278 && ((unsigned HOST_WIDE_INT) l1
1279 < (unsigned HOST_WIDE_INT) l2)))
1280 lv = l1, hv = h1;
1281 else
1282 lv = l2, hv = h2;
1283 break;
1284
1285 case SMAX:
1286 if (h1 > h2
1287 || (h1 == h2
1288 && ((unsigned HOST_WIDE_INT) l1
1289 > (unsigned HOST_WIDE_INT) l2)))
1290 lv = l1, hv = h1;
1291 else
1292 lv = l2, hv = h2;
1293 break;
1294
1295 case UMIN:
1296 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1297 || (h1 == h2
1298 && ((unsigned HOST_WIDE_INT) l1
1299 < (unsigned HOST_WIDE_INT) l2)))
1300 lv = l1, hv = h1;
1301 else
1302 lv = l2, hv = h2;
1303 break;
1304
1305 case UMAX:
1306 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1307 || (h1 == h2
1308 && ((unsigned HOST_WIDE_INT) l1
1309 > (unsigned HOST_WIDE_INT) l2)))
1310 lv = l1, hv = h1;
1311 else
1312 lv = l2, hv = h2;
1313 break;
1314
1315 case LSHIFTRT: case ASHIFTRT:
1316 case ASHIFT:
1317 case ROTATE: case ROTATERT:
1318 #ifdef SHIFT_COUNT_TRUNCATED
1319 if (SHIFT_COUNT_TRUNCATED)
1320 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1321 #endif
1322
1323 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1324 return 0;
1325
1326 if (code == LSHIFTRT || code == ASHIFTRT)
1327 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1328 code == ASHIFTRT);
1329 else if (code == ASHIFT)
1330 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1331 else if (code == ROTATE)
1332 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1333 else /* code == ROTATERT */
1334 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1335 break;
1336
1337 default:
1338 return 0;
1339 }
1340
1341 return immed_double_const (lv, hv, mode);
1342 }
1343
1344 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1345 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1346 {
1347 /* Even if we can't compute a constant result,
1348 there are some cases worth simplifying. */
1349
1350 switch (code)
1351 {
1352 case PLUS:
1353 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1354 when x is NaN, infinite, or finite and nonzero. They aren't
1355 when x is -0 and the rounding mode is not towards -infinity,
1356 since (-0) + 0 is then 0. */
1357 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1358 return op0;
1359
1360 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1361 transformations are safe even for IEEE. */
1362 if (GET_CODE (op0) == NEG)
1363 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1364 else if (GET_CODE (op1) == NEG)
1365 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1366
1367 /* (~a) + 1 -> -a */
1368 if (INTEGRAL_MODE_P (mode)
1369 && GET_CODE (op0) == NOT
1370 && trueop1 == const1_rtx)
1371 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1372
1373 /* Handle both-operands-constant cases. We can only add
1374 CONST_INTs to constants since the sum of relocatable symbols
1375 can't be handled by most assemblers. Don't add CONST_INT
1376 to CONST_INT since overflow won't be computed properly if wider
1377 than HOST_BITS_PER_WIDE_INT. */
1378
1379 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1380 && GET_CODE (op1) == CONST_INT)
1381 return plus_constant (op0, INTVAL (op1));
1382 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1383 && GET_CODE (op0) == CONST_INT)
1384 return plus_constant (op1, INTVAL (op0));
1385
1386 /* See if this is something like X * C - X or vice versa or
1387 if the multiplication is written as a shift. If so, we can
1388 distribute and make a new multiply, shift, or maybe just
1389 have X (if C is 2 in the example above). But don't make
1390 real multiply if we didn't have one before. */
1391
1392 if (! FLOAT_MODE_P (mode))
1393 {
1394 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1395 rtx lhs = op0, rhs = op1;
1396 int had_mult = 0;
1397
1398 if (GET_CODE (lhs) == NEG)
1399 coeff0 = -1, lhs = XEXP (lhs, 0);
1400 else if (GET_CODE (lhs) == MULT
1401 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1402 {
1403 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1404 had_mult = 1;
1405 }
1406 else if (GET_CODE (lhs) == ASHIFT
1407 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1408 && INTVAL (XEXP (lhs, 1)) >= 0
1409 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1410 {
1411 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1412 lhs = XEXP (lhs, 0);
1413 }
1414
1415 if (GET_CODE (rhs) == NEG)
1416 coeff1 = -1, rhs = XEXP (rhs, 0);
1417 else if (GET_CODE (rhs) == MULT
1418 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1419 {
1420 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1421 had_mult = 1;
1422 }
1423 else if (GET_CODE (rhs) == ASHIFT
1424 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1425 && INTVAL (XEXP (rhs, 1)) >= 0
1426 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1427 {
1428 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1429 rhs = XEXP (rhs, 0);
1430 }
1431
1432 if (rtx_equal_p (lhs, rhs))
1433 {
1434 tem = simplify_gen_binary (MULT, mode, lhs,
1435 GEN_INT (coeff0 + coeff1));
1436 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1437 }
1438 }
1439
1440 /* If one of the operands is a PLUS or a MINUS, see if we can
1441 simplify this by the associative law.
1442 Don't use the associative law for floating point.
1443 The inaccuracy makes it nonassociative,
1444 and subtle programs can break if operations are associated. */
1445
1446 if (INTEGRAL_MODE_P (mode)
1447 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1448 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1449 || (GET_CODE (op0) == CONST
1450 && GET_CODE (XEXP (op0, 0)) == PLUS)
1451 || (GET_CODE (op1) == CONST
1452 && GET_CODE (XEXP (op1, 0)) == PLUS))
1453 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1454 return tem;
1455
1456 /* Reassociate floating point addition only when the user
1457 specifies unsafe math optimizations. */
1458 if (FLOAT_MODE_P (mode)
1459 && flag_unsafe_math_optimizations)
1460 {
1461 tem = simplify_associative_operation (code, mode, op0, op1);
1462 if (tem)
1463 return tem;
1464 }
1465 break;
1466
1467 case COMPARE:
1468 #ifdef HAVE_cc0
1469 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1470 using cc0, in which case we want to leave it as a COMPARE
1471 so we can distinguish it from a register-register-copy.
1472
1473 In IEEE floating point, x-0 is not the same as x. */
1474
1475 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1476 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1477 && trueop1 == CONST0_RTX (mode))
1478 return op0;
1479 #endif
1480
1481 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1482 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1483 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1484 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1485 {
1486 rtx xop00 = XEXP (op0, 0);
1487 rtx xop10 = XEXP (op1, 0);
1488
1489 #ifdef HAVE_cc0
1490 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1491 #else
1492 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1493 && GET_MODE (xop00) == GET_MODE (xop10)
1494 && REGNO (xop00) == REGNO (xop10)
1495 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1496 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1497 #endif
1498 return xop00;
1499 }
1500 break;
1501
1502 case MINUS:
1503 /* We can't assume x-x is 0 even with non-IEEE floating point,
1504 but since it is zero except in very strange circumstances, we
1505 will treat it as zero with -funsafe-math-optimizations. */
1506 if (rtx_equal_p (trueop0, trueop1)
1507 && ! side_effects_p (op0)
1508 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1509 return CONST0_RTX (mode);
1510
1511 /* Change subtraction from zero into negation. (0 - x) is the
1512 same as -x when x is NaN, infinite, or finite and nonzero.
1513 But if the mode has signed zeros, and does not round towards
1514 -infinity, then 0 - 0 is 0, not -0. */
1515 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1516 return simplify_gen_unary (NEG, mode, op1, mode);
1517
1518 /* (-1 - a) is ~a. */
1519 if (trueop0 == constm1_rtx)
1520 return simplify_gen_unary (NOT, mode, op1, mode);
1521
1522 /* Subtracting 0 has no effect unless the mode has signed zeros
1523 and supports rounding towards -infinity. In such a case,
1524 0 - 0 is -0. */
1525 if (!(HONOR_SIGNED_ZEROS (mode)
1526 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1527 && trueop1 == CONST0_RTX (mode))
1528 return op0;
1529
1530 /* See if this is something like X * C - X or vice versa or
1531 if the multiplication is written as a shift. If so, we can
1532 distribute and make a new multiply, shift, or maybe just
1533 have X (if C is 2 in the example above). But don't make
1534 real multiply if we didn't have one before. */
1535
1536 if (! FLOAT_MODE_P (mode))
1537 {
1538 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1539 rtx lhs = op0, rhs = op1;
1540 int had_mult = 0;
1541
1542 if (GET_CODE (lhs) == NEG)
1543 coeff0 = -1, lhs = XEXP (lhs, 0);
1544 else if (GET_CODE (lhs) == MULT
1545 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1546 {
1547 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1548 had_mult = 1;
1549 }
1550 else if (GET_CODE (lhs) == ASHIFT
1551 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1552 && INTVAL (XEXP (lhs, 1)) >= 0
1553 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1554 {
1555 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1556 lhs = XEXP (lhs, 0);
1557 }
1558
1559 if (GET_CODE (rhs) == NEG)
1560 coeff1 = -1, rhs = XEXP (rhs, 0);
1561 else if (GET_CODE (rhs) == MULT
1562 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1563 {
1564 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1565 had_mult = 1;
1566 }
1567 else if (GET_CODE (rhs) == ASHIFT
1568 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1569 && INTVAL (XEXP (rhs, 1)) >= 0
1570 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1571 {
1572 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1573 rhs = XEXP (rhs, 0);
1574 }
1575
1576 if (rtx_equal_p (lhs, rhs))
1577 {
1578 tem = simplify_gen_binary (MULT, mode, lhs,
1579 GEN_INT (coeff0 - coeff1));
1580 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1581 }
1582 }
1583
1584 /* (a - (-b)) -> (a + b). True even for IEEE. */
1585 if (GET_CODE (op1) == NEG)
1586 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1587
1588 /* (-x - c) may be simplified as (-c - x). */
1589 if (GET_CODE (op0) == NEG
1590 && (GET_CODE (op1) == CONST_INT
1591 || GET_CODE (op1) == CONST_DOUBLE))
1592 {
1593 tem = simplify_unary_operation (NEG, mode, op1, mode);
1594 if (tem)
1595 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1596 }
1597
1598 /* If one of the operands is a PLUS or a MINUS, see if we can
1599 simplify this by the associative law.
1600 Don't use the associative law for floating point.
1601 The inaccuracy makes it nonassociative,
1602 and subtle programs can break if operations are associated. */
1603
1604 if (INTEGRAL_MODE_P (mode)
1605 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1606 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1607 || (GET_CODE (op0) == CONST
1608 && GET_CODE (XEXP (op0, 0)) == PLUS)
1609 || (GET_CODE (op1) == CONST
1610 && GET_CODE (XEXP (op1, 0)) == PLUS))
1611 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1612 return tem;
1613
1614 /* Don't let a relocatable value get a negative coeff. */
1615 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1616 return simplify_gen_binary (PLUS, mode,
1617 op0,
1618 neg_const_int (mode, op1));
1619
1620 /* (x - (x & y)) -> (x & ~y) */
1621 if (GET_CODE (op1) == AND)
1622 {
1623 if (rtx_equal_p (op0, XEXP (op1, 0)))
1624 {
1625 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1626 GET_MODE (XEXP (op1, 1)));
1627 return simplify_gen_binary (AND, mode, op0, tem);
1628 }
1629 if (rtx_equal_p (op0, XEXP (op1, 1)))
1630 {
1631 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1632 GET_MODE (XEXP (op1, 0)));
1633 return simplify_gen_binary (AND, mode, op0, tem);
1634 }
1635 }
1636 break;
1637
1638 case MULT:
1639 if (trueop1 == constm1_rtx)
1640 return simplify_gen_unary (NEG, mode, op0, mode);
1641
1642 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1643 x is NaN, since x * 0 is then also NaN. Nor is it valid
1644 when the mode has signed zeros, since multiplying a negative
1645 number by 0 will give -0, not 0. */
1646 if (!HONOR_NANS (mode)
1647 && !HONOR_SIGNED_ZEROS (mode)
1648 && trueop1 == CONST0_RTX (mode)
1649 && ! side_effects_p (op0))
1650 return op1;
1651
1652 /* In IEEE floating point, x*1 is not equivalent to x for
1653 signalling NaNs. */
1654 if (!HONOR_SNANS (mode)
1655 && trueop1 == CONST1_RTX (mode))
1656 return op0;
1657
1658 /* Convert multiply by constant power of two into shift unless
1659 we are still generating RTL. This test is a kludge. */
1660 if (GET_CODE (trueop1) == CONST_INT
1661 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1662 /* If the mode is larger than the host word size, and the
1663 uppermost bit is set, then this isn't a power of two due
1664 to implicit sign extension. */
1665 && (width <= HOST_BITS_PER_WIDE_INT
1666 || val != HOST_BITS_PER_WIDE_INT - 1)
1667 && ! rtx_equal_function_value_matters)
1668 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1669
1670 /* x*2 is x+x and x*(-1) is -x */
1671 if (GET_CODE (trueop1) == CONST_DOUBLE
1672 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1673 && GET_MODE (op0) == mode)
1674 {
1675 REAL_VALUE_TYPE d;
1676 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1677
1678 if (REAL_VALUES_EQUAL (d, dconst2))
1679 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1680
1681 if (REAL_VALUES_EQUAL (d, dconstm1))
1682 return simplify_gen_unary (NEG, mode, op0, mode);
1683 }
1684
1685 /* Reassociate multiplication, but for floating point MULTs
1686 only when the user specifies unsafe math optimizations. */
1687 if (! FLOAT_MODE_P (mode)
1688 || flag_unsafe_math_optimizations)
1689 {
1690 tem = simplify_associative_operation (code, mode, op0, op1);
1691 if (tem)
1692 return tem;
1693 }
1694 break;
1695
1696 case IOR:
1697 if (trueop1 == const0_rtx)
1698 return op0;
1699 if (GET_CODE (trueop1) == CONST_INT
1700 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1701 == GET_MODE_MASK (mode)))
1702 return op1;
1703 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1704 return op0;
1705 /* A | (~A) -> -1 */
1706 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1707 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1708 && ! side_effects_p (op0)
1709 && GET_MODE_CLASS (mode) != MODE_CC)
1710 return constm1_rtx;
1711 tem = simplify_associative_operation (code, mode, op0, op1);
1712 if (tem)
1713 return tem;
1714 break;
1715
1716 case XOR:
1717 if (trueop1 == const0_rtx)
1718 return op0;
1719 if (GET_CODE (trueop1) == CONST_INT
1720 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1721 == GET_MODE_MASK (mode)))
1722 return simplify_gen_unary (NOT, mode, op0, mode);
1723 if (trueop0 == trueop1 && ! side_effects_p (op0)
1724 && GET_MODE_CLASS (mode) != MODE_CC)
1725 return const0_rtx;
1726 tem = simplify_associative_operation (code, mode, op0, op1);
1727 if (tem)
1728 return tem;
1729 break;
1730
1731 case AND:
1732 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1733 return const0_rtx;
1734 if (GET_CODE (trueop1) == CONST_INT
1735 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1736 == GET_MODE_MASK (mode)))
1737 return op0;
1738 if (trueop0 == trueop1 && ! side_effects_p (op0)
1739 && GET_MODE_CLASS (mode) != MODE_CC)
1740 return op0;
1741 /* A & (~A) -> 0 */
1742 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1743 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1744 && ! side_effects_p (op0)
1745 && GET_MODE_CLASS (mode) != MODE_CC)
1746 return const0_rtx;
1747 tem = simplify_associative_operation (code, mode, op0, op1);
1748 if (tem)
1749 return tem;
1750 break;
1751
1752 case UDIV:
1753 /* Convert divide by power of two into shift (divide by 1 handled
1754 below). */
1755 if (GET_CODE (trueop1) == CONST_INT
1756 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1757 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
1758
1759 /* Fall through.... */
1760
1761 case DIV:
1762 if (trueop1 == CONST1_RTX (mode))
1763 {
1764 /* On some platforms DIV uses a narrower mode than its
1765 operands. */
1766 rtx x = gen_lowpart_common (mode, op0);
1767 if (x)
1768 return x;
1769 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1770 return gen_lowpart_SUBREG (mode, op0);
1771 else
1772 return op0;
1773 }
1774
1775 /* Maybe change 0 / x to 0. This transformation isn't safe for
1776 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1777 Nor is it safe for modes with signed zeros, since dividing
1778 0 by a negative number gives -0, not 0. */
1779 if (!HONOR_NANS (mode)
1780 && !HONOR_SIGNED_ZEROS (mode)
1781 && trueop0 == CONST0_RTX (mode)
1782 && ! side_effects_p (op1))
1783 return op0;
1784
1785 /* Change division by a constant into multiplication. Only do
1786 this with -funsafe-math-optimizations. */
1787 else if (GET_CODE (trueop1) == CONST_DOUBLE
1788 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1789 && trueop1 != CONST0_RTX (mode)
1790 && flag_unsafe_math_optimizations)
1791 {
1792 REAL_VALUE_TYPE d;
1793 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1794
1795 if (! REAL_VALUES_EQUAL (d, dconst0))
1796 {
1797 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1798 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1799 return simplify_gen_binary (MULT, mode, op0, tem);
1800 }
1801 }
1802 break;
1803
1804 case UMOD:
1805 /* Handle modulus by power of two (mod with 1 handled below). */
1806 if (GET_CODE (trueop1) == CONST_INT
1807 && exact_log2 (INTVAL (trueop1)) > 0)
1808 return simplify_gen_binary (AND, mode, op0,
1809 GEN_INT (INTVAL (op1) - 1));
1810
1811 /* Fall through.... */
1812
1813 case MOD:
1814 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1815 && ! side_effects_p (op0) && ! side_effects_p (op1))
1816 return const0_rtx;
1817 break;
1818
1819 case ROTATERT:
1820 case ROTATE:
1821 case ASHIFTRT:
1822 /* Rotating ~0 always results in ~0. */
1823 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1824 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1825 && ! side_effects_p (op1))
1826 return op0;
1827
1828 /* Fall through.... */
1829
1830 case ASHIFT:
1831 case LSHIFTRT:
1832 if (trueop1 == const0_rtx)
1833 return op0;
1834 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1835 return op0;
1836 break;
1837
1838 case SMIN:
1839 if (width <= HOST_BITS_PER_WIDE_INT
1840 && GET_CODE (trueop1) == CONST_INT
1841 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
1842 && ! side_effects_p (op0))
1843 return op1;
1844 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1845 return op0;
1846 tem = simplify_associative_operation (code, mode, op0, op1);
1847 if (tem)
1848 return tem;
1849 break;
1850
1851 case SMAX:
1852 if (width <= HOST_BITS_PER_WIDE_INT
1853 && GET_CODE (trueop1) == CONST_INT
1854 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1855 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1856 && ! side_effects_p (op0))
1857 return op1;
1858 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1859 return op0;
1860 tem = simplify_associative_operation (code, mode, op0, op1);
1861 if (tem)
1862 return tem;
1863 break;
1864
1865 case UMIN:
1866 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1867 return op1;
1868 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1869 return op0;
1870 tem = simplify_associative_operation (code, mode, op0, op1);
1871 if (tem)
1872 return tem;
1873 break;
1874
1875 case UMAX:
1876 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1877 return op1;
1878 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1879 return op0;
1880 tem = simplify_associative_operation (code, mode, op0, op1);
1881 if (tem)
1882 return tem;
1883 break;
1884
1885 case SS_PLUS:
1886 case US_PLUS:
1887 case SS_MINUS:
1888 case US_MINUS:
1889 /* ??? There are simplifications that can be done. */
1890 return 0;
1891
1892 case VEC_SELECT:
1893 if (!VECTOR_MODE_P (mode))
1894 {
1895 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1896 || (mode
1897 != GET_MODE_INNER (GET_MODE (trueop0)))
1898 || GET_CODE (trueop1) != PARALLEL
1899 || XVECLEN (trueop1, 0) != 1
1900 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
1901 abort ();
1902
1903 if (GET_CODE (trueop0) == CONST_VECTOR)
1904 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
1905 }
1906 else
1907 {
1908 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1909 || (GET_MODE_INNER (mode)
1910 != GET_MODE_INNER (GET_MODE (trueop0)))
1911 || GET_CODE (trueop1) != PARALLEL)
1912 abort ();
1913
1914 if (GET_CODE (trueop0) == CONST_VECTOR)
1915 {
1916 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1917 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1918 rtvec v = rtvec_alloc (n_elts);
1919 unsigned int i;
1920
1921 if (XVECLEN (trueop1, 0) != (int) n_elts)
1922 abort ();
1923 for (i = 0; i < n_elts; i++)
1924 {
1925 rtx x = XVECEXP (trueop1, 0, i);
1926
1927 if (GET_CODE (x) != CONST_INT)
1928 abort ();
1929 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
1930 }
1931
1932 return gen_rtx_CONST_VECTOR (mode, v);
1933 }
1934 }
1935 return 0;
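/* (vec_concat OP0 OP1) builds a vector from the elements of OP0
   followed by those of OP1; either operand may itself be a vector or
   a single element of the result's inner mode. */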
1936 case VEC_CONCAT:
1937 {
1938 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1939 ? GET_MODE (trueop0)
1940 : GET_MODE_INNER (mode));
1941 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
1942 ? GET_MODE (trueop1)
1943 : GET_MODE_INNER (mode));
1944
1945 if (!VECTOR_MODE_P (mode)
1946 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
1947 != GET_MODE_SIZE (mode)))
1948 abort ();
1949
1950 if ((VECTOR_MODE_P (op0_mode)
1951 && (GET_MODE_INNER (mode)
1952 != GET_MODE_INNER (op0_mode)))
1953 || (!VECTOR_MODE_P (op0_mode)
1954 && GET_MODE_INNER (mode) != op0_mode))
1955 abort ();
1956
1957 if ((VECTOR_MODE_P (op1_mode)
1958 && (GET_MODE_INNER (mode)
1959 != GET_MODE_INNER (op1_mode)))
1960 || (!VECTOR_MODE_P (op1_mode)
1961 && GET_MODE_INNER (mode) != op1_mode))
1962 abort ();
1963
1964 if ((GET_CODE (trueop0) == CONST_VECTOR
1965 || GET_CODE (trueop0) == CONST_INT
1966 || GET_CODE (trueop0) == CONST_DOUBLE)
1967 && (GET_CODE (trueop1) == CONST_VECTOR
1968 || GET_CODE (trueop1) == CONST_INT
1969 || GET_CODE (trueop1) == CONST_DOUBLE))
1970 {
1971 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1972 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1973 rtvec v = rtvec_alloc (n_elts);
1974 unsigned int i;
1975 unsigned in_n_elts = 1;
1976
1977 if (VECTOR_MODE_P (op0_mode))
1978 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
1979 for (i = 0; i < n_elts; i++)
1980 {
1981 if (i < in_n_elts)
1982 {
1983 if (!VECTOR_MODE_P (op0_mode))
1984 RTVEC_ELT (v, i) = trueop0;
1985 else
1986 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
1987 }
1988 else
1989 {
1990 if (!VECTOR_MODE_P (op1_mode))
1991 RTVEC_ELT (v, i) = trueop1;
1992 else
1993 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
1994 i - in_n_elts);
1995 }
1996 }
1997
1998 return gen_rtx_CONST_VECTOR (mode, v);
1999 }
2000 }
2001 return 0;
2002
2003 default:
2004 abort ();
2005 }
2006
2007 return 0;
2008 }
2009
2010 /* Get the integer argument values in two forms:
2011 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
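/* For example, in an 8-bit mode (const_int -1) gives ARG0 == 0xff
   but ARG0S == -1. */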
2012
2013 arg0 = INTVAL (trueop0);
2014 arg1 = INTVAL (trueop1);
2015
2016 if (width < HOST_BITS_PER_WIDE_INT)
2017 {
2018 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2019 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2020
2021 arg0s = arg0;
2022 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2023 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2024
2025 arg1s = arg1;
2026 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2027 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2028 }
2029 else
2030 {
2031 arg0s = arg0;
2032 arg1s = arg1;
2033 }
2034
2035 /* Compute the value of the arithmetic. */
2036
2037 switch (code)
2038 {
2039 case PLUS:
2040 val = arg0s + arg1s;
2041 break;
2042
2043 case MINUS:
2044 val = arg0s - arg1s;
2045 break;
2046
2047 case MULT:
2048 val = arg0s * arg1s;
2049 break;
2050
2051 case DIV:
2052 if (arg1s == 0
2053 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2054 && arg1s == -1))
2055 return 0;
2056 val = arg0s / arg1s;
2057 break;
2058
2059 case MOD:
2060 if (arg1s == 0
2061 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2062 && arg1s == -1))
2063 return 0;
2064 val = arg0s % arg1s;
2065 break;
2066
2067 case UDIV:
2068 if (arg1 == 0
2069 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2070 && arg1s == -1))
2071 return 0;
2072 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2073 break;
2074
2075 case UMOD:
2076 if (arg1 == 0
2077 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2078 && arg1s == -1))
2079 return 0;
2080 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2081 break;
2082
2083 case AND:
2084 val = arg0 & arg1;
2085 break;
2086
2087 case IOR:
2088 val = arg0 | arg1;
2089 break;
2090
2091 case XOR:
2092 val = arg0 ^ arg1;
2093 break;
2094
2095 case LSHIFTRT:
2096 /* If shift count is undefined, don't fold it; let the machine do
2097 what it wants. But truncate it if the machine will do that. */
2098 if (arg1 < 0)
2099 return 0;
2100
2101 #ifdef SHIFT_COUNT_TRUNCATED
2102 if (SHIFT_COUNT_TRUNCATED)
2103 arg1 %= width;
2104 #endif
2105
2106 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2107 break;
2108
2109 case ASHIFT:
2110 if (arg1 < 0)
2111 return 0;
2112
2113 #ifdef SHIFT_COUNT_TRUNCATED
2114 if (SHIFT_COUNT_TRUNCATED)
2115 arg1 %= width;
2116 #endif
2117
2118 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2119 break;
2120
2121 case ASHIFTRT:
2122 if (arg1 < 0)
2123 return 0;
2124
2125 #ifdef SHIFT_COUNT_TRUNCATED
2126 if (SHIFT_COUNT_TRUNCATED)
2127 arg1 %= width;
2128 #endif
2129
2130 val = arg0s >> arg1;
2131
2132 /* Bootstrap compiler may not have sign extended the right shift.
2133 Manually extend the sign to ensure bootstrap cc matches gcc. */
2134 if (arg0s < 0 && arg1 > 0)
2135 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2136
2137 break;
2138
2139 case ROTATERT:
2140 if (arg1 < 0)
2141 return 0;
2142
2143 arg1 %= width;
2144 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2145 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2146 break;
2147
2148 case ROTATE:
2149 if (arg1 < 0)
2150 return 0;
2151
2152 arg1 %= width;
2153 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2154 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2155 break;
2156
2157 case COMPARE:
2158 /* Do nothing here. */
2159 return 0;
2160
2161 case SMIN:
2162 val = arg0s <= arg1s ? arg0s : arg1s;
2163 break;
2164
2165 case UMIN:
2166 val = ((unsigned HOST_WIDE_INT) arg0
2167 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2168 break;
2169
2170 case SMAX:
2171 val = arg0s > arg1s ? arg0s : arg1s;
2172 break;
2173
2174 case UMAX:
2175 val = ((unsigned HOST_WIDE_INT) arg0
2176 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2177 break;
2178
2179 case SS_PLUS:
2180 case US_PLUS:
2181 case SS_MINUS:
2182 case US_MINUS:
2183 /* ??? There are simplifications that can be done. */
2184 return 0;
2185
2186 default:
2187 abort ();
2188 }
2189
2190 val = trunc_int_for_mode (val, mode);
2191
2192 return GEN_INT (val);
2193 }
2194 \f
2195 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2196 PLUS or MINUS.
2197
2198 Rather than test for specific cases, we do this by a brute-force method
2199 and do all possible simplifications until no more changes occur. Then
2200 we rebuild the operation.
2201
2202 If FORCE is true, then always generate the rtx. This is used to
2203 canonicalize stuff emitted from simplify_gen_binary. Note that this
2204 can still fail if the rtx is too complex. It won't fail just because
2205 the result is not 'simpler' than the input, however. */
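/* As an example of the brute-force method, (minus (plus A (const_int 4)) A)
   is flattened into the operand list {+A, +4, -A}; the pairwise
   simplification loop below then cancels A against -A, leaving
   (const_int 4). */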
2206
2207 struct simplify_plus_minus_op_data
2208 {
2209 rtx op;
2210 int neg;
2211 };
2212
2213 static int
2214 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2215 {
2216 const struct simplify_plus_minus_op_data *d1 = p1;
2217 const struct simplify_plus_minus_op_data *d2 = p2;
2218
2219 return (commutative_operand_precedence (d2->op)
2220 - commutative_operand_precedence (d1->op));
2221 }
2222
2223 static rtx
2224 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2225 rtx op1, int force)
2226 {
2227 struct simplify_plus_minus_op_data ops[8];
2228 rtx result, tem;
2229 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2230 int first, negate, changed;
2231 int i, j;
2232
2233 memset (ops, 0, sizeof ops);
2234
2235 /* Set up the two operands and then expand them until nothing has been
2236 changed. If we run out of room in our array, give up; this should
2237 almost never happen. */
2238
2239 ops[0].op = op0;
2240 ops[0].neg = 0;
2241 ops[1].op = op1;
2242 ops[1].neg = (code == MINUS);
2243
2244 do
2245 {
2246 changed = 0;
2247
2248 for (i = 0; i < n_ops; i++)
2249 {
2250 rtx this_op = ops[i].op;
2251 int this_neg = ops[i].neg;
2252 enum rtx_code this_code = GET_CODE (this_op);
2253
2254 switch (this_code)
2255 {
2256 case PLUS:
2257 case MINUS:
2258 if (n_ops == 7)
2259 return NULL_RTX;
2260
2261 ops[n_ops].op = XEXP (this_op, 1);
2262 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2263 n_ops++;
2264
2265 ops[i].op = XEXP (this_op, 0);
2266 input_ops++;
2267 changed = 1;
2268 break;
2269
2270 case NEG:
2271 ops[i].op = XEXP (this_op, 0);
2272 ops[i].neg = ! this_neg;
2273 changed = 1;
2274 break;
2275
2276 case CONST:
2277 if (n_ops < 7
2278 && GET_CODE (XEXP (this_op, 0)) == PLUS
2279 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2280 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2281 {
2282 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2283 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2284 ops[n_ops].neg = this_neg;
2285 n_ops++;
2286 input_consts++;
2287 changed = 1;
2288 }
2289 break;
2290
2291 case NOT:
2292 /* ~a -> (-a - 1) */
2293 if (n_ops != 7)
2294 {
2295 ops[n_ops].op = constm1_rtx;
2296 ops[n_ops++].neg = this_neg;
2297 ops[i].op = XEXP (this_op, 0);
2298 ops[i].neg = !this_neg;
2299 changed = 1;
2300 }
2301 break;
2302
2303 case CONST_INT:
2304 if (this_neg)
2305 {
2306 ops[i].op = neg_const_int (mode, this_op);
2307 ops[i].neg = 0;
2308 changed = 1;
2309 }
2310 break;
2311
2312 default:
2313 break;
2314 }
2315 }
2316 }
2317 while (changed);
2318
2319 /* If we only have two operands, we can't do anything. */
2320 if (n_ops <= 2 && !force)
2321 return NULL_RTX;
2322
2323 /* Count the number of CONSTs we didn't split above. */
2324 for (i = 0; i < n_ops; i++)
2325 if (GET_CODE (ops[i].op) == CONST)
2326 input_consts++;
2327
2328 /* Now simplify each pair of operands until nothing changes. The first
2329 time through just simplify constants against each other. */
2330
2331 first = 1;
2332 do
2333 {
2334 changed = first;
2335
2336 for (i = 0; i < n_ops - 1; i++)
2337 for (j = i + 1; j < n_ops; j++)
2338 {
2339 rtx lhs = ops[i].op, rhs = ops[j].op;
2340 int lneg = ops[i].neg, rneg = ops[j].neg;
2341
2342 if (lhs != 0 && rhs != 0
2343 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2344 {
2345 enum rtx_code ncode = PLUS;
2346
2347 if (lneg != rneg)
2348 {
2349 ncode = MINUS;
2350 if (lneg)
2351 tem = lhs, lhs = rhs, rhs = tem;
2352 }
2353 else if (swap_commutative_operands_p (lhs, rhs))
2354 tem = lhs, lhs = rhs, rhs = tem;
2355
2356 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2357
2358 /* Reject "simplifications" that just wrap the two
2359 arguments in a CONST. Failure to do so can result
2360 in infinite recursion with simplify_binary_operation
2361 when it calls us to simplify CONST operations. */
2362 if (tem
2363 && ! (GET_CODE (tem) == CONST
2364 && GET_CODE (XEXP (tem, 0)) == ncode
2365 && XEXP (XEXP (tem, 0), 0) == lhs
2366 && XEXP (XEXP (tem, 0), 1) == rhs)
2367 /* Don't allow -x + -1 -> ~x simplifications in the
2368 first pass. This allows us the chance to combine
2369 the -1 with other constants. */
2370 && ! (first
2371 && GET_CODE (tem) == NOT
2372 && XEXP (tem, 0) == rhs))
2373 {
2374 lneg &= rneg;
2375 if (GET_CODE (tem) == NEG)
2376 tem = XEXP (tem, 0), lneg = !lneg;
2377 if (GET_CODE (tem) == CONST_INT && lneg)
2378 tem = neg_const_int (mode, tem), lneg = 0;
2379
2380 ops[i].op = tem;
2381 ops[i].neg = lneg;
2382 ops[j].op = NULL_RTX;
2383 changed = 1;
2384 }
2385 }
2386 }
2387
2388 first = 0;
2389 }
2390 while (changed);
2391
2392 /* Pack all the operands to the lower-numbered entries. */
2393 for (i = 0, j = 0; j < n_ops; j++)
2394 if (ops[j].op)
2395 ops[i++] = ops[j];
2396 n_ops = i;
2397
2398 /* Sort the operations based on swap_commutative_operands_p. */
2399 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2400
2401 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2402 if (n_ops == 2
2403 && GET_CODE (ops[1].op) == CONST_INT
2404 && CONSTANT_P (ops[0].op)
2405 && ops[0].neg)
2406 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2407
2408 /* We suppressed creation of trivial CONST expressions in the
2409 combination loop to avoid recursion. Create one manually now.
2410 The combination loop should have ensured that there is exactly
2411 one CONST_INT, and the sort will have ensured that it is last
2412 in the array and that any other constant will be next-to-last. */
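/* For example, a trailing pair {(symbol_ref "s"), (const_int 4)} is
   combined by plus_constant into
   (const (plus (symbol_ref "s") (const_int 4))). */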
2413
2414 if (n_ops > 1
2415 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2416 && CONSTANT_P (ops[n_ops - 2].op))
2417 {
2418 rtx value = ops[n_ops - 1].op;
2419 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2420 value = neg_const_int (mode, value);
2421 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2422 n_ops--;
2423 }
2424
2425 /* Count the number of CONSTs that we generated. */
2426 n_consts = 0;
2427 for (i = 0; i < n_ops; i++)
2428 if (GET_CODE (ops[i].op) == CONST)
2429 n_consts++;
2430
2431 /* Give up if we didn't reduce the number of operands we had. Make
2432 sure we count a CONST as two operands. If we have the same
2433 number of operands, but have made more CONSTs than before, this
2434 is also an improvement, so accept it. */
2435 if (!force
2436 && (n_ops + n_consts > input_ops
2437 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2438 return NULL_RTX;
2439
2440 /* Put a non-negated operand first. If there aren't any, make all
2441 operands positive and negate the whole thing later. */
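/* E.g. the operand list {-A, -B} becomes {A, B} with NEGATE set,
   yielding (neg (plus A B)) below. */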
2442
2443 negate = 0;
2444 for (i = 0; i < n_ops && ops[i].neg; i++)
2445 continue;
2446 if (i == n_ops)
2447 {
2448 for (i = 0; i < n_ops; i++)
2449 ops[i].neg = 0;
2450 negate = 1;
2451 }
2452 else if (i != 0)
2453 {
2454 tem = ops[0].op;
2455 ops[0] = ops[i];
2456 ops[i].op = tem;
2457 ops[i].neg = 1;
2458 }
2459
2460 /* Now make the result by performing the requested operations. */
2461 result = ops[0].op;
2462 for (i = 1; i < n_ops; i++)
2463 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2464 mode, result, ops[i].op);
2465
2466 return negate ? gen_rtx_NEG (mode, result) : result;
2467 }
2468
2469 /* Like simplify_binary_operation except used for relational operators.
2470 MODE is the mode of the operands, not that of the result. If MODE
2471 is VOIDmode, both operands must also be VOIDmode and we compare the
2472 operands in "infinite precision".
2473
2474 If no simplification is possible, this function returns zero. Otherwise,
2475 it returns either const_true_rtx or const0_rtx. */
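/* For example, GT on (const_int 5) and (const_int 3) folds to
   const_true_rtx, while LT on the same operands folds to const0_rtx. */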
2476
2477 rtx
2478 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2479 rtx op0, rtx op1)
2480 {
2481 int equal, op0lt, op0ltu, op1lt, op1ltu;
2482 rtx tem;
2483 rtx trueop0;
2484 rtx trueop1;
2485
2486 if (mode == VOIDmode
2487 && (GET_MODE (op0) != VOIDmode
2488 || GET_MODE (op1) != VOIDmode))
2489 abort ();
2490
2491 /* If op0 is a compare, extract the comparison arguments from it. */
2492 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2493 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2494
2495 trueop0 = avoid_constant_pool_reference (op0);
2496 trueop1 = avoid_constant_pool_reference (op1);
2497
2498 /* We can't simplify MODE_CC values since we don't know what the
2499 actual comparison is. */
2500 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2501 return 0;
2502
2503 /* Make sure the constant is second. */
2504 if (swap_commutative_operands_p (trueop0, trueop1))
2505 {
2506 tem = op0, op0 = op1, op1 = tem;
2507 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2508 code = swap_condition (code);
2509 }
2510
2511 /* For integer comparisons of A and B maybe we can simplify A - B and can
2512 then simplify a comparison of that with zero. If A and B are both either
2513 a register or a CONST_INT, this can't help; testing for these cases will
2514 prevent infinite recursion here and speed things up.
2515
2516 If CODE is an unsigned comparison, then we can never do this optimization,
2517 because it gives an incorrect result if the subtraction wraps around zero.
2518 ANSI C defines unsigned operations such that they never overflow, and
2519 thus such cases cannot be ignored. */
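/* For instance, (lt (plus X (const_int 1)) X) has its operands
   subtracted to (const_int 1), and the recursive comparison of that
   with (const_int 0) under LT then folds to const0_rtx. */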
2520
2521 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2522 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2523 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2524 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2525 && code != GTU && code != GEU && code != LTU && code != LEU)
2526 return simplify_relational_operation (signed_condition (code),
2527 mode, tem, const0_rtx);
2528
2529 if (flag_unsafe_math_optimizations && code == ORDERED)
2530 return const_true_rtx;
2531
2532 if (flag_unsafe_math_optimizations && code == UNORDERED)
2533 return const0_rtx;
2534
2535 /* For modes without NaNs, if the two operands are equal, we know the
2536 result except if they have side-effects. */
2537 if (! HONOR_NANS (GET_MODE (trueop0))
2538 && rtx_equal_p (trueop0, trueop1)
2539 && ! side_effects_p (trueop0))
2540 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2541
2542 /* If the operands are floating-point constants, see if we can fold
2543 the result. */
2544 else if (GET_CODE (trueop0) == CONST_DOUBLE
2545 && GET_CODE (trueop1) == CONST_DOUBLE
2546 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2547 {
2548 REAL_VALUE_TYPE d0, d1;
2549
2550 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2551 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2552
2553 /* Comparisons are unordered iff at least one of the values is NaN. */
2554 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2555 switch (code)
2556 {
2557 case UNEQ:
2558 case UNLT:
2559 case UNGT:
2560 case UNLE:
2561 case UNGE:
2562 case NE:
2563 case UNORDERED:
2564 return const_true_rtx;
2565 case EQ:
2566 case LT:
2567 case GT:
2568 case LE:
2569 case GE:
2570 case LTGT:
2571 case ORDERED:
2572 return const0_rtx;
2573 default:
2574 return 0;
2575 }
2576
2577 equal = REAL_VALUES_EQUAL (d0, d1);
2578 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2579 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2580 }
2581
2582 /* Otherwise, see if the operands are both integers. */
2583 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2584 && (GET_CODE (trueop0) == CONST_DOUBLE
2585 || GET_CODE (trueop0) == CONST_INT)
2586 && (GET_CODE (trueop1) == CONST_DOUBLE
2587 || GET_CODE (trueop1) == CONST_INT))
2588 {
2589 int width = GET_MODE_BITSIZE (mode);
2590 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2591 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2592
2593 /* Get the two words comprising each integer constant. */
2594 if (GET_CODE (trueop0) == CONST_DOUBLE)
2595 {
2596 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2597 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2598 }
2599 else
2600 {
2601 l0u = l0s = INTVAL (trueop0);
2602 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2603 }
2604
2605 if (GET_CODE (trueop1) == CONST_DOUBLE)
2606 {
2607 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2608 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2609 }
2610 else
2611 {
2612 l1u = l1s = INTVAL (trueop1);
2613 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2614 }
2615
2616 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2617 we have to sign or zero-extend the values. */
2618 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2619 {
2620 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2621 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2622
2623 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2624 l0s |= ((HOST_WIDE_INT) (-1) << width);
2625
2626 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2627 l1s |= ((HOST_WIDE_INT) (-1) << width);
2628 }
2629 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2630 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2631
2632 equal = (h0u == h1u && l0u == l1u);
2633 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2634 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2635 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2636 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2637 }
2638
2639 /* Otherwise, there are some code-specific tests we can make. */
2640 else
2641 {
2642 switch (code)
2643 {
2644 case EQ:
2645 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2646 return const0_rtx;
2647 break;
2648
2649 case NE:
2650 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2651 return const_true_rtx;
2652 break;
2653
2654 case GEU:
2655 /* Unsigned values are never negative. */
2656 if (trueop1 == const0_rtx)
2657 return const_true_rtx;
2658 break;
2659
2660 case LTU:
2661 if (trueop1 == const0_rtx)
2662 return const0_rtx;
2663 break;
2664
2665 case LEU:
2666 /* Unsigned values are never greater than the largest
2667 unsigned value. */
2668 if (GET_CODE (trueop1) == CONST_INT
2669 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2670 && INTEGRAL_MODE_P (mode))
2671 return const_true_rtx;
2672 break;
2673
2674 case GTU:
2675 if (GET_CODE (trueop1) == CONST_INT
2676 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2677 && INTEGRAL_MODE_P (mode))
2678 return const0_rtx;
2679 break;
2680
2681 case LT:
2682 /* Optimize abs(x) < 0.0. */
2683 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2684 {
2685 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2686 : trueop0;
2687 if (GET_CODE (tem) == ABS)
2688 return const0_rtx;
2689 }
2690 break;
2691
2692 case GE:
2693 /* Optimize abs(x) >= 0.0. */
2694 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2695 {
2696 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2697 : trueop0;
2698 if (GET_CODE (tem) == ABS)
2699 return const_true_rtx;
2700 }
2701 break;
2702
2703 case UNGE:
2704 /* Optimize ! (abs(x) < 0.0). */
2705 if (trueop1 == CONST0_RTX (mode))
2706 {
2707 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2708 : trueop0;
2709 if (GET_CODE (tem) == ABS)
2710 return const_true_rtx;
2711 }
2712 break;
2713
2714 default:
2715 break;
2716 }
2717
2718 return 0;
2719 }
2720
2721 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2722 as appropriate. */
2723 switch (code)
2724 {
2725 case EQ:
2726 case UNEQ:
2727 return equal ? const_true_rtx : const0_rtx;
2728 case NE:
2729 case LTGT:
2730 return ! equal ? const_true_rtx : const0_rtx;
2731 case LT:
2732 case UNLT:
2733 return op0lt ? const_true_rtx : const0_rtx;
2734 case GT:
2735 case UNGT:
2736 return op1lt ? const_true_rtx : const0_rtx;
2737 case LTU:
2738 return op0ltu ? const_true_rtx : const0_rtx;
2739 case GTU:
2740 return op1ltu ? const_true_rtx : const0_rtx;
2741 case LE:
2742 case UNLE:
2743 return equal || op0lt ? const_true_rtx : const0_rtx;
2744 case GE:
2745 case UNGE:
2746 return equal || op1lt ? const_true_rtx : const0_rtx;
2747 case LEU:
2748 return equal || op0ltu ? const_true_rtx : const0_rtx;
2749 case GEU:
2750 return equal || op1ltu ? const_true_rtx : const0_rtx;
2751 case ORDERED:
2752 return const_true_rtx;
2753 case UNORDERED:
2754 return const0_rtx;
2755 default:
2756 abort ();
2757 }
2758 }
2759 \f
2760 /* Simplify CODE, an operation with result mode MODE and three operands,
2761 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2762 a constant. Return 0 if no simplification is possible. */
2763
2764 rtx
2765 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2766 enum machine_mode op0_mode, rtx op0, rtx op1,
2767 rtx op2)
2768 {
2769 unsigned int width = GET_MODE_BITSIZE (mode);
2770
2771 /* VOIDmode means "infinite" precision. */
2772 if (width == 0)
2773 width = HOST_BITS_PER_WIDE_INT;
2774
2775 switch (code)
2776 {
2777 case SIGN_EXTRACT:
2778 case ZERO_EXTRACT:
2779 if (GET_CODE (op0) == CONST_INT
2780 && GET_CODE (op1) == CONST_INT
2781 && GET_CODE (op2) == CONST_INT
2782 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2783 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2784 {
2785 /* Extracting a bit-field from a constant. */
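/* E.g. on a !BITS_BIG_ENDIAN target,
   (zero_extract:SI (const_int 0x123) (const_int 4) (const_int 4))
   extracts bits 4..7 of 0x123 and folds to (const_int 2). */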
2786 HOST_WIDE_INT val = INTVAL (op0);
2787
2788 if (BITS_BIG_ENDIAN)
2789 val >>= (GET_MODE_BITSIZE (op0_mode)
2790 - INTVAL (op2) - INTVAL (op1));
2791 else
2792 val >>= INTVAL (op2);
2793
2794 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2795 {
2796 /* First zero-extend. */
2797 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2798 /* If desired, propagate sign bit. */
2799 if (code == SIGN_EXTRACT
2800 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2801 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2802 }
2803
2804 /* Clear the bits that don't belong in our mode,
2805 unless they and our sign bit are all one.
2806 So we get either a reasonable negative value or a reasonable
2807 unsigned value for this mode. */
2808 if (width < HOST_BITS_PER_WIDE_INT
2809 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2810 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2811 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2812
2813 return GEN_INT (val);
2814 }
2815 break;
2816
2817 case IF_THEN_ELSE:
2818 if (GET_CODE (op0) == CONST_INT)
2819 return op0 != const0_rtx ? op1 : op2;
2820
2821 /* Convert a != b ? a : b and a == b ? b : a to "a". */
2822 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2823 && !HONOR_NANS (mode)
2824 && rtx_equal_p (XEXP (op0, 0), op1)
2825 && rtx_equal_p (XEXP (op0, 1), op2))
2826 return op1;
2827 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2828 && !HONOR_NANS (mode)
2829 && rtx_equal_p (XEXP (op0, 1), op1)
2830 && rtx_equal_p (XEXP (op0, 0), op2))
2831 return op2;
2832 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2833 {
2834 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2835 ? GET_MODE (XEXP (op0, 1))
2836 : GET_MODE (XEXP (op0, 0)));
2837 rtx temp;
2838 if (cmp_mode == VOIDmode)
2839 cmp_mode = op0_mode;
2840 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2841 XEXP (op0, 0), XEXP (op0, 1));
2842
2843 /* See if any simplifications were possible. */
2844 if (temp == const0_rtx)
2845 return op2;
2846 else if (temp == const_true_rtx)
2847 return op1;
2848 else if (temp)
2849 abort ();
2850
2851 /* Look for happy constants in op1 and op2. */
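/* E.g. with STORE_FLAG_VALUE == 1,
   (if_then_else (ne X Y) (const_int 1) (const_int 0)) becomes
   (ne X Y), and with the arms swapped it becomes the reversed
   comparison (eq X Y). */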
2852 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2853 {
2854 HOST_WIDE_INT t = INTVAL (op1);
2855 HOST_WIDE_INT f = INTVAL (op2);
2856
2857 if (t == STORE_FLAG_VALUE && f == 0)
2858 code = GET_CODE (op0);
2859 else if (t == 0 && f == STORE_FLAG_VALUE)
2860 {
2861 enum rtx_code tmp;
2862 tmp = reversed_comparison_code (op0, NULL_RTX);
2863 if (tmp == UNKNOWN)
2864 break;
2865 code = tmp;
2866 }
2867 else
2868 break;
2869
2870 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2871 }
2872 }
2873 break;
2874 case VEC_MERGE:
2875 if (GET_MODE (op0) != mode
2876 || GET_MODE (op1) != mode
2877 || !VECTOR_MODE_P (mode))
2878 abort ();
2879 op2 = avoid_constant_pool_reference (op2);
2880 if (GET_CODE (op2) == CONST_INT)
2881 {
2882 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2883 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2884 int mask = (1 << n_elts) - 1;
2885
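/* Bit I of OP2 selects element I from OP0 when set and from OP1 when
   clear; e.g. a mask of 0 yields OP1 and a mask covering all
   elements yields OP0. */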
2886 if (!(INTVAL (op2) & mask))
2887 return op1;
2888 if ((INTVAL (op2) & mask) == mask)
2889 return op0;
2890
2891 op0 = avoid_constant_pool_reference (op0);
2892 op1 = avoid_constant_pool_reference (op1);
2893 if (GET_CODE (op0) == CONST_VECTOR
2894 && GET_CODE (op1) == CONST_VECTOR)
2895 {
2896 rtvec v = rtvec_alloc (n_elts);
2897 unsigned int i;
2898
2899 for (i = 0; i < n_elts; i++)
2900 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2901 ? CONST_VECTOR_ELT (op0, i)
2902 : CONST_VECTOR_ELT (op1, i));
2903 return gen_rtx_CONST_VECTOR (mode, v);
2904 }
2905 }
2906 break;
2907
2908 default:
2909 abort ();
2910 }
2911
2912 return 0;
2913 }
2914
2915 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2916 Return 0 if no simplification is possible. */
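/* For example, on a little-endian target, taking the low QImode part
   of an SImode (const_int 0x1234) folds to (const_int 0x34). */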
2917 rtx
2918 simplify_subreg (enum machine_mode outermode, rtx op,
2919 enum machine_mode innermode, unsigned int byte)
2920 {
2921 /* Little bit of sanity checking. */
2922 if (innermode == VOIDmode || outermode == VOIDmode
2923 || innermode == BLKmode || outermode == BLKmode)
2924 abort ();
2925
2926 if (GET_MODE (op) != innermode
2927 && GET_MODE (op) != VOIDmode)
2928 abort ();
2929
2930 if (byte % GET_MODE_SIZE (outermode)
2931 || byte >= GET_MODE_SIZE (innermode))
2932 abort ();
2933
2934 if (outermode == innermode && !byte)
2935 return op;
2936
2937 /* Simplify subregs of vector constants. */
2938 if (GET_CODE (op) == CONST_VECTOR)
2939 {
2940 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
2941 const unsigned int offset = byte / elt_size;
2942 rtx elt;
2943
2944 if (GET_MODE_INNER (innermode) == outermode)
2945 {
2946 elt = CONST_VECTOR_ELT (op, offset);
2947
2948 /* ??? We probably don't need this copy_rtx because constants
2949 can be shared. */
2950
2951 return copy_rtx (elt);
2952 }
2953 else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
2954 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
2955 {
2956 return (gen_rtx_CONST_VECTOR
2957 (outermode,
2958 gen_rtvec_v (GET_MODE_NUNITS (outermode),
2959 &CONST_VECTOR_ELT (op, offset))));
2960 }
2961 else if (GET_MODE_CLASS (outermode) == MODE_INT
2962 && (GET_MODE_SIZE (outermode) % elt_size == 0))
2963 {
2964 /* This happens when the target register size is smaller than
2965 the vector mode, and we synthesize operations with vectors
2966 of elements that are smaller than the register size. */
2967 HOST_WIDE_INT sum = 0, high = 0;
2968 unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
2969 unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
2970 unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
2971 int shift = BITS_PER_UNIT * elt_size;
2972 unsigned HOST_WIDE_INT unit_mask;
2973
2974 unit_mask = (unsigned HOST_WIDE_INT) -1
2975 >> (sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - shift);
2976
2977 for (; n_elts--; i += step)
2978 {
2979 elt = CONST_VECTOR_ELT (op, i);
2980 if (GET_CODE (elt) == CONST_DOUBLE
2981 && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
2982 {
2983 elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
2984 elt);
2985 if (! elt)
2986 return NULL_RTX;
2987 }
2988 if (GET_CODE (elt) != CONST_INT)
2989 return NULL_RTX;
2990 /* Avoid overflow. */
2991 if (high >> (HOST_BITS_PER_WIDE_INT - shift))
2992 return NULL_RTX;
2993 high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
2994 sum = (sum << shift) + (INTVAL (elt) & unit_mask);
2995 }
2996 if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
2997 return GEN_INT (trunc_int_for_mode (sum, outermode));
2998 else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
2999 return immed_double_const (sum, high, outermode);
3000 else
3001 return NULL_RTX;
3002 }
3003 else if (GET_MODE_CLASS (outermode) == MODE_INT
3004 && (elt_size % GET_MODE_SIZE (outermode) == 0))
3005 {
3006 enum machine_mode new_mode
3007 = int_mode_for_mode (GET_MODE_INNER (innermode));
3008 int subbyte = byte % elt_size;
3009
3010 op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
3011 if (! op)
3012 return NULL_RTX;
3013 return simplify_subreg (outermode, op, new_mode, subbyte);
3014 }
3015 else if (GET_MODE_CLASS (outermode) == MODE_INT)
3016 /* This shouldn't happen, but let's not do anything stupid. */
3017 return NULL_RTX;
3018 }
3019
3020 /* Attempt to simplify constant to non-SUBREG expression. */
3021 if (CONSTANT_P (op))
3022 {
3023 int offset, part;
3024 unsigned HOST_WIDE_INT val = 0;
3025
3026 if (VECTOR_MODE_P (outermode))
3027 {
3028 /* Construct a CONST_VECTOR from individual subregs. */
3029 enum machine_mode submode = GET_MODE_INNER (outermode);
3030 int subsize = GET_MODE_UNIT_SIZE (outermode);
3031 int i, elts = GET_MODE_NUNITS (outermode);
3032 rtvec v = rtvec_alloc (elts);
3033 rtx elt;
3034
3035 for (i = 0; i < elts; i++, byte += subsize)
3036 {
3037 /* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
3038 /* ??? It would be nice if we could actually make such subregs
3039 on targets that allow such relocations. */
3040 if (byte >= GET_MODE_SIZE (innermode))
3041 elt = CONST0_RTX (submode);
3042 else
3043 elt = simplify_subreg (submode, op, innermode, byte);
3044 if (! elt)
3045 return NULL_RTX;
3046 RTVEC_ELT (v, i) = elt;
3047 }
3048 return gen_rtx_CONST_VECTOR (outermode, v);
3049 }
3050
3051 /* ??? This code is partly redundant with code below, but can handle
3052 the subregs of floats and similar corner cases.
3053 Later we should move all simplification code here and rewrite
3054 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
3055 using SIMPLIFY_SUBREG. */
3056 if (subreg_lowpart_offset (outermode, innermode) == byte
3057 && GET_CODE (op) != CONST_VECTOR)
3058 {
3059 rtx new = gen_lowpart_if_possible (outermode, op);
3060 if (new)
3061 return new;
3062 }
3063
3064 /* Similar comments to those above apply here. */
3065 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
3066 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
3067 && GET_MODE_CLASS (outermode) == MODE_INT)
3068 {
3069 rtx new = constant_subword (op,
3070 (byte / UNITS_PER_WORD),
3071 innermode);
3072 if (new)
3073 return new;
3074 }
3075
3076 if (GET_MODE_CLASS (outermode) != MODE_INT
3077 && GET_MODE_CLASS (outermode) != MODE_CC)
3078 {
3079 enum machine_mode new_mode = int_mode_for_mode (outermode);
3080
3081 if (new_mode != innermode || byte != 0)
3082 {
3083 op = simplify_subreg (new_mode, op, innermode, byte);
3084 if (! op)
3085 return NULL_RTX;
3086 return simplify_subreg (outermode, op, new_mode, 0);
3087 }
3088 }
3089
3090 offset = byte * BITS_PER_UNIT;
3091 switch (GET_CODE (op))
3092 {
3093 case CONST_DOUBLE:
3094 if (GET_MODE (op) != VOIDmode)
3095 break;
3096
3097 /* We can't handle this case yet. */
3098 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
3099 return NULL_RTX;
3100
3101 part = offset >= HOST_BITS_PER_WIDE_INT;
3102 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
3103 && BYTES_BIG_ENDIAN)
3104 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
3105 && WORDS_BIG_ENDIAN))
3106 part = !part;
3107 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
3108 offset %= HOST_BITS_PER_WIDE_INT;
3109
3110 /* We've already picked the word we want from a double, so
3111 pretend this is actually an integer. */
3112 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
3113
3114 /* FALLTHROUGH */
3115 case CONST_INT:
3116 if (GET_CODE (op) == CONST_INT)
3117 val = INTVAL (op);
3118
3119 /* We don't handle synthesizing of non-integral constants yet. */
3120 if (GET_MODE_CLASS (outermode) != MODE_INT)
3121 return NULL_RTX;
3122
3123 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
3124 {
3125 if (WORDS_BIG_ENDIAN)
3126 offset = (GET_MODE_BITSIZE (innermode)
3127 - GET_MODE_BITSIZE (outermode) - offset);
3128 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
3129 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
3130 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
3131 - 2 * (offset % BITS_PER_WORD));
3132 }
3133
3134 if (offset >= HOST_BITS_PER_WIDE_INT)
3135 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
3136 else
3137 {
3138 val >>= offset;
3139 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
3140 val = trunc_int_for_mode (val, outermode);
3141 return GEN_INT (val);
3142 }
3143 default:
3144 break;
3145 }
3146 }
3147
3148 /* Changing mode twice with SUBREG => just change it once,
3149 or not at all if changing back to OP's starting mode. */
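/* E.g. (subreg:QI (subreg:HI (reg:SI X) 0) 0) becomes
   (subreg:QI (reg:SI X) 0) for a pseudo register X. */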
3150 if (GET_CODE (op) == SUBREG)
3151 {
3152 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3153 int final_offset = byte + SUBREG_BYTE (op);
3154 rtx new;
3155
3156 if (outermode == innermostmode
3157 && byte == 0 && SUBREG_BYTE (op) == 0)
3158 return SUBREG_REG (op);
3159
3160 /* The SUBREG_BYTE represents the offset, as if the value were stored
3161 in memory. The irritating exception is the paradoxical subreg,
3162 where we define SUBREG_BYTE to be 0; on big endian machines, this
3163 value should really be negative. For a moment, undo this exception. */
3164 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3165 {
3166 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3167 if (WORDS_BIG_ENDIAN)
3168 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3169 if (BYTES_BIG_ENDIAN)
3170 final_offset += difference % UNITS_PER_WORD;
3171 }
3172 if (SUBREG_BYTE (op) == 0
3173 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3174 {
3175 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3176 if (WORDS_BIG_ENDIAN)
3177 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3178 if (BYTES_BIG_ENDIAN)
3179 final_offset += difference % UNITS_PER_WORD;
3180 }
3181
3182 /* See whether resulting subreg will be paradoxical. */
3183 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3184 {
3185 /* In nonparadoxical subregs we can't handle negative offsets. */
3186 if (final_offset < 0)
3187 return NULL_RTX;
3188 /* Bail out in case resulting subreg would be incorrect. */
3189 if (final_offset % GET_MODE_SIZE (outermode)
3190 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3191 return NULL_RTX;
3192 }
3193 else
3194 {
3195 int offset = 0;
3196 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3197
3198 /* In a paradoxical subreg, see if we are still looking at the lower part.
3199 If so, our SUBREG_BYTE will be 0. */
3200 if (WORDS_BIG_ENDIAN)
3201 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3202 if (BYTES_BIG_ENDIAN)
3203 offset += difference % UNITS_PER_WORD;
3204 if (offset == final_offset)
3205 final_offset = 0;
3206 else
3207 return NULL_RTX;
3208 }
3209
3210 /* Recurse for further possible simplifications. */
3211 new = simplify_subreg (outermode, SUBREG_REG (op),
3212 GET_MODE (SUBREG_REG (op)),
3213 final_offset);
3214 if (new)
3215 return new;
3216 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3217 }
3218
3219 /* SUBREG of a hard register => just change the register number
3220 and/or mode. If the hard register is not valid in that mode,
3221 suppress this simplification. If the hard register is the stack,
3222 frame, or argument pointer, leave this as a SUBREG. */
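/* For instance, (subreg:SI (reg:DI 0) 4) can become (reg:SI 1) on a
   32-bit target, provided SImode is valid for hard register 1 and
   the offset is representable there. */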
3223
3224 if (REG_P (op)
3225 && (! REG_FUNCTION_VALUE_P (op)
3226 || ! rtx_equal_function_value_matters)
3227 && REGNO (op) < FIRST_PSEUDO_REGISTER
3228 #ifdef CANNOT_CHANGE_MODE_CLASS
3229 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3230 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3231 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3232 #endif
3233 && ((reload_completed && !frame_pointer_needed)
3234 || (REGNO (op) != FRAME_POINTER_REGNUM
3235 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3236 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3237 #endif
3238 ))
3239 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3240 && REGNO (op) != ARG_POINTER_REGNUM
3241 #endif
3242 && REGNO (op) != STACK_POINTER_REGNUM
3243 && subreg_offset_representable_p (REGNO (op), innermode,
3244 byte, outermode))
3245 {
3246 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3247 int final_regno = subreg_hard_regno (tem, 0);
3248
3249 /* ??? We do allow it if the current REG is not valid for
3250 its mode. This is a kludge to work around how float/complex
3251 arguments are passed on 32-bit SPARC and should be fixed. */
3252 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3253 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3254 {
3255 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3256
3257 /* Propagate original regno. We don't have any way to specify
3258 the offset inside original regno, so do so only for lowpart.
3259 The information is used only by alias analysis, which cannot
3260 grok partial registers anyway. */
3261
3262 if (subreg_lowpart_offset (outermode, innermode) == byte)
3263 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3264 return x;
3265 }
3266 }
3267
3268 /* If we have a SUBREG of a register that we are replacing and we are
3269 replacing it with a MEM, make a new MEM and try replacing the
3270 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3271 or if we would be widening it. */
3272
3273 if (GET_CODE (op) == MEM
3274 && ! mode_dependent_address_p (XEXP (op, 0))
3275 /* Allow splitting of volatile memory references in case we don't
3276 have an instruction to move the whole thing. */
3277 && (! MEM_VOLATILE_P (op)
3278 || ! have_insn_for (SET, innermode))
3279 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3280 return adjust_address_nv (op, outermode, byte);
3281
3282 /* Handle complex values represented as CONCAT
3283 of a real and an imaginary part. */
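/* E.g. (subreg:SF (concat:SC R I) 0) is the real part R and
   (subreg:SF (concat:SC R I) 4) is the imaginary part I. */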
3284 if (GET_CODE (op) == CONCAT)
3285 {
3286 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
3287 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3288 unsigned int final_offset;
3289 rtx res;
3290
3291 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3292 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3293 if (res)
3294 return res;
3295 /* We can at least simplify it by referring directly to the relevant part. */
3296 return gen_rtx_SUBREG (outermode, part, final_offset);
3297 }
3298
3299 return NULL_RTX;
3300 }
3301 /* Make a SUBREG operation or equivalent if it folds. */
3302
3303 rtx
3304 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3305 enum machine_mode innermode, unsigned int byte)
3306 {
3307 rtx new;
3308 /* Little bit of sanity checking. */
3309 if (innermode == VOIDmode || outermode == VOIDmode
3310 || innermode == BLKmode || outermode == BLKmode)
3311 abort ();
3312
3313 if (GET_MODE (op) != innermode
3314 && GET_MODE (op) != VOIDmode)
3315 abort ();
3316
3317 if (byte % GET_MODE_SIZE (outermode)
3318 || byte >= GET_MODE_SIZE (innermode))
3319 abort ();
3320
3321 if (GET_CODE (op) == QUEUED)
3322 return NULL_RTX;
3323
3324 new = simplify_subreg (outermode, op, innermode, byte);
3325 if (new)
3326 return new;
3327
3328 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3329 return NULL_RTX;
3330
3331 return gen_rtx_SUBREG (outermode, op, byte);
3332 }
3333 /* Simplify X, an rtx expression.
3334
3335 Return the simplified expression or NULL if no simplifications
3336 were possible.
3337
3338 This is the preferred entry point into the simplification routines;
3339 however, we still allow passes to call the more specific routines.
3340
3341 Right now GCC has three (yes, three) major bodies of RTL simplification
3342 code that need to be unified.
3343
3344 1. fold_rtx in cse.c. This code uses various CSE specific
3345 information to aid in RTL simplification.
3346
3347 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3348 it uses combine specific information to aid in RTL
3349 simplification.
3350
3351 3. The routines in this file.
3352
3353
3354 Long term we want to only have one body of simplification code; to
3355 get to that state I recommend the following steps:
3356
3357 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3358 which are not pass dependent state into these routines.
3359
3360 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3361 use this routine whenever possible.
3362
3363 3. Allow for pass dependent state to be provided to these
3364 routines and add simplifications based on the pass dependent
3365 state. Remove code from cse.c & combine.c that becomes
3366 redundant/dead.
3367
3368 It will take time, but ultimately the compiler will be easier to
3369 maintain and improve. It's totally silly that when we add a
3370 simplification it needs to be added to 4 places (3 for RTL
3371 simplification and 1 for tree simplification). */
3372
3373 rtx
3374 simplify_rtx (rtx x)
3375 {
3376 enum rtx_code code = GET_CODE (x);
3377 enum machine_mode mode = GET_MODE (x);
3378 rtx temp;
3379
3380 switch (GET_RTX_CLASS (code))
3381 {
3382 case '1':
3383 return simplify_unary_operation (code, mode,
3384 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3385 case 'c':
3386 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3387 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3388
3389 /* Fall through.... */
3390
3391 case '2':
3392 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3393
3394 case '3':
3395 case 'b':
3396 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3397 XEXP (x, 0), XEXP (x, 1),
3398 XEXP (x, 2));
3399
3400 case '<':
3401 temp = simplify_relational_operation (code,
3402 ((GET_MODE (XEXP (x, 0))
3403 != VOIDmode)
3404 ? GET_MODE (XEXP (x, 0))
3405 : GET_MODE (XEXP (x, 1))),
3406 XEXP (x, 0), XEXP (x, 1));
3407 #ifdef FLOAT_STORE_FLAG_VALUE
3408 if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3409 {
3410 if (temp == const0_rtx)
3411 temp = CONST0_RTX (mode);
3412 else
3413 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3414 mode);
3415 }
3416 #endif
3417 return temp;
3418
3419 case 'x':
3420 if (code == SUBREG)
3421 return simplify_gen_subreg (mode, SUBREG_REG (x),
3422 GET_MODE (SUBREG_REG (x)),
3423 SUBREG_BYTE (x));
3424 if (code == CONSTANT_P_RTX)
3425 {
3426 if (CONSTANT_P (XEXP (x, 0)))
3427 return const1_rtx;
3428 }
3429 break;
3430
3431 case 'o':
3432 if (code == LO_SUM)
3433 {
3434 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3435 if (GET_CODE (XEXP (x, 0)) == HIGH
3436 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3437 return XEXP (x, 1);
3438 }
3439 break;
3440
3441 default:
3442 break;
3443 }
3444 return NULL;
3445 }