gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
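/* For example, HWI_SIGN_EXTEND applied to a low word whose sign bit is
   set yields (HOST_WIDE_INT) -1, and otherwise yields 0 -- exactly the
   high word that sign extending the low word would produce.  */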
51
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
55 rtx, int);
56 static bool associative_constant_p (rtx);
57 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
58 rtx, rtx);
59 \f
60 /* Negate a CONST_INT rtx, truncating (because a conversion from a
61 maximally negative number can overflow). */
62 static rtx
63 neg_const_int (enum machine_mode mode, rtx i)
64 {
65 return gen_int_mode (- INTVAL (i), mode);
66 }
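/* For example, assuming a 32-bit SImode: negating (const_int -2147483648)
   overflows, and gen_int_mode truncates the result back to
   (const_int -2147483648), which is why the truncation above matters.  */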
67
68 \f
69 /* Make a binary operation by properly ordering the operands and
70 seeing if the expression folds. */
71
72 rtx
73 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
74 rtx op1)
75 {
76 rtx tem;
77
78 /* Put complex operands first and constants second if commutative. */
79 if (GET_RTX_CLASS (code) == 'c'
80 && swap_commutative_operands_p (op0, op1))
81 tem = op0, op0 = op1, op1 = tem;
82
83 /* If this simplifies, do it. */
84 tem = simplify_binary_operation (code, mode, op0, op1);
85 if (tem)
86 return tem;
87
88 /* Handle addition and subtraction specially. Otherwise, just form
89 the operation. */
90
91 if (code == PLUS || code == MINUS)
92 {
93 tem = simplify_plus_minus (code, mode, op0, op1, 1);
94 if (tem)
95 return tem;
96 }
97
98 return gen_rtx_fmt_ee (code, mode, op0, op1);
99 }
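/* For example, simplify_gen_binary (PLUS, SImode, (const_int 1), (reg:SI 0))
   first swaps the operands so the constant comes second; if nothing folds,
   the result is (plus:SI (reg:SI 0) (const_int 1)), whereas two CONST_INT
   operands would fold to a single CONST_INT.  */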
100 \f
101 /* If X is a MEM referencing the constant pool, return the real value.
102 Otherwise return X. */
103 rtx
104 avoid_constant_pool_reference (rtx x)
105 {
106 rtx c, tmp, addr;
107 enum machine_mode cmode;
108
109 switch (GET_CODE (x))
110 {
111 case MEM:
112 break;
113
114 case FLOAT_EXTEND:
115 /* Handle float extensions of constant pool references. */
116 tmp = XEXP (x, 0);
117 c = avoid_constant_pool_reference (tmp);
118 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
119 {
120 REAL_VALUE_TYPE d;
121
122 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
123 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
124 }
125 return x;
126
127 default:
128 return x;
129 }
130
131 addr = XEXP (x, 0);
132
133 /* Call target hook to avoid the effects of -fpic etc.... */
134 addr = (*targetm.delegitimize_address) (addr);
135
136 if (GET_CODE (addr) == LO_SUM)
137 addr = XEXP (addr, 1);
138
139 if (GET_CODE (addr) != SYMBOL_REF
140 || ! CONSTANT_POOL_ADDRESS_P (addr))
141 return x;
142
143 c = get_pool_constant (addr);
144 cmode = get_pool_mode (addr);
145
 146   /* If we're accessing the constant in a different mode than the one it
 147      was originally stored in, attempt to fix that up via subreg simplifications.
148 If that fails we have no choice but to return the original memory. */
149 if (cmode != GET_MODE (x))
150 {
151 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
152 return c ? c : x;
153 }
154
155 return c;
156 }
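/* For example, a (mem (symbol_ref ...)) that addresses a constant pool
   entry holding a DFmode constant is replaced by the (const_double ...)
   stored there; any rtx that is not such a reference is returned
   unchanged.  */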
157 \f
158 /* Make a unary operation by first seeing if it folds and otherwise making
159 the specified operation. */
160
161 rtx
162 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
163 enum machine_mode op_mode)
164 {
165 rtx tem;
166
167 /* If this simplifies, use it. */
168 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
169 return tem;
170
171 return gen_rtx_fmt_e (code, mode, op);
172 }
173
174 /* Likewise for ternary operations. */
175
176 rtx
177 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
178 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
179 {
180 rtx tem;
181
182 /* If this simplifies, use it. */
183 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
184 op0, op1, op2)))
185 return tem;
186
187 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
188 }
189 \f
 190 /* Likewise, for relational operations.
 191    CMP_MODE specifies the mode in which the comparison is done.
 192  */
193
194 rtx
195 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
196 enum machine_mode cmp_mode, rtx op0, rtx op1)
197 {
198 rtx tem;
199
200 if (cmp_mode == VOIDmode)
201 cmp_mode = GET_MODE (op0);
202 if (cmp_mode == VOIDmode)
203 cmp_mode = GET_MODE (op1);
204
205 if (cmp_mode != VOIDmode)
206 {
207 tem = simplify_relational_operation (code, cmp_mode, op0, op1);
208
209 if (tem)
210 {
211 #ifdef FLOAT_STORE_FLAG_VALUE
212 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
213 {
214 REAL_VALUE_TYPE val;
215 if (tem == const0_rtx)
216 return CONST0_RTX (mode);
217 if (tem != const_true_rtx)
218 abort ();
219 val = FLOAT_STORE_FLAG_VALUE (mode);
220 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
221 }
222 #endif
223 return tem;
224 }
225 }
226
227 /* For the following tests, ensure const0_rtx is op1. */
228 if (swap_commutative_operands_p (op0, op1)
229 || (op0 == const0_rtx && op1 != const0_rtx))
230 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
231
232 /* If op0 is a compare, extract the comparison arguments from it. */
233 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
234 return simplify_gen_relational (code, mode, VOIDmode,
235 XEXP (op0, 0), XEXP (op0, 1));
236
 237   /* If op0 is a comparison, extract the comparison arguments from it.  */
238 if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
239 {
240 if (code == NE)
241 {
242 if (GET_MODE (op0) == mode)
243 return op0;
244 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
245 XEXP (op0, 0), XEXP (op0, 1));
246 }
247 else if (code == EQ)
248 {
249 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
250 if (new != UNKNOWN)
251 return simplify_gen_relational (new, mode, VOIDmode,
252 XEXP (op0, 0), XEXP (op0, 1));
253 }
254 }
255
256 return gen_rtx_fmt_ee (code, mode, op0, op1);
257 }
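/* For example, applied to EQ with op0 = (compare x y) and op1 = (const_int 0),
   the COMPARE case above recurses on the compared operands and, when nothing
   folds, produces (eq x y) in the requested mode.  */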
258 \f
259 /* Replace all occurrences of OLD in X with NEW and try to simplify the
260 resulting RTX. Return a new RTX which is as simplified as possible. */
261
262 rtx
263 simplify_replace_rtx (rtx x, rtx old, rtx new)
264 {
265 enum rtx_code code = GET_CODE (x);
266 enum machine_mode mode = GET_MODE (x);
267
268 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
269 to build a new expression substituting recursively. If we can't do
270 anything, return our input. */
271
272 if (x == old)
273 return new;
274
275 switch (GET_RTX_CLASS (code))
276 {
277 case '1':
278 {
279 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
280 rtx op = (XEXP (x, 0) == old
281 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
282
283 return simplify_gen_unary (code, mode, op, op_mode);
284 }
285
286 case '2':
287 case 'c':
288 return
289 simplify_gen_binary (code, mode,
290 simplify_replace_rtx (XEXP (x, 0), old, new),
291 simplify_replace_rtx (XEXP (x, 1), old, new));
292 case '<':
293 {
294 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
295 ? GET_MODE (XEXP (x, 0))
296 : GET_MODE (XEXP (x, 1)));
297 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
298 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
299 return simplify_gen_relational (code, mode, op_mode, op0, op1);
300 }
301
302 case '3':
303 case 'b':
304 {
305 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
306 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
307
308 return
309 simplify_gen_ternary (code, mode,
310 (op_mode != VOIDmode
311 ? op_mode
312 : GET_MODE (op0)),
313 op0,
314 simplify_replace_rtx (XEXP (x, 1), old, new),
315 simplify_replace_rtx (XEXP (x, 2), old, new));
316 }
317
318 case 'x':
319 /* The only case we try to handle is a SUBREG. */
320 if (code == SUBREG)
321 {
322 rtx exp;
323 exp = simplify_gen_subreg (GET_MODE (x),
324 simplify_replace_rtx (SUBREG_REG (x),
325 old, new),
326 GET_MODE (SUBREG_REG (x)),
327 SUBREG_BYTE (x));
328 if (exp)
329 x = exp;
330 }
331 return x;
332
333 case 'o':
334 if (code == MEM)
335 return replace_equiv_address_nv (x,
336 simplify_replace_rtx (XEXP (x, 0),
337 old, new));
338 else if (code == LO_SUM)
339 {
340 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
341 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
342
343 /* (lo_sum (high x) x) -> x */
344 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
345 return op1;
346
347 return gen_rtx_LO_SUM (mode, op0, op1);
348 }
349 else if (code == REG)
350 {
351 if (REG_P (old) && REGNO (x) == REGNO (old))
352 return new;
353 }
354
355 return x;
356
357 default:
358 return x;
359 }
360 return x;
361 }
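/* For example, replacing OLD = (reg:SI 1) by NEW = (const_int 2) in
   X = (plus:SI (reg:SI 1) (const_int 3)) rebuilds the PLUS through
   simplify_gen_binary, which folds it to (const_int 5).  */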
362 \f
363 /* Try to simplify a unary operation CODE whose output mode is to be
364 MODE with input operand OP whose mode was originally OP_MODE.
365 Return zero if no simplification can be made. */
366 rtx
367 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
368 rtx op, enum machine_mode op_mode)
369 {
370 unsigned int width = GET_MODE_BITSIZE (mode);
371 rtx trueop = avoid_constant_pool_reference (op);
372
373 if (code == VEC_DUPLICATE)
374 {
375 if (!VECTOR_MODE_P (mode))
376 abort ();
377 if (GET_MODE (trueop) != VOIDmode
378 && !VECTOR_MODE_P (GET_MODE (trueop))
379 && GET_MODE_INNER (mode) != GET_MODE (trueop))
380 abort ();
381 if (GET_MODE (trueop) != VOIDmode
382 && VECTOR_MODE_P (GET_MODE (trueop))
383 && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
384 abort ();
385 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
386 || GET_CODE (trueop) == CONST_VECTOR)
387 {
388 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
389 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
390 rtvec v = rtvec_alloc (n_elts);
391 unsigned int i;
392
393 if (GET_CODE (trueop) != CONST_VECTOR)
394 for (i = 0; i < n_elts; i++)
395 RTVEC_ELT (v, i) = trueop;
396 else
397 {
398 enum machine_mode inmode = GET_MODE (trueop);
399 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
400 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
401
402 if (in_n_elts >= n_elts || n_elts % in_n_elts)
403 abort ();
404 for (i = 0; i < n_elts; i++)
405 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
406 }
407 return gen_rtx_CONST_VECTOR (mode, v);
408 }
409 }
410
411 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
412 {
413 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
414 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
415 enum machine_mode opmode = GET_MODE (trueop);
416 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
417 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
418 rtvec v = rtvec_alloc (n_elts);
419 unsigned int i;
420
421 if (op_n_elts != n_elts)
422 abort ();
423
424 for (i = 0; i < n_elts; i++)
425 {
426 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
427 CONST_VECTOR_ELT (trueop, i),
428 GET_MODE_INNER (opmode));
429 if (!x)
430 return 0;
431 RTVEC_ELT (v, i) = x;
432 }
433 return gen_rtx_CONST_VECTOR (mode, v);
434 }
435
436 /* The order of these tests is critical so that, for example, we don't
437 check the wrong mode (input vs. output) for a conversion operation,
438 such as FIX. At some point, this should be simplified. */
439
440 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
441 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
442 {
443 HOST_WIDE_INT hv, lv;
444 REAL_VALUE_TYPE d;
445
446 if (GET_CODE (trueop) == CONST_INT)
447 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
448 else
449 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
450
451 REAL_VALUE_FROM_INT (d, lv, hv, mode);
452 d = real_value_truncate (mode, d);
453 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
454 }
455 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
456 && (GET_CODE (trueop) == CONST_DOUBLE
457 || GET_CODE (trueop) == CONST_INT))
458 {
459 HOST_WIDE_INT hv, lv;
460 REAL_VALUE_TYPE d;
461
462 if (GET_CODE (trueop) == CONST_INT)
463 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
464 else
465 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
466
467 if (op_mode == VOIDmode)
468 {
469 /* We don't know how to interpret negative-looking numbers in
470 this case, so don't try to fold those. */
471 if (hv < 0)
472 return 0;
473 }
474 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
475 ;
476 else
477 hv = 0, lv &= GET_MODE_MASK (op_mode);
478
479 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
480 d = real_value_truncate (mode, d);
481 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
482 }
483
484 if (GET_CODE (trueop) == CONST_INT
485 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
486 {
487 HOST_WIDE_INT arg0 = INTVAL (trueop);
488 HOST_WIDE_INT val;
489
490 switch (code)
491 {
492 case NOT:
493 val = ~ arg0;
494 break;
495
496 case NEG:
497 val = - arg0;
498 break;
499
500 case ABS:
501 val = (arg0 >= 0 ? arg0 : - arg0);
502 break;
503
504 case FFS:
 505 	  /* Don't use ffs here.  Instead, get the low-order bit and then its
 506 	     number.  If arg0 is zero, this will return 0, as desired.  */
507 arg0 &= GET_MODE_MASK (mode);
508 val = exact_log2 (arg0 & (- arg0)) + 1;
509 break;
510
511 case CLZ:
512 arg0 &= GET_MODE_MASK (mode);
513 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
514 ;
515 else
516 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
517 break;
518
519 case CTZ:
520 arg0 &= GET_MODE_MASK (mode);
521 if (arg0 == 0)
522 {
523 /* Even if the value at zero is undefined, we have to come
524 up with some replacement. Seems good enough. */
525 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
526 val = GET_MODE_BITSIZE (mode);
527 }
528 else
529 val = exact_log2 (arg0 & -arg0);
530 break;
531
532 case POPCOUNT:
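	  /* Count the set bits by clearing the lowest one on each
	     iteration (Kernighan's method).  */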
533 arg0 &= GET_MODE_MASK (mode);
534 val = 0;
535 while (arg0)
536 val++, arg0 &= arg0 - 1;
537 break;
538
539 case PARITY:
540 arg0 &= GET_MODE_MASK (mode);
541 val = 0;
542 while (arg0)
543 val++, arg0 &= arg0 - 1;
544 val &= 1;
545 break;
546
547 case TRUNCATE:
548 val = arg0;
549 break;
550
551 case ZERO_EXTEND:
552 /* When zero-extending a CONST_INT, we need to know its
553 original mode. */
554 if (op_mode == VOIDmode)
555 abort ();
556 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
557 {
558 /* If we were really extending the mode,
559 we would have to distinguish between zero-extension
560 and sign-extension. */
561 if (width != GET_MODE_BITSIZE (op_mode))
562 abort ();
563 val = arg0;
564 }
565 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
566 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
567 else
568 return 0;
569 break;
570
571 case SIGN_EXTEND:
572 if (op_mode == VOIDmode)
573 op_mode = mode;
574 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
575 {
576 /* If we were really extending the mode,
577 we would have to distinguish between zero-extension
578 and sign-extension. */
579 if (width != GET_MODE_BITSIZE (op_mode))
580 abort ();
581 val = arg0;
582 }
583 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
584 {
585 val
586 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
587 if (val
588 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
589 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
590 }
591 else
592 return 0;
593 break;
594
595 case SQRT:
596 case FLOAT_EXTEND:
597 case FLOAT_TRUNCATE:
598 case SS_TRUNCATE:
599 case US_TRUNCATE:
600 return 0;
601
602 default:
603 abort ();
604 }
605
606 val = trunc_int_for_mode (val, mode);
607
608 return GEN_INT (val);
609 }
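  /* For example, assuming a 32-bit SImode and an 8-bit QImode, zero
     extending (const_int -1) from QImode masks to the low 8 bits and
     yields (const_int 255), while sign extending it yields (const_int -1).  */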
610
611 /* We can do some operations on integer CONST_DOUBLEs. Also allow
612 for a DImode operation on a CONST_INT. */
613 else if (GET_MODE (trueop) == VOIDmode
614 && width <= HOST_BITS_PER_WIDE_INT * 2
615 && (GET_CODE (trueop) == CONST_DOUBLE
616 || GET_CODE (trueop) == CONST_INT))
617 {
618 unsigned HOST_WIDE_INT l1, lv;
619 HOST_WIDE_INT h1, hv;
620
621 if (GET_CODE (trueop) == CONST_DOUBLE)
622 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
623 else
624 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
625
626 switch (code)
627 {
628 case NOT:
629 lv = ~ l1;
630 hv = ~ h1;
631 break;
632
633 case NEG:
634 neg_double (l1, h1, &lv, &hv);
635 break;
636
637 case ABS:
638 if (h1 < 0)
639 neg_double (l1, h1, &lv, &hv);
640 else
641 lv = l1, hv = h1;
642 break;
643
644 case FFS:
645 hv = 0;
646 if (l1 == 0)
647 {
648 if (h1 == 0)
649 lv = 0;
650 else
651 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
652 }
653 else
654 lv = exact_log2 (l1 & -l1) + 1;
655 break;
656
657 case CLZ:
658 hv = 0;
659 if (h1 != 0)
660 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
661 - HOST_BITS_PER_WIDE_INT;
662 else if (l1 != 0)
663 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
664 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
665 lv = GET_MODE_BITSIZE (mode);
666 break;
667
668 case CTZ:
669 hv = 0;
670 if (l1 != 0)
671 lv = exact_log2 (l1 & -l1);
672 else if (h1 != 0)
673 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
674 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
675 lv = GET_MODE_BITSIZE (mode);
676 break;
677
678 case POPCOUNT:
679 hv = 0;
680 lv = 0;
681 while (l1)
682 lv++, l1 &= l1 - 1;
683 while (h1)
684 lv++, h1 &= h1 - 1;
685 break;
686
687 case PARITY:
688 hv = 0;
689 lv = 0;
690 while (l1)
691 lv++, l1 &= l1 - 1;
692 while (h1)
693 lv++, h1 &= h1 - 1;
694 lv &= 1;
695 break;
696
697 case TRUNCATE:
698 /* This is just a change-of-mode, so do nothing. */
699 lv = l1, hv = h1;
700 break;
701
702 case ZERO_EXTEND:
703 if (op_mode == VOIDmode)
704 abort ();
705
706 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
707 return 0;
708
709 hv = 0;
710 lv = l1 & GET_MODE_MASK (op_mode);
711 break;
712
713 case SIGN_EXTEND:
714 if (op_mode == VOIDmode
715 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
716 return 0;
717 else
718 {
719 lv = l1 & GET_MODE_MASK (op_mode);
720 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
721 && (lv & ((HOST_WIDE_INT) 1
722 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
723 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
724
725 hv = HWI_SIGN_EXTEND (lv);
726 }
727 break;
728
729 case SQRT:
730 return 0;
731
732 default:
733 return 0;
734 }
735
736 return immed_double_const (lv, hv, mode);
737 }
738
739 else if (GET_CODE (trueop) == CONST_DOUBLE
740 && GET_MODE_CLASS (mode) == MODE_FLOAT)
741 {
742 REAL_VALUE_TYPE d, t;
743 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
744
745 switch (code)
746 {
747 case SQRT:
748 if (HONOR_SNANS (mode) && real_isnan (&d))
749 return 0;
750 real_sqrt (&t, mode, &d);
751 d = t;
752 break;
753 case ABS:
754 d = REAL_VALUE_ABS (d);
755 break;
756 case NEG:
757 d = REAL_VALUE_NEGATE (d);
758 break;
759 case FLOAT_TRUNCATE:
760 d = real_value_truncate (mode, d);
761 break;
762 case FLOAT_EXTEND:
763 /* All this does is change the mode. */
764 break;
765 case FIX:
766 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
767 break;
768
769 default:
770 abort ();
771 }
772 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
773 }
774
775 else if (GET_CODE (trueop) == CONST_DOUBLE
776 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
777 && GET_MODE_CLASS (mode) == MODE_INT
778 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
779 {
780 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
781 operators are intentionally left unspecified (to ease implementation
782 by target backends), for consistency, this routine implements the
783 same semantics for constant folding as used by the middle-end. */
784
785 HOST_WIDE_INT xh, xl, th, tl;
786 REAL_VALUE_TYPE x, t;
787 REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
788 switch (code)
789 {
790 case FIX:
791 if (REAL_VALUE_ISNAN (x))
792 return const0_rtx;
793
794 /* Test against the signed upper bound. */
795 if (width > HOST_BITS_PER_WIDE_INT)
796 {
797 th = ((unsigned HOST_WIDE_INT) 1
798 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
799 tl = -1;
800 }
801 else
802 {
803 th = 0;
804 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
805 }
806 real_from_integer (&t, VOIDmode, tl, th, 0);
807 if (REAL_VALUES_LESS (t, x))
808 {
809 xh = th;
810 xl = tl;
811 break;
812 }
813
814 /* Test against the signed lower bound. */
815 if (width > HOST_BITS_PER_WIDE_INT)
816 {
817 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
818 tl = 0;
819 }
820 else
821 {
822 th = -1;
823 tl = (HOST_WIDE_INT) -1 << (width - 1);
824 }
825 real_from_integer (&t, VOIDmode, tl, th, 0);
826 if (REAL_VALUES_LESS (x, t))
827 {
828 xh = th;
829 xl = tl;
830 break;
831 }
832 REAL_VALUE_TO_INT (&xl, &xh, x);
833 break;
834
835 case UNSIGNED_FIX:
836 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
837 return const0_rtx;
838
839 /* Test against the unsigned upper bound. */
840 if (width == 2*HOST_BITS_PER_WIDE_INT)
841 {
842 th = -1;
843 tl = -1;
844 }
845 else if (width >= HOST_BITS_PER_WIDE_INT)
846 {
847 th = ((unsigned HOST_WIDE_INT) 1
848 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
849 tl = -1;
850 }
851 else
852 {
853 th = 0;
854 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
855 }
856 real_from_integer (&t, VOIDmode, tl, th, 1);
857 if (REAL_VALUES_LESS (t, x))
858 {
859 xh = th;
860 xl = tl;
861 break;
862 }
863
864 REAL_VALUE_TO_INT (&xl, &xh, x);
865 break;
866
867 default:
868 abort ();
869 }
870 return immed_double_const (xl, xh, mode);
871 }
872
873 /* This was formerly used only for non-IEEE float.
874 eggert@twinsun.com says it is safe for IEEE also. */
875 else
876 {
877 enum rtx_code reversed;
878 rtx temp;
879
880 /* There are some simplifications we can do even if the operands
881 aren't constant. */
882 switch (code)
883 {
884 case NOT:
885 /* (not (not X)) == X. */
886 if (GET_CODE (op) == NOT)
887 return XEXP (op, 0);
888
889 /* (not (eq X Y)) == (ne X Y), etc. */
890 if (GET_RTX_CLASS (GET_CODE (op)) == '<'
891 && (mode == BImode || STORE_FLAG_VALUE == -1)
892 && ((reversed = reversed_comparison_code (op, NULL_RTX))
893 != UNKNOWN))
894 return simplify_gen_relational (reversed, mode, VOIDmode,
895 XEXP (op, 0), XEXP (op, 1));
896
897 /* (not (plus X -1)) can become (neg X). */
898 if (GET_CODE (op) == PLUS
899 && XEXP (op, 1) == constm1_rtx)
900 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
901
902 /* Similarly, (not (neg X)) is (plus X -1). */
903 if (GET_CODE (op) == NEG)
904 return plus_constant (XEXP (op, 0), -1);
905
906 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
907 if (GET_CODE (op) == XOR
908 && GET_CODE (XEXP (op, 1)) == CONST_INT
909 && (temp = simplify_unary_operation (NOT, mode,
910 XEXP (op, 1),
911 mode)) != 0)
912 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
913
914
915 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
916 operands other than 1, but that is not valid. We could do a
917 similar simplification for (not (lshiftrt C X)) where C is
918 just the sign bit, but this doesn't seem common enough to
919 bother with. */
920 if (GET_CODE (op) == ASHIFT
921 && XEXP (op, 0) == const1_rtx)
922 {
923 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
924 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
925 }
926
927 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
928 by reversing the comparison code if valid. */
929 if (STORE_FLAG_VALUE == -1
930 && GET_RTX_CLASS (GET_CODE (op)) == '<'
931 && (reversed = reversed_comparison_code (op, NULL_RTX))
932 != UNKNOWN)
933 return simplify_gen_relational (reversed, mode, VOIDmode,
934 XEXP (op, 0), XEXP (op, 1));
935
936 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
937 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
938 so we can perform the above simplification. */
939
940 if (STORE_FLAG_VALUE == -1
941 && GET_CODE (op) == ASHIFTRT
942 && GET_CODE (XEXP (op, 1)) == CONST_INT
943 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
944 return simplify_gen_relational (GE, mode, VOIDmode,
945 XEXP (op, 0), const0_rtx);
946
947 break;
948
949 case NEG:
950 /* (neg (neg X)) == X. */
951 if (GET_CODE (op) == NEG)
952 return XEXP (op, 0);
953
954 /* (neg (plus X 1)) can become (not X). */
955 if (GET_CODE (op) == PLUS
956 && XEXP (op, 1) == const1_rtx)
957 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
958
959 /* Similarly, (neg (not X)) is (plus X 1). */
960 if (GET_CODE (op) == NOT)
961 return plus_constant (XEXP (op, 0), 1);
962
963 /* (neg (minus X Y)) can become (minus Y X). This transformation
964 isn't safe for modes with signed zeros, since if X and Y are
965 both +0, (minus Y X) is the same as (minus X Y). If the
966 rounding mode is towards +infinity (or -infinity) then the two
967 expressions will be rounded differently. */
968 if (GET_CODE (op) == MINUS
969 && !HONOR_SIGNED_ZEROS (mode)
970 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
971 return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
972 XEXP (op, 0));
973
974 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
975 if (GET_CODE (op) == PLUS
976 && !HONOR_SIGNED_ZEROS (mode)
977 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
978 {
979 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
980 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
981 }
982
983 /* (neg (mult A B)) becomes (mult (neg A) B).
984 This works even for floating-point values. */
985 if (GET_CODE (op) == MULT
986 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
987 {
988 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
989 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
990 }
991
992 /* NEG commutes with ASHIFT since it is multiplication. Only do
993 this if we can then eliminate the NEG (e.g., if the operand
994 is a constant). */
995 if (GET_CODE (op) == ASHIFT)
996 {
997 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
998 mode);
999 if (temp)
1000 return simplify_gen_binary (ASHIFT, mode, temp,
1001 XEXP (op, 1));
1002 }
1003
1004 break;
1005
1006 case SIGN_EXTEND:
1007 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1008 becomes just the MINUS if its mode is MODE. This allows
1009 folding switch statements on machines using casesi (such as
1010 the VAX). */
1011 if (GET_CODE (op) == TRUNCATE
1012 && GET_MODE (XEXP (op, 0)) == mode
1013 && GET_CODE (XEXP (op, 0)) == MINUS
1014 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1015 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1016 return XEXP (op, 0);
1017
1018 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1019 if (! POINTERS_EXTEND_UNSIGNED
1020 && mode == Pmode && GET_MODE (op) == ptr_mode
1021 && (CONSTANT_P (op)
1022 || (GET_CODE (op) == SUBREG
1023 && GET_CODE (SUBREG_REG (op)) == REG
1024 && REG_POINTER (SUBREG_REG (op))
1025 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1026 return convert_memory_address (Pmode, op);
1027 #endif
1028 break;
1029
1030 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1031 case ZERO_EXTEND:
1032 if (POINTERS_EXTEND_UNSIGNED > 0
1033 && mode == Pmode && GET_MODE (op) == ptr_mode
1034 && (CONSTANT_P (op)
1035 || (GET_CODE (op) == SUBREG
1036 && GET_CODE (SUBREG_REG (op)) == REG
1037 && REG_POINTER (SUBREG_REG (op))
1038 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1039 return convert_memory_address (Pmode, op);
1040 break;
1041 #endif
1042
1043 default:
1044 break;
1045 }
1046
1047 return 0;
1048 }
1049 }
1050 \f
1051 /* Subroutine of simplify_associative_operation. Return true if rtx OP
1052 is a suitable integer or floating point immediate constant. */
1053 static bool
1054 associative_constant_p (rtx op)
1055 {
1056 if (GET_CODE (op) == CONST_INT
1057 || GET_CODE (op) == CONST_DOUBLE)
1058 return true;
1059 op = avoid_constant_pool_reference (op);
1060 return GET_CODE (op) == CONST_INT
1061 || GET_CODE (op) == CONST_DOUBLE;
1062 }
1063
1064 /* Subroutine of simplify_binary_operation to simplify an associative
1065 binary operation CODE with result mode MODE, operating on OP0 and OP1.
1066 Return 0 if no simplification is possible. */
1067 static rtx
1068 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1069 rtx op0, rtx op1)
1070 {
1071 rtx tem;
1072
1073 /* Simplify (x op c1) op c2 as x op (c1 op c2). */
1074 if (GET_CODE (op0) == code
1075 && associative_constant_p (op1)
1076 && associative_constant_p (XEXP (op0, 1)))
1077 {
1078 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1079 if (! tem)
1080 return tem;
1081 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1082 }
1083
1084 /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2). */
1085 if (GET_CODE (op0) == code
1086 && GET_CODE (op1) == code
1087 && associative_constant_p (XEXP (op0, 1))
1088 && associative_constant_p (XEXP (op1, 1)))
1089 {
1090 rtx c = simplify_binary_operation (code, mode,
1091 XEXP (op0, 1), XEXP (op1, 1));
1092 if (! c)
1093 return 0;
1094 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1095 return simplify_gen_binary (code, mode, tem, c);
1096 }
1097
1098 /* Canonicalize (x op c) op y as (x op y) op c. */
1099 if (GET_CODE (op0) == code
1100 && associative_constant_p (XEXP (op0, 1)))
1101 {
1102 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1103 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1104 }
1105
1106 /* Canonicalize x op (y op c) as (x op y) op c. */
1107 if (GET_CODE (op1) == code
1108 && associative_constant_p (XEXP (op1, 1)))
1109 {
1110 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1111 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1112 }
1113
1114 return 0;
1115 }
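/* For example, given (ior (ior x (const_int 1)) (const_int 2)), the first
   rule above folds the two constants and returns (ior x (const_int 3)).  */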
1116
1117 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1118 and OP1. Return 0 if no simplification is possible.
1119
1120 Don't use this for relational operations such as EQ or LT.
1121 Use simplify_relational_operation instead. */
1122 rtx
1123 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1124 rtx op0, rtx op1)
1125 {
1126 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
1127 HOST_WIDE_INT val;
1128 unsigned int width = GET_MODE_BITSIZE (mode);
1129 rtx tem;
1130 rtx trueop0 = avoid_constant_pool_reference (op0);
1131 rtx trueop1 = avoid_constant_pool_reference (op1);
1132
1133 /* Relational operations don't work here. We must know the mode
1134 of the operands in order to do the comparison correctly.
1135 Assuming a full word can give incorrect results.
1136 Consider comparing 128 with -128 in QImode. */
1137
1138 if (GET_RTX_CLASS (code) == '<')
1139 abort ();
1140
1141 /* Make sure the constant is second. */
1142 if (GET_RTX_CLASS (code) == 'c'
1143 && swap_commutative_operands_p (trueop0, trueop1))
1144 {
1145 tem = op0, op0 = op1, op1 = tem;
1146 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
1147 }
1148
1149 if (VECTOR_MODE_P (mode)
1150 && GET_CODE (trueop0) == CONST_VECTOR
1151 && GET_CODE (trueop1) == CONST_VECTOR)
1152 {
1153 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1154 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1155 enum machine_mode op0mode = GET_MODE (trueop0);
1156 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
1157 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
1158 enum machine_mode op1mode = GET_MODE (trueop1);
1159 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
1160 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
1161 rtvec v = rtvec_alloc (n_elts);
1162 unsigned int i;
1163
1164 if (op0_n_elts != n_elts || op1_n_elts != n_elts)
1165 abort ();
1166
1167 for (i = 0; i < n_elts; i++)
1168 {
1169 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
1170 CONST_VECTOR_ELT (trueop0, i),
1171 CONST_VECTOR_ELT (trueop1, i));
1172 if (!x)
1173 return 0;
1174 RTVEC_ELT (v, i) = x;
1175 }
1176
1177 return gen_rtx_CONST_VECTOR (mode, v);
1178 }
1179
1180 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1181 && GET_CODE (trueop0) == CONST_DOUBLE
1182 && GET_CODE (trueop1) == CONST_DOUBLE
1183 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
1184 {
1185 REAL_VALUE_TYPE f0, f1, value;
1186
1187 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
1188 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
1189 f0 = real_value_truncate (mode, f0);
1190 f1 = real_value_truncate (mode, f1);
1191
1192 if (HONOR_SNANS (mode)
1193 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
1194 return 0;
1195
1196 if (code == DIV
1197 && REAL_VALUES_EQUAL (f1, dconst0)
1198 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1199 return 0;
1200
1201 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
1202
1203 value = real_value_truncate (mode, value);
1204 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
1205 }
1206
1207 /* We can fold some multi-word operations. */
1208 if (GET_MODE_CLASS (mode) == MODE_INT
1209 && width == HOST_BITS_PER_WIDE_INT * 2
1210 && (GET_CODE (trueop0) == CONST_DOUBLE
1211 || GET_CODE (trueop0) == CONST_INT)
1212 && (GET_CODE (trueop1) == CONST_DOUBLE
1213 || GET_CODE (trueop1) == CONST_INT))
1214 {
1215 unsigned HOST_WIDE_INT l1, l2, lv;
1216 HOST_WIDE_INT h1, h2, hv;
1217
1218 if (GET_CODE (trueop0) == CONST_DOUBLE)
1219 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
1220 else
1221 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
1222
1223 if (GET_CODE (trueop1) == CONST_DOUBLE)
1224 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
1225 else
1226 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
1227
1228 switch (code)
1229 {
1230 case MINUS:
1231 /* A - B == A + (-B). */
1232 neg_double (l2, h2, &lv, &hv);
1233 l2 = lv, h2 = hv;
1234
1235 /* Fall through.... */
1236
1237 case PLUS:
1238 add_double (l1, h1, l2, h2, &lv, &hv);
1239 break;
1240
1241 case MULT:
1242 mul_double (l1, h1, l2, h2, &lv, &hv);
1243 break;
1244
1245 case DIV: case MOD: case UDIV: case UMOD:
1246 /* We'd need to include tree.h to do this and it doesn't seem worth
1247 it. */
1248 return 0;
1249
1250 case AND:
1251 lv = l1 & l2, hv = h1 & h2;
1252 break;
1253
1254 case IOR:
1255 lv = l1 | l2, hv = h1 | h2;
1256 break;
1257
1258 case XOR:
1259 lv = l1 ^ l2, hv = h1 ^ h2;
1260 break;
1261
1262 case SMIN:
1263 if (h1 < h2
1264 || (h1 == h2
1265 && ((unsigned HOST_WIDE_INT) l1
1266 < (unsigned HOST_WIDE_INT) l2)))
1267 lv = l1, hv = h1;
1268 else
1269 lv = l2, hv = h2;
1270 break;
1271
1272 case SMAX:
1273 if (h1 > h2
1274 || (h1 == h2
1275 && ((unsigned HOST_WIDE_INT) l1
1276 > (unsigned HOST_WIDE_INT) l2)))
1277 lv = l1, hv = h1;
1278 else
1279 lv = l2, hv = h2;
1280 break;
1281
1282 case UMIN:
1283 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1284 || (h1 == h2
1285 && ((unsigned HOST_WIDE_INT) l1
1286 < (unsigned HOST_WIDE_INT) l2)))
1287 lv = l1, hv = h1;
1288 else
1289 lv = l2, hv = h2;
1290 break;
1291
1292 case UMAX:
1293 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1294 || (h1 == h2
1295 && ((unsigned HOST_WIDE_INT) l1
1296 > (unsigned HOST_WIDE_INT) l2)))
1297 lv = l1, hv = h1;
1298 else
1299 lv = l2, hv = h2;
1300 break;
1301
1302 case LSHIFTRT: case ASHIFTRT:
1303 case ASHIFT:
1304 case ROTATE: case ROTATERT:
1305 #ifdef SHIFT_COUNT_TRUNCATED
1306 if (SHIFT_COUNT_TRUNCATED)
1307 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1308 #endif
1309
1310 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1311 return 0;
1312
1313 if (code == LSHIFTRT || code == ASHIFTRT)
1314 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1315 code == ASHIFTRT);
1316 else if (code == ASHIFT)
1317 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1318 else if (code == ROTATE)
1319 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1320 else /* code == ROTATERT */
1321 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1322 break;
1323
1324 default:
1325 return 0;
1326 }
1327
1328 return immed_double_const (lv, hv, mode);
1329 }
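  /* For example, on a host with a 32-bit HOST_WIDE_INT, a DImode PLUS of
     two constant operands is folded by the block above, with add_double
     propagating the carry from the low word into the high word.  */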
1330
1331 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1332 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1333 {
1334 /* Even if we can't compute a constant result,
1335 there are some cases worth simplifying. */
1336
1337 switch (code)
1338 {
1339 case PLUS:
1340 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1341 when x is NaN, infinite, or finite and nonzero. They aren't
1342 when x is -0 and the rounding mode is not towards -infinity,
1343 since (-0) + 0 is then 0. */
1344 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1345 return op0;
1346
1347 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1348 transformations are safe even for IEEE. */
1349 if (GET_CODE (op0) == NEG)
1350 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1351 else if (GET_CODE (op1) == NEG)
1352 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1353
1354 /* (~a) + 1 -> -a */
1355 if (INTEGRAL_MODE_P (mode)
1356 && GET_CODE (op0) == NOT
1357 && trueop1 == const1_rtx)
1358 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1359
1360 /* Handle both-operands-constant cases. We can only add
1361 CONST_INTs to constants since the sum of relocatable symbols
1362 can't be handled by most assemblers. Don't add CONST_INT
1363 to CONST_INT since overflow won't be computed properly if wider
1364 than HOST_BITS_PER_WIDE_INT. */
1365
1366 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1367 && GET_CODE (op1) == CONST_INT)
1368 return plus_constant (op0, INTVAL (op1));
1369 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1370 && GET_CODE (op0) == CONST_INT)
1371 return plus_constant (op1, INTVAL (op0));
1372
1373 /* See if this is something like X * C - X or vice versa or
1374 if the multiplication is written as a shift. If so, we can
1375 distribute and make a new multiply, shift, or maybe just
1376 have X (if C is 2 in the example above). But don't make
1377 	     a real multiply if we didn't have one before.  */
1378
1379 if (! FLOAT_MODE_P (mode))
1380 {
1381 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1382 rtx lhs = op0, rhs = op1;
1383 int had_mult = 0;
1384
1385 if (GET_CODE (lhs) == NEG)
1386 coeff0 = -1, lhs = XEXP (lhs, 0);
1387 else if (GET_CODE (lhs) == MULT
1388 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1389 {
1390 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1391 had_mult = 1;
1392 }
1393 else if (GET_CODE (lhs) == ASHIFT
1394 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1395 && INTVAL (XEXP (lhs, 1)) >= 0
1396 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1397 {
1398 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1399 lhs = XEXP (lhs, 0);
1400 }
1401
1402 if (GET_CODE (rhs) == NEG)
1403 coeff1 = -1, rhs = XEXP (rhs, 0);
1404 else if (GET_CODE (rhs) == MULT
1405 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1406 {
1407 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1408 had_mult = 1;
1409 }
1410 else if (GET_CODE (rhs) == ASHIFT
1411 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1412 && INTVAL (XEXP (rhs, 1)) >= 0
1413 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1414 {
1415 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1416 rhs = XEXP (rhs, 0);
1417 }
1418
1419 if (rtx_equal_p (lhs, rhs))
1420 {
1421 tem = simplify_gen_binary (MULT, mode, lhs,
1422 GEN_INT (coeff0 + coeff1));
1423 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1424 }
1425 }
1426
1427 /* If one of the operands is a PLUS or a MINUS, see if we can
1428 simplify this by the associative law.
1429 Don't use the associative law for floating point.
1430 The inaccuracy makes it nonassociative,
1431 and subtle programs can break if operations are associated. */
1432
1433 if (INTEGRAL_MODE_P (mode)
1434 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1435 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1436 || (GET_CODE (op0) == CONST
1437 && GET_CODE (XEXP (op0, 0)) == PLUS)
1438 || (GET_CODE (op1) == CONST
1439 && GET_CODE (XEXP (op1, 0)) == PLUS))
1440 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1441 return tem;
1442
1443 /* Reassociate floating point addition only when the user
1444 specifies unsafe math optimizations. */
1445 if (FLOAT_MODE_P (mode)
1446 && flag_unsafe_math_optimizations)
1447 {
1448 tem = simplify_associative_operation (code, mode, op0, op1);
1449 if (tem)
1450 return tem;
1451 }
1452 break;
1453
1454 case COMPARE:
1455 #ifdef HAVE_cc0
1456 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1457 using cc0, in which case we want to leave it as a COMPARE
1458 so we can distinguish it from a register-register-copy.
1459
1460 In IEEE floating point, x-0 is not the same as x. */
1461
1462 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1463 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1464 && trueop1 == CONST0_RTX (mode))
1465 return op0;
1466 #endif
1467
1468 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1469 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1470 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1471 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1472 {
1473 rtx xop00 = XEXP (op0, 0);
1474 rtx xop10 = XEXP (op1, 0);
1475
1476 #ifdef HAVE_cc0
1477 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1478 #else
1479 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1480 && GET_MODE (xop00) == GET_MODE (xop10)
1481 && REGNO (xop00) == REGNO (xop10)
1482 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1483 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1484 #endif
1485 return xop00;
1486 }
1487 break;
1488
1489 case MINUS:
1490 /* We can't assume x-x is 0 even with non-IEEE floating point,
1491 but since it is zero except in very strange circumstances, we
1492 will treat it as zero with -funsafe-math-optimizations. */
1493 if (rtx_equal_p (trueop0, trueop1)
1494 && ! side_effects_p (op0)
1495 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1496 return CONST0_RTX (mode);
1497
1498 /* Change subtraction from zero into negation. (0 - x) is the
1499 same as -x when x is NaN, infinite, or finite and nonzero.
1500 But if the mode has signed zeros, and does not round towards
1501 -infinity, then 0 - 0 is 0, not -0. */
1502 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1503 return simplify_gen_unary (NEG, mode, op1, mode);
1504
1505 /* (-1 - a) is ~a. */
1506 if (trueop0 == constm1_rtx)
1507 return simplify_gen_unary (NOT, mode, op1, mode);
1508
1509 /* Subtracting 0 has no effect unless the mode has signed zeros
1510 and supports rounding towards -infinity. In such a case,
1511 0 - 0 is -0. */
1512 if (!(HONOR_SIGNED_ZEROS (mode)
1513 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1514 && trueop1 == CONST0_RTX (mode))
1515 return op0;
1516
1517 /* See if this is something like X * C - X or vice versa or
1518 if the multiplication is written as a shift. If so, we can
1519 distribute and make a new multiply, shift, or maybe just
1520 have X (if C is 2 in the example above). But don't make
1521 	     a real multiply if we didn't have one before.  */
1522
1523 if (! FLOAT_MODE_P (mode))
1524 {
1525 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1526 rtx lhs = op0, rhs = op1;
1527 int had_mult = 0;
1528
1529 if (GET_CODE (lhs) == NEG)
1530 coeff0 = -1, lhs = XEXP (lhs, 0);
1531 else if (GET_CODE (lhs) == MULT
1532 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1533 {
1534 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1535 had_mult = 1;
1536 }
1537 else if (GET_CODE (lhs) == ASHIFT
1538 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1539 && INTVAL (XEXP (lhs, 1)) >= 0
1540 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1541 {
1542 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1543 lhs = XEXP (lhs, 0);
1544 }
1545
1546 if (GET_CODE (rhs) == NEG)
1547 	      coeff1 = -1, rhs = XEXP (rhs, 0);
1548 else if (GET_CODE (rhs) == MULT
1549 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1550 {
1551 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1552 had_mult = 1;
1553 }
1554 else if (GET_CODE (rhs) == ASHIFT
1555 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1556 && INTVAL (XEXP (rhs, 1)) >= 0
1557 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1558 {
1559 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1560 rhs = XEXP (rhs, 0);
1561 }
1562
1563 if (rtx_equal_p (lhs, rhs))
1564 {
1565 tem = simplify_gen_binary (MULT, mode, lhs,
1566 GEN_INT (coeff0 - coeff1));
1567 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1568 }
1569 }
1570
1571 /* (a - (-b)) -> (a + b). True even for IEEE. */
1572 if (GET_CODE (op1) == NEG)
1573 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1574
1575 /* If one of the operands is a PLUS or a MINUS, see if we can
1576 simplify this by the associative law.
1577 Don't use the associative law for floating point.
1578 The inaccuracy makes it nonassociative,
1579 and subtle programs can break if operations are associated. */
1580
1581 if (INTEGRAL_MODE_P (mode)
1582 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1583 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1584 || (GET_CODE (op0) == CONST
1585 && GET_CODE (XEXP (op0, 0)) == PLUS)
1586 || (GET_CODE (op1) == CONST
1587 && GET_CODE (XEXP (op1, 0)) == PLUS))
1588 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1589 return tem;
1590
1591 /* Don't let a relocatable value get a negative coeff. */
1592 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1593 return simplify_gen_binary (PLUS, mode,
1594 op0,
1595 neg_const_int (mode, op1));
1596
1597 /* (x - (x & y)) -> (x & ~y) */
1598 if (GET_CODE (op1) == AND)
1599 {
1600 if (rtx_equal_p (op0, XEXP (op1, 0)))
1601 {
1602 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1603 GET_MODE (XEXP (op1, 1)));
1604 return simplify_gen_binary (AND, mode, op0, tem);
1605 }
1606 if (rtx_equal_p (op0, XEXP (op1, 1)))
1607 {
1608 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1609 GET_MODE (XEXP (op1, 0)));
1610 return simplify_gen_binary (AND, mode, op0, tem);
1611 }
1612 }
1613 break;
1614
1615 case MULT:
1616 if (trueop1 == constm1_rtx)
1617 return simplify_gen_unary (NEG, mode, op0, mode);
1618
1619 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1620 x is NaN, since x * 0 is then also NaN. Nor is it valid
1621 when the mode has signed zeros, since multiplying a negative
1622 number by 0 will give -0, not 0. */
1623 if (!HONOR_NANS (mode)
1624 && !HONOR_SIGNED_ZEROS (mode)
1625 && trueop1 == CONST0_RTX (mode)
1626 && ! side_effects_p (op0))
1627 return op1;
1628
1629 /* In IEEE floating point, x*1 is not equivalent to x for
1630 signalling NaNs. */
1631 if (!HONOR_SNANS (mode)
1632 && trueop1 == CONST1_RTX (mode))
1633 return op0;
1634
1635 /* Convert multiply by constant power of two into shift unless
1636 we are still generating RTL. This test is a kludge. */
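	  /* For example, (mult:SI x (const_int 8)) becomes
	     (ashift:SI x (const_int 3)) once RTL generation is finished.  */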
1637 if (GET_CODE (trueop1) == CONST_INT
1638 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1639 /* If the mode is larger than the host word size, and the
1640 uppermost bit is set, then this isn't a power of two due
1641 to implicit sign extension. */
1642 && (width <= HOST_BITS_PER_WIDE_INT
1643 || val != HOST_BITS_PER_WIDE_INT - 1)
1644 && ! rtx_equal_function_value_matters)
1645 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1646
1647 /* x*2 is x+x and x*(-1) is -x */
1648 if (GET_CODE (trueop1) == CONST_DOUBLE
1649 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1650 && GET_MODE (op0) == mode)
1651 {
1652 REAL_VALUE_TYPE d;
1653 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1654
1655 if (REAL_VALUES_EQUAL (d, dconst2))
1656 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1657
1658 if (REAL_VALUES_EQUAL (d, dconstm1))
1659 return simplify_gen_unary (NEG, mode, op0, mode);
1660 }
1661
1662 /* Reassociate multiplication, but for floating point MULTs
1663 only when the user specifies unsafe math optimizations. */
1664 if (! FLOAT_MODE_P (mode)
1665 || flag_unsafe_math_optimizations)
1666 {
1667 tem = simplify_associative_operation (code, mode, op0, op1);
1668 if (tem)
1669 return tem;
1670 }
1671 break;
1672
1673 case IOR:
1674 if (trueop1 == const0_rtx)
1675 return op0;
1676 if (GET_CODE (trueop1) == CONST_INT
1677 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1678 == GET_MODE_MASK (mode)))
1679 return op1;
1680 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1681 return op0;
1682 /* A | (~A) -> -1 */
1683 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1684 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1685 && ! side_effects_p (op0)
1686 && GET_MODE_CLASS (mode) != MODE_CC)
1687 return constm1_rtx;
1688 tem = simplify_associative_operation (code, mode, op0, op1);
1689 if (tem)
1690 return tem;
1691 break;
1692
1693 case XOR:
1694 if (trueop1 == const0_rtx)
1695 return op0;
1696 if (GET_CODE (trueop1) == CONST_INT
1697 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1698 == GET_MODE_MASK (mode)))
1699 return simplify_gen_unary (NOT, mode, op0, mode);
1700 if (trueop0 == trueop1 && ! side_effects_p (op0)
1701 && GET_MODE_CLASS (mode) != MODE_CC)
1702 return const0_rtx;
1703 tem = simplify_associative_operation (code, mode, op0, op1);
1704 if (tem)
1705 return tem;
1706 break;
1707
1708 case AND:
1709 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1710 return const0_rtx;
1711 if (GET_CODE (trueop1) == CONST_INT
1712 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1713 == GET_MODE_MASK (mode)))
1714 return op0;
1715 if (trueop0 == trueop1 && ! side_effects_p (op0)
1716 && GET_MODE_CLASS (mode) != MODE_CC)
1717 return op0;
1718 /* A & (~A) -> 0 */
1719 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1720 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1721 && ! side_effects_p (op0)
1722 && GET_MODE_CLASS (mode) != MODE_CC)
1723 return const0_rtx;
1724 tem = simplify_associative_operation (code, mode, op0, op1);
1725 if (tem)
1726 return tem;
1727 break;
1728
1729 case UDIV:
1730 /* Convert divide by power of two into shift (divide by 1 handled
1731 below). */
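	  /* For example, (udiv:SI x (const_int 16)) becomes
	     (lshiftrt:SI x (const_int 4)).  */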
1732 if (GET_CODE (trueop1) == CONST_INT
1733 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1734 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
1735
1736 /* Fall through.... */
1737
1738 case DIV:
1739 if (trueop1 == CONST1_RTX (mode))
1740 {
1741 	      /* On some platforms DIV uses a narrower mode than its
1742 operands. */
1743 rtx x = gen_lowpart_common (mode, op0);
1744 if (x)
1745 return x;
1746 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1747 return gen_lowpart_SUBREG (mode, op0);
1748 else
1749 return op0;
1750 }
1751
1752 /* Maybe change 0 / x to 0. This transformation isn't safe for
1753 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1754 Nor is it safe for modes with signed zeros, since dividing
1755 0 by a negative number gives -0, not 0. */
1756 if (!HONOR_NANS (mode)
1757 && !HONOR_SIGNED_ZEROS (mode)
1758 && trueop0 == CONST0_RTX (mode)
1759 && ! side_effects_p (op1))
1760 return op0;
1761
1762 /* Change division by a constant into multiplication. Only do
1763 this with -funsafe-math-optimizations. */
1764 else if (GET_CODE (trueop1) == CONST_DOUBLE
1765 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1766 && trueop1 != CONST0_RTX (mode)
1767 && flag_unsafe_math_optimizations)
1768 {
1769 REAL_VALUE_TYPE d;
1770 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1771
1772 if (! REAL_VALUES_EQUAL (d, dconst0))
1773 {
1774 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1775 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1776 return simplify_gen_binary (MULT, mode, op0, tem);
1777 }
1778 }
1779 break;
1780
1781 case UMOD:
1782 /* Handle modulus by power of two (mod with 1 handled below). */
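	  /* For example, (umod:SI x (const_int 8)) becomes
	     (and:SI x (const_int 7)).  */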
1783 if (GET_CODE (trueop1) == CONST_INT
1784 && exact_log2 (INTVAL (trueop1)) > 0)
1785 return simplify_gen_binary (AND, mode, op0,
1786 GEN_INT (INTVAL (op1) - 1));
1787
1788 /* Fall through.... */
1789
1790 case MOD:
1791 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1792 && ! side_effects_p (op0) && ! side_effects_p (op1))
1793 return const0_rtx;
1794 break;
1795
1796 case ROTATERT:
1797 case ROTATE:
1798 case ASHIFTRT:
1799 /* Rotating ~0 always results in ~0. */
1800 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1801 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1802 && ! side_effects_p (op1))
1803 return op0;
1804
1805 /* Fall through.... */
1806
1807 case ASHIFT:
1808 case LSHIFTRT:
1809 if (trueop1 == const0_rtx)
1810 return op0;
1811 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1812 return op0;
1813 break;
1814
1815 case SMIN:
1816 if (width <= HOST_BITS_PER_WIDE_INT
1817 && GET_CODE (trueop1) == CONST_INT
1818 	  && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
1819 && ! side_effects_p (op0))
1820 return op1;
1821 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1822 return op0;
1823 tem = simplify_associative_operation (code, mode, op0, op1);
1824 if (tem)
1825 return tem;
1826 break;
1827
1828 case SMAX:
1829 if (width <= HOST_BITS_PER_WIDE_INT
1830 && GET_CODE (trueop1) == CONST_INT
1831 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1832 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1833 && ! side_effects_p (op0))
1834 return op1;
1835 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1836 return op0;
1837 tem = simplify_associative_operation (code, mode, op0, op1);
1838 if (tem)
1839 return tem;
1840 break;
1841
1842 case UMIN:
1843 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1844 return op1;
1845 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1846 return op0;
1847 tem = simplify_associative_operation (code, mode, op0, op1);
1848 if (tem)
1849 return tem;
1850 break;
1851
1852 case UMAX:
1853 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1854 return op1;
1855 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1856 return op0;
1857 tem = simplify_associative_operation (code, mode, op0, op1);
1858 if (tem)
1859 return tem;
1860 break;
1861
1862 case SS_PLUS:
1863 case US_PLUS:
1864 case SS_MINUS:
1865 case US_MINUS:
1866 /* ??? There are simplifications that can be done. */
1867 return 0;
1868
1869 case VEC_SELECT:
1870 if (!VECTOR_MODE_P (mode))
1871 {
1872 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1873 || (mode
1874 != GET_MODE_INNER (GET_MODE (trueop0)))
1875 || GET_CODE (trueop1) != PARALLEL
1876 || XVECLEN (trueop1, 0) != 1
1877 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
1878 abort ();
1879
1880 if (GET_CODE (trueop0) == CONST_VECTOR)
1881 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
1882 }
1883 else
1884 {
1885 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1886 || (GET_MODE_INNER (mode)
1887 != GET_MODE_INNER (GET_MODE (trueop0)))
1888 || GET_CODE (trueop1) != PARALLEL)
1889 abort ();
1890
1891 if (GET_CODE (trueop0) == CONST_VECTOR)
1892 {
1893 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1894 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1895 rtvec v = rtvec_alloc (n_elts);
1896 unsigned int i;
1897
1898 if (XVECLEN (trueop1, 0) != (int) n_elts)
1899 abort ();
1900 for (i = 0; i < n_elts; i++)
1901 {
1902 rtx x = XVECEXP (trueop1, 0, i);
1903
1904 if (GET_CODE (x) != CONST_INT)
1905 abort ();
1906 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
1907 }
1908
1909 return gen_rtx_CONST_VECTOR (mode, v);
1910 }
1911 }
1912 return 0;
1913 case VEC_CONCAT:
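      /* Concatenate the two operands, each a vector or a scalar
         (a single element), into one vector of mode MODE; the
         elements of operand 0 come first.  */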
1914 {
1915 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1916 ? GET_MODE (trueop0)
1917 : GET_MODE_INNER (mode));
1918 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
1919 ? GET_MODE (trueop1)
1920 : GET_MODE_INNER (mode));
1921
1922 if (!VECTOR_MODE_P (mode)
1923 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
1924 != GET_MODE_SIZE (mode)))
1925 abort ();
1926
1927 if ((VECTOR_MODE_P (op0_mode)
1928 && (GET_MODE_INNER (mode)
1929 != GET_MODE_INNER (op0_mode)))
1930 || (!VECTOR_MODE_P (op0_mode)
1931 && GET_MODE_INNER (mode) != op0_mode))
1932 abort ();
1933
1934 if ((VECTOR_MODE_P (op1_mode)
1935 && (GET_MODE_INNER (mode)
1936 != GET_MODE_INNER (op1_mode)))
1937 || (!VECTOR_MODE_P (op1_mode)
1938 && GET_MODE_INNER (mode) != op1_mode))
1939 abort ();
1940
1941 if ((GET_CODE (trueop0) == CONST_VECTOR
1942 || GET_CODE (trueop0) == CONST_INT
1943 || GET_CODE (trueop0) == CONST_DOUBLE)
1944 && (GET_CODE (trueop1) == CONST_VECTOR
1945 || GET_CODE (trueop1) == CONST_INT
1946 || GET_CODE (trueop1) == CONST_DOUBLE))
1947 {
1948 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1949 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1950 rtvec v = rtvec_alloc (n_elts);
1951 unsigned int i;
1952 unsigned in_n_elts = 1;
1953
1954 if (VECTOR_MODE_P (op0_mode))
1955 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
1956 for (i = 0; i < n_elts; i++)
1957 {
1958 if (i < in_n_elts)
1959 {
1960 if (!VECTOR_MODE_P (op0_mode))
1961 RTVEC_ELT (v, i) = trueop0;
1962 else
1963 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
1964 }
1965 else
1966 {
1967 if (!VECTOR_MODE_P (op1_mode))
1968 RTVEC_ELT (v, i) = trueop1;
1969 else
1970 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
1971 i - in_n_elts);
1972 }
1973 }
1974
1975 return gen_rtx_CONST_VECTOR (mode, v);
1976 }
1977 }
1978 return 0;
1979
1980 default:
1981 abort ();
1982 }
1983
1984 return 0;
1985 }
1986
1987 /* Get the integer argument values in two forms:
1988 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
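  /* E.g. for a QImode (const_int -1) with WIDTH == 8, ARG0 is 255
     while ARG0S is -1.  */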
1989
1990 arg0 = INTVAL (trueop0);
1991 arg1 = INTVAL (trueop1);
1992
1993 if (width < HOST_BITS_PER_WIDE_INT)
1994 {
1995 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1996 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1997
1998 arg0s = arg0;
1999 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2000 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2001
2002 arg1s = arg1;
2003 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2004 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2005 }
2006 else
2007 {
2008 arg0s = arg0;
2009 arg1s = arg1;
2010 }
2011
2012 /* Compute the value of the arithmetic. */
2013
2014 switch (code)
2015 {
2016 case PLUS:
2017 val = arg0s + arg1s;
2018 break;
2019
2020 case MINUS:
2021 val = arg0s - arg1s;
2022 break;
2023
2024 case MULT:
2025 val = arg0s * arg1s;
2026 break;
2027
2028 case DIV:
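      /* Don't fold division by zero, nor the most negative value
         divided by -1, which overflows.  */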
2029 if (arg1s == 0
2030 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2031 && arg1s == -1))
2032 return 0;
2033 val = arg0s / arg1s;
2034 break;
2035
2036 case MOD:
2037 if (arg1s == 0
2038 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2039 && arg1s == -1))
2040 return 0;
2041 val = arg0s % arg1s;
2042 break;
2043
2044 case UDIV:
2045 if (arg1 == 0
2046 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2047 && arg1s == -1))
2048 return 0;
2049 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2050 break;
2051
2052 case UMOD:
2053 if (arg1 == 0
2054 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2055 && arg1s == -1))
2056 return 0;
2057 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2058 break;
2059
2060 case AND:
2061 val = arg0 & arg1;
2062 break;
2063
2064 case IOR:
2065 val = arg0 | arg1;
2066 break;
2067
2068 case XOR:
2069 val = arg0 ^ arg1;
2070 break;
2071
2072 case LSHIFTRT:
2073       /* If the shift count is undefined, don't fold it; let the machine do
2074 what it wants. But truncate it if the machine will do that. */
2075 if (arg1 < 0)
2076 return 0;
2077
2078 #ifdef SHIFT_COUNT_TRUNCATED
2079 if (SHIFT_COUNT_TRUNCATED)
2080 arg1 %= width;
2081 #endif
2082
2083 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2084 break;
2085
2086 case ASHIFT:
2087 if (arg1 < 0)
2088 return 0;
2089
2090 #ifdef SHIFT_COUNT_TRUNCATED
2091 if (SHIFT_COUNT_TRUNCATED)
2092 arg1 %= width;
2093 #endif
2094
2095 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2096 break;
2097
2098 case ASHIFTRT:
2099 if (arg1 < 0)
2100 return 0;
2101
2102 #ifdef SHIFT_COUNT_TRUNCATED
2103 if (SHIFT_COUNT_TRUNCATED)
2104 arg1 %= width;
2105 #endif
2106
2107 val = arg0s >> arg1;
2108
2109       /* The bootstrap compiler may not have sign-extended the right shift.
2110 	 Manually extend the sign to ensure the bootstrap cc matches gcc.  */
2111 if (arg0s < 0 && arg1 > 0)
2112 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2113
2114 break;
2115
2116 case ROTATERT:
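      /* Synthesize the right rotate from two shifts:
         (X >> ARG1) | (X << (WIDTH - ARG1)), computed in the unsigned
         type to avoid sign extension.  */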
2117 if (arg1 < 0)
2118 return 0;
2119
2120 arg1 %= width;
2121 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2122 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2123 break;
2124
2125 case ROTATE:
2126 if (arg1 < 0)
2127 return 0;
2128
2129 arg1 %= width;
2130 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2131 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2132 break;
2133
2134 case COMPARE:
2135 /* Do nothing here. */
2136 return 0;
2137
2138 case SMIN:
2139 val = arg0s <= arg1s ? arg0s : arg1s;
2140 break;
2141
2142 case UMIN:
2143 val = ((unsigned HOST_WIDE_INT) arg0
2144 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2145 break;
2146
2147 case SMAX:
2148 val = arg0s > arg1s ? arg0s : arg1s;
2149 break;
2150
2151 case UMAX:
2152 val = ((unsigned HOST_WIDE_INT) arg0
2153 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2154 break;
2155
2156 case SS_PLUS:
2157 case US_PLUS:
2158 case SS_MINUS:
2159 case US_MINUS:
2160 /* ??? There are simplifications that can be done. */
2161 return 0;
2162
2163 default:
2164 abort ();
2165 }
2166
2167 val = trunc_int_for_mode (val, mode);
2168
2169 return GEN_INT (val);
2170 }
2171 \f
2172 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2173 PLUS or MINUS.
2174
2175    Rather than test for specific cases, we do this by a brute-force method
2176 and do all possible simplifications until no more changes occur. Then
2177 we rebuild the operation.
2178
2179 If FORCE is true, then always generate the rtx. This is used to
2180    canonicalize expressions emitted from simplify_gen_binary.  Note that this
2181 can still fail if the rtx is too complex. It won't fail just because
2182 the result is not 'simpler' than the input, however. */
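
/* For example, (minus A (minus B C)) is scattered into the ops array
   as +A, -B and +C; pairs that fold (say, two constants) are combined
   before the expression is rebuilt.  */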
2183
2184 struct simplify_plus_minus_op_data
2185 {
2186 rtx op;
2187 int neg;
2188 };
2189
2190 static int
2191 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2192 {
2193 const struct simplify_plus_minus_op_data *d1 = p1;
2194 const struct simplify_plus_minus_op_data *d2 = p2;
2195
2196 return (commutative_operand_precedence (d2->op)
2197 - commutative_operand_precedence (d1->op));
2198 }
2199
2200 static rtx
2201 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2202 rtx op1, int force)
2203 {
2204 struct simplify_plus_minus_op_data ops[8];
2205 rtx result, tem;
2206 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2207 int first, negate, changed;
2208 int i, j;
2209
2210 memset (ops, 0, sizeof ops);
2211
2212 /* Set up the two operands and then expand them until nothing has been
2213 changed. If we run out of room in our array, give up; this should
2214 almost never happen. */
2215
2216 ops[0].op = op0;
2217 ops[0].neg = 0;
2218 ops[1].op = op1;
2219 ops[1].neg = (code == MINUS);
2220
2221 do
2222 {
2223 changed = 0;
2224
2225 for (i = 0; i < n_ops; i++)
2226 {
2227 rtx this_op = ops[i].op;
2228 int this_neg = ops[i].neg;
2229 enum rtx_code this_code = GET_CODE (this_op);
2230
2231 switch (this_code)
2232 {
2233 case PLUS:
2234 case MINUS:
2235 if (n_ops == 7)
2236 return NULL_RTX;
2237
2238 ops[n_ops].op = XEXP (this_op, 1);
2239 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2240 n_ops++;
2241
2242 ops[i].op = XEXP (this_op, 0);
2243 input_ops++;
2244 changed = 1;
2245 break;
2246
2247 case NEG:
2248 ops[i].op = XEXP (this_op, 0);
2249 ops[i].neg = ! this_neg;
2250 changed = 1;
2251 break;
2252
2253 case CONST:
2254 if (n_ops < 7
2255 && GET_CODE (XEXP (this_op, 0)) == PLUS
2256 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2257 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2258 {
2259 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2260 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2261 ops[n_ops].neg = this_neg;
2262 n_ops++;
2263 input_consts++;
2264 changed = 1;
2265 }
2266 break;
2267
2268 case NOT:
2269 /* ~a -> (-a - 1) */
2270 if (n_ops != 7)
2271 {
2272 ops[n_ops].op = constm1_rtx;
2273 ops[n_ops++].neg = this_neg;
2274 ops[i].op = XEXP (this_op, 0);
2275 ops[i].neg = !this_neg;
2276 changed = 1;
2277 }
2278 break;
2279
2280 case CONST_INT:
2281 if (this_neg)
2282 {
2283 ops[i].op = neg_const_int (mode, this_op);
2284 ops[i].neg = 0;
2285 changed = 1;
2286 }
2287 break;
2288
2289 default:
2290 break;
2291 }
2292 }
2293 }
2294 while (changed);
2295
2296 /* If we only have two operands, we can't do anything. */
2297 if (n_ops <= 2 && !force)
2298 return NULL_RTX;
2299
2300 /* Count the number of CONSTs we didn't split above. */
2301 for (i = 0; i < n_ops; i++)
2302 if (GET_CODE (ops[i].op) == CONST)
2303 input_consts++;
2304
2305 /* Now simplify each pair of operands until nothing changes. The first
2306 time through just simplify constants against each other. */
2307
2308 first = 1;
2309 do
2310 {
2311 changed = first;
2312
2313 for (i = 0; i < n_ops - 1; i++)
2314 for (j = i + 1; j < n_ops; j++)
2315 {
2316 rtx lhs = ops[i].op, rhs = ops[j].op;
2317 int lneg = ops[i].neg, rneg = ops[j].neg;
2318
2319 if (lhs != 0 && rhs != 0
2320 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2321 {
2322 enum rtx_code ncode = PLUS;
2323
2324 if (lneg != rneg)
2325 {
2326 ncode = MINUS;
2327 if (lneg)
2328 tem = lhs, lhs = rhs, rhs = tem;
2329 }
2330 else if (swap_commutative_operands_p (lhs, rhs))
2331 tem = lhs, lhs = rhs, rhs = tem;
2332
2333 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2334
2335 /* Reject "simplifications" that just wrap the two
2336 arguments in a CONST. Failure to do so can result
2337 in infinite recursion with simplify_binary_operation
2338 when it calls us to simplify CONST operations. */
2339 if (tem
2340 && ! (GET_CODE (tem) == CONST
2341 && GET_CODE (XEXP (tem, 0)) == ncode
2342 && XEXP (XEXP (tem, 0), 0) == lhs
2343 && XEXP (XEXP (tem, 0), 1) == rhs)
2344 /* Don't allow -x + -1 -> ~x simplifications in the
2345 		 first pass.  This gives us the chance to combine
2346 the -1 with other constants. */
2347 && ! (first
2348 && GET_CODE (tem) == NOT
2349 && XEXP (tem, 0) == rhs))
2350 {
2351 lneg &= rneg;
2352 if (GET_CODE (tem) == NEG)
2353 tem = XEXP (tem, 0), lneg = !lneg;
2354 if (GET_CODE (tem) == CONST_INT && lneg)
2355 tem = neg_const_int (mode, tem), lneg = 0;
2356
2357 ops[i].op = tem;
2358 ops[i].neg = lneg;
2359 ops[j].op = NULL_RTX;
2360 changed = 1;
2361 }
2362 }
2363 }
2364
2365 first = 0;
2366 }
2367 while (changed);
2368
2369 /* Pack all the operands to the lower-numbered entries. */
2370 for (i = 0, j = 0; j < n_ops; j++)
2371 if (ops[j].op)
2372 ops[i++] = ops[j];
2373 n_ops = i;
2374
2375 /* Sort the operations based on swap_commutative_operands_p. */
2376 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2377
2378 /* We suppressed creation of trivial CONST expressions in the
2379 combination loop to avoid recursion. Create one manually now.
2380 The combination loop should have ensured that there is exactly
2381 one CONST_INT, and the sort will have ensured that it is last
2382 in the array and that any other constant will be next-to-last. */
2383
2384 if (n_ops > 1
2385 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2386 && CONSTANT_P (ops[n_ops - 2].op))
2387 {
2388 rtx value = ops[n_ops - 1].op;
2389 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2390 value = neg_const_int (mode, value);
2391 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2392 n_ops--;
2393 }
2394
2395 /* Count the number of CONSTs that we generated. */
2396 n_consts = 0;
2397 for (i = 0; i < n_ops; i++)
2398 if (GET_CODE (ops[i].op) == CONST)
2399 n_consts++;
2400
2401 /* Give up if we didn't reduce the number of operands we had. Make
2402 sure we count a CONST as two operands. If we have the same
2403 number of operands, but have made more CONSTs than before, this
2404 is also an improvement, so accept it. */
2405 if (!force
2406 && (n_ops + n_consts > input_ops
2407 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2408 return NULL_RTX;
2409
2410 /* Put a non-negated operand first. If there aren't any, make all
2411 operands positive and negate the whole thing later. */
2412
2413 negate = 0;
2414 for (i = 0; i < n_ops && ops[i].neg; i++)
2415 continue;
2416 if (i == n_ops)
2417 {
2418 for (i = 0; i < n_ops; i++)
2419 ops[i].neg = 0;
2420 negate = 1;
2421 }
2422 else if (i != 0)
2423 {
2424 tem = ops[0].op;
2425 ops[0] = ops[i];
2426 ops[i].op = tem;
2427 ops[i].neg = 1;
2428 }
2429
2430 /* Now make the result by performing the requested operations. */
2431 result = ops[0].op;
2432 for (i = 1; i < n_ops; i++)
2433 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2434 mode, result, ops[i].op);
2435
2436 return negate ? gen_rtx_NEG (mode, result) : result;
2437 }
2438
2439 /* Like simplify_binary_operation except used for relational operators.
2440 MODE is the mode of the operands, not that of the result. If MODE
2441 is VOIDmode, both operands must also be VOIDmode and we compare the
2442 operands in "infinite precision".
2443
2444 If no simplification is possible, this function returns zero. Otherwise,
2445 it returns either const_true_rtx or const0_rtx. */
2446
2447 rtx
2448 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2449 rtx op0, rtx op1)
2450 {
2451 int equal, op0lt, op0ltu, op1lt, op1ltu;
2452 rtx tem;
2453 rtx trueop0;
2454 rtx trueop1;
2455
2456 if (mode == VOIDmode
2457 && (GET_MODE (op0) != VOIDmode
2458 || GET_MODE (op1) != VOIDmode))
2459 abort ();
2460
2461 /* If op0 is a compare, extract the comparison arguments from it. */
2462 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2463 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2464
2465 trueop0 = avoid_constant_pool_reference (op0);
2466 trueop1 = avoid_constant_pool_reference (op1);
2467
2468 /* We can't simplify MODE_CC values since we don't know what the
2469 actual comparison is. */
2470 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2471 return 0;
2472
2473 /* Make sure the constant is second. */
2474 if (swap_commutative_operands_p (trueop0, trueop1))
2475 {
2476 tem = op0, op0 = op1, op1 = tem;
2477 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2478 code = swap_condition (code);
2479 }
2480
2481   /* For integer comparisons of A and B we may be able to simplify A - B and
2482 then simplify a comparison of that with zero. If A and B are both either
2483 a register or a CONST_INT, this can't help; testing for these cases will
2484 prevent infinite recursion here and speed things up.
2485
2486 If CODE is an unsigned comparison, then we can never do this optimization,
2487 because it gives an incorrect result if the subtraction wraps around zero.
2488 ANSI C defines unsigned operations such that they never overflow, and
2489      thus such cases cannot be ignored.  */
2490
2491 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2492 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2493 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2494 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2495 && code != GTU && code != GEU && code != LTU && code != LEU)
2496 return simplify_relational_operation (signed_condition (code),
2497 mode, tem, const0_rtx);
2498
2499 if (flag_unsafe_math_optimizations && code == ORDERED)
2500 return const_true_rtx;
2501
2502 if (flag_unsafe_math_optimizations && code == UNORDERED)
2503 return const0_rtx;
2504
2505 /* For modes without NaNs, if the two operands are equal, we know the
2506 result except if they have side-effects. */
2507 if (! HONOR_NANS (GET_MODE (trueop0))
2508 && rtx_equal_p (trueop0, trueop1)
2509 && ! side_effects_p (trueop0))
2510 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2511
2512 /* If the operands are floating-point constants, see if we can fold
2513 the result. */
2514 else if (GET_CODE (trueop0) == CONST_DOUBLE
2515 && GET_CODE (trueop1) == CONST_DOUBLE
2516 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2517 {
2518 REAL_VALUE_TYPE d0, d1;
2519
2520 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2521 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2522
2523 /* Comparisons are unordered iff at least one of the values is NaN. */
2524 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2525 switch (code)
2526 {
2527 case UNEQ:
2528 case UNLT:
2529 case UNGT:
2530 case UNLE:
2531 case UNGE:
2532 case NE:
2533 case UNORDERED:
2534 return const_true_rtx;
2535 case EQ:
2536 case LT:
2537 case GT:
2538 case LE:
2539 case GE:
2540 case LTGT:
2541 case ORDERED:
2542 return const0_rtx;
2543 default:
2544 return 0;
2545 }
2546
2547 equal = REAL_VALUES_EQUAL (d0, d1);
2548 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2549 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2550 }
2551
2552 /* Otherwise, see if the operands are both integers. */
2553 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2554 && (GET_CODE (trueop0) == CONST_DOUBLE
2555 || GET_CODE (trueop0) == CONST_INT)
2556 && (GET_CODE (trueop1) == CONST_DOUBLE
2557 || GET_CODE (trueop1) == CONST_INT))
2558 {
2559 int width = GET_MODE_BITSIZE (mode);
2560 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2561 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2562
2563 /* Get the two words comprising each integer constant. */
2564 if (GET_CODE (trueop0) == CONST_DOUBLE)
2565 {
2566 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2567 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2568 }
2569 else
2570 {
2571 l0u = l0s = INTVAL (trueop0);
2572 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2573 }
2574
2575 if (GET_CODE (trueop1) == CONST_DOUBLE)
2576 {
2577 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2578 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2579 }
2580 else
2581 {
2582 l1u = l1s = INTVAL (trueop1);
2583 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2584 }
2585
2586 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2587 	 we have to sign- or zero-extend the values.  */
2588 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2589 {
2590 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2591 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2592
2593 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2594 l0s |= ((HOST_WIDE_INT) (-1) << width);
2595
2596 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2597 l1s |= ((HOST_WIDE_INT) (-1) << width);
2598 }
2599 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2600 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2601
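      /* Compare in double-word precision: equality needs both words to
         match; signed order compares the high words as signed, falling
         back to the low words (unsigned) on a tie; unsigned order does
         the same with unsigned high words.  */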
2602 equal = (h0u == h1u && l0u == l1u);
2603 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2604 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2605 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2606 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2607 }
2608
2609 /* Otherwise, there are some code-specific tests we can make. */
2610 else
2611 {
2612 switch (code)
2613 {
2614 case EQ:
2615 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2616 return const0_rtx;
2617 break;
2618
2619 case NE:
2620 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2621 return const_true_rtx;
2622 break;
2623
2624 case GEU:
2625 /* Unsigned values are never negative. */
2626 if (trueop1 == const0_rtx)
2627 return const_true_rtx;
2628 break;
2629
2630 case LTU:
2631 if (trueop1 == const0_rtx)
2632 return const0_rtx;
2633 break;
2634
2635 case LEU:
2636 /* Unsigned values are never greater than the largest
2637 unsigned value. */
2638 if (GET_CODE (trueop1) == CONST_INT
2639 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2640 && INTEGRAL_MODE_P (mode))
2641 return const_true_rtx;
2642 break;
2643
2644 case GTU:
2645 if (GET_CODE (trueop1) == CONST_INT
2646 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2647 && INTEGRAL_MODE_P (mode))
2648 return const0_rtx;
2649 break;
2650
2651 case LT:
2652 /* Optimize abs(x) < 0.0. */
2653 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2654 {
2655 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2656 : trueop0;
2657 if (GET_CODE (tem) == ABS)
2658 return const0_rtx;
2659 }
2660 break;
2661
2662 case GE:
2663 /* Optimize abs(x) >= 0.0. */
2664 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2665 {
2666 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2667 : trueop0;
2668 if (GET_CODE (tem) == ABS)
2669 return const_true_rtx;
2670 }
2671 break;
2672
2673 case UNGE:
2674 /* Optimize ! (abs(x) < 0.0). */
2675 if (trueop1 == CONST0_RTX (mode))
2676 {
2677 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2678 : trueop0;
2679 if (GET_CODE (tem) == ABS)
2680 return const_true_rtx;
2681 }
2682 break;
2683
2684 default:
2685 break;
2686 }
2687
2688 return 0;
2689 }
2690
2691 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2692 as appropriate. */
2693 switch (code)
2694 {
2695 case EQ:
2696 case UNEQ:
2697 return equal ? const_true_rtx : const0_rtx;
2698 case NE:
2699 case LTGT:
2700 return ! equal ? const_true_rtx : const0_rtx;
2701 case LT:
2702 case UNLT:
2703 return op0lt ? const_true_rtx : const0_rtx;
2704 case GT:
2705 case UNGT:
2706 return op1lt ? const_true_rtx : const0_rtx;
2707 case LTU:
2708 return op0ltu ? const_true_rtx : const0_rtx;
2709 case GTU:
2710 return op1ltu ? const_true_rtx : const0_rtx;
2711 case LE:
2712 case UNLE:
2713 return equal || op0lt ? const_true_rtx : const0_rtx;
2714 case GE:
2715 case UNGE:
2716 return equal || op1lt ? const_true_rtx : const0_rtx;
2717 case LEU:
2718 return equal || op0ltu ? const_true_rtx : const0_rtx;
2719 case GEU:
2720 return equal || op1ltu ? const_true_rtx : const0_rtx;
2721 case ORDERED:
2722 return const_true_rtx;
2723 case UNORDERED:
2724 return const0_rtx;
2725 default:
2726 abort ();
2727 }
2728 }
2729 \f
2730 /* Simplify CODE, an operation with result mode MODE and three operands,
2731 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2732    a constant.  Return 0 if no simplification is possible.  */
2733
2734 rtx
2735 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2736 enum machine_mode op0_mode, rtx op0, rtx op1,
2737 rtx op2)
2738 {
2739 unsigned int width = GET_MODE_BITSIZE (mode);
2740
2741 /* VOIDmode means "infinite" precision. */
2742 if (width == 0)
2743 width = HOST_BITS_PER_WIDE_INT;
2744
2745 switch (code)
2746 {
2747 case SIGN_EXTRACT:
2748 case ZERO_EXTRACT:
2749 if (GET_CODE (op0) == CONST_INT
2750 && GET_CODE (op1) == CONST_INT
2751 && GET_CODE (op2) == CONST_INT
2752 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2753 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2754 {
2755 	  /* Extracting a bit-field from a constant.  */
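	  /* E.g. with !BITS_BIG_ENDIAN, a ZERO_EXTRACT of 4 bits at
	     bit position 8 from the constant 0x1234 yields 2.  */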
2756 HOST_WIDE_INT val = INTVAL (op0);
2757
2758 if (BITS_BIG_ENDIAN)
2759 val >>= (GET_MODE_BITSIZE (op0_mode)
2760 - INTVAL (op2) - INTVAL (op1));
2761 else
2762 val >>= INTVAL (op2);
2763
2764 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2765 {
2766 /* First zero-extend. */
2767 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2768 /* If desired, propagate sign bit. */
2769 if (code == SIGN_EXTRACT
2770 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2771 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2772 }
2773
2774 /* Clear the bits that don't belong in our mode,
2775 unless they and our sign bit are all one.
2776 So we get either a reasonable negative value or a reasonable
2777 unsigned value for this mode. */
2778 if (width < HOST_BITS_PER_WIDE_INT
2779 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2780 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2781 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2782
2783 return GEN_INT (val);
2784 }
2785 break;
2786
2787 case IF_THEN_ELSE:
2788 if (GET_CODE (op0) == CONST_INT)
2789 return op0 != const0_rtx ? op1 : op2;
2790
2791       /* Convert a != b ? a : b and a == b ? b : a to "a".  */
2792 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2793 && !HONOR_NANS (mode)
2794 && rtx_equal_p (XEXP (op0, 0), op1)
2795 && rtx_equal_p (XEXP (op0, 1), op2))
2796 return op1;
2797 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2798 && !HONOR_NANS (mode)
2799 && rtx_equal_p (XEXP (op0, 1), op1)
2800 && rtx_equal_p (XEXP (op0, 0), op2))
2801 return op2;
2802 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2803 {
2804 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2805 ? GET_MODE (XEXP (op0, 1))
2806 : GET_MODE (XEXP (op0, 0)));
2807 rtx temp;
2808 if (cmp_mode == VOIDmode)
2809 cmp_mode = op0_mode;
2810 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2811 XEXP (op0, 0), XEXP (op0, 1));
2812
2813 /* See if any simplifications were possible. */
2814 if (temp == const0_rtx)
2815 return op2;
2816 else if (temp == const_true_rtx)
2817 return op1;
2818 else if (temp)
2819 abort ();
2820
2821 /* Look for happy constants in op1 and op2. */
2822 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2823 {
2824 HOST_WIDE_INT t = INTVAL (op1);
2825 HOST_WIDE_INT f = INTVAL (op2);
2826
2827 if (t == STORE_FLAG_VALUE && f == 0)
2828 code = GET_CODE (op0);
2829 else if (t == 0 && f == STORE_FLAG_VALUE)
2830 {
2831 enum rtx_code tmp;
2832 tmp = reversed_comparison_code (op0, NULL_RTX);
2833 if (tmp == UNKNOWN)
2834 break;
2835 code = tmp;
2836 }
2837 else
2838 break;
2839
2840 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2841 }
2842 }
2843 break;
2844 case VEC_MERGE:
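      /* Operand 2 is a bitmask: element I of the result is taken from
         OP0 when bit I is set and from OP1 when it is clear.  */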
2845 if (GET_MODE (op0) != mode
2846 || GET_MODE (op1) != mode
2847 || !VECTOR_MODE_P (mode))
2848 abort ();
2849 op2 = avoid_constant_pool_reference (op2);
2850 if (GET_CODE (op2) == CONST_INT)
2851 {
2852 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2853 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2854 int mask = (1 << n_elts) - 1;
2855
2856 if (!(INTVAL (op2) & mask))
2857 return op1;
2858 if ((INTVAL (op2) & mask) == mask)
2859 return op0;
2860
2861 op0 = avoid_constant_pool_reference (op0);
2862 op1 = avoid_constant_pool_reference (op1);
2863 if (GET_CODE (op0) == CONST_VECTOR
2864 && GET_CODE (op1) == CONST_VECTOR)
2865 {
2866 rtvec v = rtvec_alloc (n_elts);
2867 unsigned int i;
2868
2869 for (i = 0; i < n_elts; i++)
2870 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2871 ? CONST_VECTOR_ELT (op0, i)
2872 : CONST_VECTOR_ELT (op1, i));
2873 return gen_rtx_CONST_VECTOR (mode, v);
2874 }
2875 }
2876 break;
2877
2878 default:
2879 abort ();
2880 }
2881
2882 return 0;
2883 }
2884
2885 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2886    Return 0 if no simplification is possible.  */
2887 rtx
2888 simplify_subreg (enum machine_mode outermode, rtx op,
2889 enum machine_mode innermode, unsigned int byte)
2890 {
2891 /* Little bit of sanity checking. */
2892 if (innermode == VOIDmode || outermode == VOIDmode
2893 || innermode == BLKmode || outermode == BLKmode)
2894 abort ();
2895
2896 if (GET_MODE (op) != innermode
2897 && GET_MODE (op) != VOIDmode)
2898 abort ();
2899
2900 if (byte % GET_MODE_SIZE (outermode)
2901 || byte >= GET_MODE_SIZE (innermode))
2902 abort ();
2903
2904 if (outermode == innermode && !byte)
2905 return op;
2906
2907 /* Simplify subregs of vector constants. */
2908 if (GET_CODE (op) == CONST_VECTOR)
2909 {
2910 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
2911 const unsigned int offset = byte / elt_size;
2912 rtx elt;
2913
2914 if (GET_MODE_INNER (innermode) == outermode)
2915 {
2916 elt = CONST_VECTOR_ELT (op, offset);
2917
2918 /* ?? We probably don't need this copy_rtx because constants
2919 can be shared. ?? */
2920
2921 return copy_rtx (elt);
2922 }
2923 else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
2924 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
2925 {
2926 return (gen_rtx_CONST_VECTOR
2927 (outermode,
2928 gen_rtvec_v (GET_MODE_NUNITS (outermode),
2929 &CONST_VECTOR_ELT (op, offset))));
2930 }
2931 else if (GET_MODE_CLASS (outermode) == MODE_INT
2932 && (GET_MODE_SIZE (outermode) % elt_size == 0))
2933 {
2934 	  /* This happens when the target register size is smaller than
2935 the vector mode, and we synthesize operations with vectors
2936 of elements that are smaller than the register size. */
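	  /* E.g. an SImode subreg of a V8QI constant packs four QImode
	     elements into a single 32-bit integer, accumulated below in
	     HIGH and SUM.  */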
2937 HOST_WIDE_INT sum = 0, high = 0;
2938 unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
2939 unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
2940 unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
2941 int shift = BITS_PER_UNIT * elt_size;
2942 unsigned HOST_WIDE_INT unit_mask;
2943
2944 unit_mask = (unsigned HOST_WIDE_INT) -1
2945 >> (sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - shift);
2946
2947 for (; n_elts--; i += step)
2948 {
2949 elt = CONST_VECTOR_ELT (op, i);
2950 if (GET_CODE (elt) == CONST_DOUBLE
2951 && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
2952 {
2953 elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
2954 elt);
2955 if (! elt)
2956 return NULL_RTX;
2957 }
2958 if (GET_CODE (elt) != CONST_INT)
2959 return NULL_RTX;
2960 /* Avoid overflow. */
2961 if (high >> (HOST_BITS_PER_WIDE_INT - shift))
2962 return NULL_RTX;
2963 high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
2964 sum = (sum << shift) + (INTVAL (elt) & unit_mask);
2965 }
2966 if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
2967 return GEN_INT (trunc_int_for_mode (sum, outermode));
2968 	  else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
2969 return immed_double_const (sum, high, outermode);
2970 else
2971 return NULL_RTX;
2972 }
2973 else if (GET_MODE_CLASS (outermode) == MODE_INT
2974 && (elt_size % GET_MODE_SIZE (outermode) == 0))
2975 {
2976 enum machine_mode new_mode
2977 = int_mode_for_mode (GET_MODE_INNER (innermode));
2978 int subbyte = byte % elt_size;
2979
2980 op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
2981 if (! op)
2982 return NULL_RTX;
2983 return simplify_subreg (outermode, op, new_mode, subbyte);
2984 }
2985 else if (GET_MODE_CLASS (outermode) == MODE_INT)
2986 /* This shouldn't happen, but let's not do anything stupid. */
2987 return NULL_RTX;
2988 }
2989
2990 /* Attempt to simplify constant to non-SUBREG expression. */
2991 if (CONSTANT_P (op))
2992 {
2993 int offset, part;
2994 unsigned HOST_WIDE_INT val = 0;
2995
2996 if (VECTOR_MODE_P (outermode))
2997 {
2998 /* Construct a CONST_VECTOR from individual subregs. */
2999 enum machine_mode submode = GET_MODE_INNER (outermode);
3000 int subsize = GET_MODE_UNIT_SIZE (outermode);
3001 int i, elts = GET_MODE_NUNITS (outermode);
3002 rtvec v = rtvec_alloc (elts);
3003 rtx elt;
3004
3005 for (i = 0; i < elts; i++, byte += subsize)
3006 {
3007 /* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
3008 /* ??? It would be nice if we could actually make such subregs
3009 on targets that allow such relocations. */
3010 if (byte >= GET_MODE_SIZE (innermode))
3011 elt = CONST0_RTX (submode);
3012 else
3013 elt = simplify_subreg (submode, op, innermode, byte);
3014 if (! elt)
3015 return NULL_RTX;
3016 RTVEC_ELT (v, i) = elt;
3017 }
3018 return gen_rtx_CONST_VECTOR (outermode, v);
3019 }
3020
3021 /* ??? This code is partly redundant with code below, but can handle
3022 the subregs of floats and similar corner cases.
3023 	 Later we should move all simplification code here and rewrite
3024 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
3025 using SIMPLIFY_SUBREG. */
3026 if (subreg_lowpart_offset (outermode, innermode) == byte
3027 && GET_CODE (op) != CONST_VECTOR)
3028 {
3029 rtx new = gen_lowpart_if_possible (outermode, op);
3030 if (new)
3031 return new;
3032 }
3033
3034       /* The same comment as above applies here.  */
3035 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
3036 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
3037 && GET_MODE_CLASS (outermode) == MODE_INT)
3038 {
3039 rtx new = constant_subword (op,
3040 (byte / UNITS_PER_WORD),
3041 innermode);
3042 if (new)
3043 return new;
3044 }
3045
3046 if (GET_MODE_CLASS (outermode) != MODE_INT
3047 && GET_MODE_CLASS (outermode) != MODE_CC)
3048 {
3049 enum machine_mode new_mode = int_mode_for_mode (outermode);
3050
3051 if (new_mode != innermode || byte != 0)
3052 {
3053 op = simplify_subreg (new_mode, op, innermode, byte);
3054 if (! op)
3055 return NULL_RTX;
3056 return simplify_subreg (outermode, op, new_mode, 0);
3057 }
3058 }
3059
3060 offset = byte * BITS_PER_UNIT;
3061 switch (GET_CODE (op))
3062 {
3063 case CONST_DOUBLE:
3064 if (GET_MODE (op) != VOIDmode)
3065 break;
3066
3067 /* We can't handle this case yet. */
3068 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
3069 return NULL_RTX;
3070
3071 part = offset >= HOST_BITS_PER_WIDE_INT;
3072 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
3073 && BYTES_BIG_ENDIAN)
3074 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
3075 && WORDS_BIG_ENDIAN))
3076 part = !part;
3077 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
3078 offset %= HOST_BITS_PER_WIDE_INT;
3079
3080 /* We've already picked the word we want from a double, so
3081 pretend this is actually an integer. */
3082 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
3083
3084 /* FALLTHROUGH */
3085 case CONST_INT:
3086 if (GET_CODE (op) == CONST_INT)
3087 val = INTVAL (op);
3088
3089 /* We don't handle synthesizing of non-integral constants yet. */
3090 if (GET_MODE_CLASS (outermode) != MODE_INT)
3091 return NULL_RTX;
3092
3093 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
3094 {
3095 if (WORDS_BIG_ENDIAN)
3096 offset = (GET_MODE_BITSIZE (innermode)
3097 - GET_MODE_BITSIZE (outermode) - offset);
3098 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
3099 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
3100 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
3101 - 2 * (offset % BITS_PER_WORD));
3102 }
3103
3104 if (offset >= HOST_BITS_PER_WIDE_INT)
3105 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
3106 else
3107 {
3108 val >>= offset;
3109 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
3110 val = trunc_int_for_mode (val, outermode);
3111 return GEN_INT (val);
3112 }
3113 default:
3114 break;
3115 }
3116 }
3117
3118 /* Changing mode twice with SUBREG => just change it once,
3119      or not at all if changing back to the starting mode of OP.  */
3120 if (GET_CODE (op) == SUBREG)
3121 {
3122 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3123 int final_offset = byte + SUBREG_BYTE (op);
3124 rtx new;
3125
3126 if (outermode == innermostmode
3127 && byte == 0 && SUBREG_BYTE (op) == 0)
3128 return SUBREG_REG (op);
3129
3130       /* The SUBREG_BYTE represents the offset, as if the value were stored
3131 	 in memory.  An irritating exception is a paradoxical subreg, where
3132 	 we define SUBREG_BYTE to be 0.  On big-endian machines, this
3133 	 value should really be negative.  For a moment, undo this exception.  */
3134 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3135 {
3136 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3137 if (WORDS_BIG_ENDIAN)
3138 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3139 if (BYTES_BIG_ENDIAN)
3140 final_offset += difference % UNITS_PER_WORD;
3141 }
3142 if (SUBREG_BYTE (op) == 0
3143 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3144 {
3145 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3146 if (WORDS_BIG_ENDIAN)
3147 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3148 if (BYTES_BIG_ENDIAN)
3149 final_offset += difference % UNITS_PER_WORD;
3150 }
3151
3152       /* See whether the resulting subreg will be paradoxical.  */
3153 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3154 {
3155 /* In nonparadoxical subregs we can't handle negative offsets. */
3156 if (final_offset < 0)
3157 return NULL_RTX;
3158 	  /* Bail out in case the resulting subreg would be incorrect.  */
3159 if (final_offset % GET_MODE_SIZE (outermode)
3160 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3161 return NULL_RTX;
3162 }
3163 else
3164 {
3165 int offset = 0;
3166 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3167
3168 	  /* In a paradoxical subreg, see if we are still looking at the lower part.
3169 	     If so, our SUBREG_BYTE will be 0.  */
3170 if (WORDS_BIG_ENDIAN)
3171 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3172 if (BYTES_BIG_ENDIAN)
3173 offset += difference % UNITS_PER_WORD;
3174 if (offset == final_offset)
3175 final_offset = 0;
3176 else
3177 return NULL_RTX;
3178 }
3179
3180 /* Recurse for further possible simplifications. */
3181 new = simplify_subreg (outermode, SUBREG_REG (op),
3182 GET_MODE (SUBREG_REG (op)),
3183 final_offset);
3184 if (new)
3185 return new;
3186 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3187 }
3188
3189 /* SUBREG of a hard register => just change the register number
3190 and/or mode. If the hard register is not valid in that mode,
3191 suppress this simplification. If the hard register is the stack,
3192 frame, or argument pointer, leave this as a SUBREG. */
3193
3194 if (REG_P (op)
3195 && (! REG_FUNCTION_VALUE_P (op)
3196 || ! rtx_equal_function_value_matters)
3197 && REGNO (op) < FIRST_PSEUDO_REGISTER
3198 #ifdef CANNOT_CHANGE_MODE_CLASS
3199 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3200 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3201 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3202 #endif
3203 && ((reload_completed && !frame_pointer_needed)
3204 || (REGNO (op) != FRAME_POINTER_REGNUM
3205 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3206 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3207 #endif
3208 ))
3209 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3210 && REGNO (op) != ARG_POINTER_REGNUM
3211 #endif
3212 && REGNO (op) != STACK_POINTER_REGNUM
3213 && subreg_offset_representable_p (REGNO (op), innermode,
3214 byte, outermode))
3215 {
3216 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3217 int final_regno = subreg_hard_regno (tem, 0);
3218
3219 /* ??? We do allow it if the current REG is not valid for
3220 its mode. This is a kludge to work around how float/complex
3221 arguments are passed on 32-bit SPARC and should be fixed. */
3222 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3223 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3224 {
3225 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3226
3227 	  /* Propagate the original regno.  We don't have any way to specify
3228 	     the offset inside the original regno, so do so only for the lowpart.
3229 	     The information is used only by alias analysis, which cannot
3230 	     grok partial registers anyway.  */
3231
3232 if (subreg_lowpart_offset (outermode, innermode) == byte)
3233 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3234 return x;
3235 }
3236 }
3237
3238 /* If we have a SUBREG of a register that we are replacing and we are
3239 replacing it with a MEM, make a new MEM and try replacing the
3240 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3241 or if we would be widening it. */
3242
3243 if (GET_CODE (op) == MEM
3244 && ! mode_dependent_address_p (XEXP (op, 0))
3245 /* Allow splitting of volatile memory references in case we don't
3246 	 have an instruction to move the whole thing.  */
3247 && (! MEM_VOLATILE_P (op)
3248 || ! have_insn_for (SET, innermode))
3249 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3250 return adjust_address_nv (op, outermode, byte);
3251
3252 /* Handle complex values represented as CONCAT
3253 of real and imaginary part. */
3254 if (GET_CODE (op) == CONCAT)
3255 {
3256 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
3257 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3258 unsigned int final_offset;
3259 rtx res;
3260
3261 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3262 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3263 if (res)
3264 return res;
3265 /* We can at least simplify it by referring directly to the relevant part. */
3266 return gen_rtx_SUBREG (outermode, part, final_offset);
3267 }
3268
3269 return NULL_RTX;
3270 }
3271 /* Make a SUBREG operation or equivalent if it folds. */
3272
3273 rtx
3274 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3275 enum machine_mode innermode, unsigned int byte)
3276 {
3277 rtx new;
3278 /* Little bit of sanity checking. */
3279 if (innermode == VOIDmode || outermode == VOIDmode
3280 || innermode == BLKmode || outermode == BLKmode)
3281 abort ();
3282
3283 if (GET_MODE (op) != innermode
3284 && GET_MODE (op) != VOIDmode)
3285 abort ();
3286
3287 if (byte % GET_MODE_SIZE (outermode)
3288 || byte >= GET_MODE_SIZE (innermode))
3289 abort ();
3290
3291 if (GET_CODE (op) == QUEUED)
3292 return NULL_RTX;
3293
3294 new = simplify_subreg (outermode, op, innermode, byte);
3295 if (new)
3296 return new;
3297
3298 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3299 return NULL_RTX;
3300
3301 return gen_rtx_SUBREG (outermode, op, byte);
3302 }
3303 /* Simplify X, an rtx expression.
3304
3305 Return the simplified expression or NULL if no simplifications
3306 were possible.
3307
3308 This is the preferred entry point into the simplification routines;
3309 however, we still allow passes to call the more specific routines.
3310
3311 Right now GCC has three (yes, three) major bodies of RTL simplification
3312 code that need to be unified.
3313
3314 1. fold_rtx in cse.c. This code uses various CSE specific
3315 information to aid in RTL simplification.
3316
3317 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3318 it uses combine specific information to aid in RTL
3319 simplification.
3320
3321 3. The routines in this file.
3322
3323
3324    Long term we want to have only one body of simplification code; to
3325 get to that state I recommend the following steps:
3326
3327 	1. Pore over fold_rtx & simplify_rtx and move any simplifications
3328 	   which do not depend on pass-specific state into these routines.
3329
3330 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3331 use this routine whenever possible.
3332
3333 	3. Allow for pass-dependent state to be provided to these
3334 	   routines and add simplifications based on the pass-dependent
3335 	   state.  Remove code from cse.c & combine.c that becomes
3336 redundant/dead.
3337
3338 It will take time, but ultimately the compiler will be easier to
3339    maintain and improve.  It's totally silly that when we add a
3340    simplification it needs to be added to 4 places (3 for RTL
3341    simplification and 1 for tree simplification).  */
3342
3343 rtx
3344 simplify_rtx (rtx x)
3345 {
3346 enum rtx_code code = GET_CODE (x);
3347 enum machine_mode mode = GET_MODE (x);
3348 rtx temp;
3349
3350 switch (GET_RTX_CLASS (code))
3351 {
3352 case '1':
3353 return simplify_unary_operation (code, mode,
3354 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3355 case 'c':
3356 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3357 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3358
3359 /* Fall through.... */
3360
3361 case '2':
3362 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3363
3364 case '3':
3365 case 'b':
3366 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3367 XEXP (x, 0), XEXP (x, 1),
3368 XEXP (x, 2));
3369
3370 case '<':
3371 temp = simplify_relational_operation (code,
3372 ((GET_MODE (XEXP (x, 0))
3373 != VOIDmode)
3374 ? GET_MODE (XEXP (x, 0))
3375 : GET_MODE (XEXP (x, 1))),
3376 XEXP (x, 0), XEXP (x, 1));
3377 #ifdef FLOAT_STORE_FLAG_VALUE
3378 if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3379 {
3380 if (temp == const0_rtx)
3381 temp = CONST0_RTX (mode);
3382 else
3383 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3384 mode);
3385 }
3386 #endif
3387 return temp;
3388
3389 case 'x':
3390 if (code == SUBREG)
3391 return simplify_gen_subreg (mode, SUBREG_REG (x),
3392 GET_MODE (SUBREG_REG (x)),
3393 SUBREG_BYTE (x));
3394 if (code == CONSTANT_P_RTX)
3395 {
3396 if (CONSTANT_P (XEXP (x, 0)))
3397 return const1_rtx;
3398 }
3399 break;
3400
3401 case 'o':
3402 if (code == LO_SUM)
3403 {
3404 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3405 if (GET_CODE (XEXP (x, 0)) == HIGH
3406 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3407 return XEXP (x, 1);
3408 }
3409 break;
3410
3411 default:
3412 break;
3413 }
3414 return NULL;
3415 }