/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
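
/* For instance, HWI_SIGN_EXTEND (2) is 0 while HWI_SIGN_EXTEND (-2) is -1,
   so the (low, high) pair describing the double-width value 2 is (2, 0)
   and the pair for -2 is (-2, -1).  */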

static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static bool associative_constant_p (rtx);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
\f
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */
static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
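
/* For example, since gen_int_mode truncates the result back into MODE,
   negating the most negative QImode value wraps around:
   neg_const_int (QImode, GEN_INT (-128)) yields (const_int -128) again,
   because +128 does not fit in the 8-bit mode.  */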

\f
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
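
/* As an illustration of the above: simplify_gen_binary (PLUS, SImode,
   X, const0_rtx) folds to X itself for integral modes, while a pair of
   operands with no known simplification, say two distinct pseudo
   registers, comes back as a freshly generated (plus ...) rtx.  */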
\f
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
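
/* For example, if X is a MEM whose address is a SYMBOL_REF into the
   constant pool holding the DFmode constant 1.0, the CONST_DOUBLE for
   1.0 is returned; a MEM that does not address the pool is returned
   unchanged.  */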
\f
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
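
/* E.g. simplify_gen_unary (NEG, SImode, GEN_INT (5), SImode) folds to
   (const_int -5), whereas NEG of a register cannot be folded and is
   returned as a new (neg ...) rtx.  */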

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
\f
/* Likewise, for relational operations.
   CMP_MODE specifies the mode in which the comparison is done.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  if (cmp_mode != VOIDmode)
    {
      tem = simplify_relational_operation (code, cmp_mode, op0, op1);

      if (tem)
        {
#ifdef FLOAT_STORE_FLAG_VALUE
          if (GET_MODE_CLASS (mode) == MODE_FLOAT)
            {
              REAL_VALUE_TYPE val;
              if (tem == const0_rtx)
                return CONST0_RTX (mode);
              if (tem != const_true_rtx)
                abort ();
              val = FLOAT_STORE_FLAG_VALUE (mode);
              return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
            }
#endif
          return tem;
        }
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
    {
      if (code == NE)
        {
          if (GET_MODE (op0) == mode)
            return op0;
          return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                          XEXP (op0, 0), XEXP (op0, 1));
        }
      else if (code == EQ)
        {
          enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
          if (new != UNKNOWN)
            return simplify_gen_relational (new, mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
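
/* As an example of the EQ/NE rewrites above, simplifying
   (eq (ne:SI X (const_int 0)) (const_int 0)) in SImode reverses the
   inner comparison and yields (eq:SI X (const_int 0)) rather than a
   nested test, assuming the inner comparison could not be folded
   outright.  */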
\f
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
        enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
        rtx op = (XEXP (x, 0) == old
                  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

        return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
        simplify_gen_binary (code, mode,
                             simplify_replace_rtx (XEXP (x, 0), old, new),
                             simplify_replace_rtx (XEXP (x, 1), old, new));
    case '<':
      {
        enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
                                     ? GET_MODE (XEXP (x, 0))
                                     : GET_MODE (XEXP (x, 1)));
        rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
        rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
        return simplify_gen_relational (code, mode, op_mode, op0, op1);
      }

    case '3':
    case 'b':
      {
        enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
        rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

        return
          simplify_gen_ternary (code, mode,
                                (op_mode != VOIDmode
                                 ? op_mode
                                 : GET_MODE (op0)),
                                op0,
                                simplify_replace_rtx (XEXP (x, 1), old, new),
                                simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          rtx exp;
          exp = simplify_gen_subreg (GET_MODE (x),
                                     simplify_replace_rtx (SUBREG_REG (x),
                                                           old, new),
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          if (exp)
            x = exp;
        }
      return x;

    case 'o':
      if (code == MEM)
        return replace_equiv_address_nv (x,
                                         simplify_replace_rtx (XEXP (x, 0),
                                                               old, new));
      else if (code == LO_SUM)
        {
          rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

          /* (lo_sum (high x) x) -> x */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (REG_P (old) && REGNO (x) == REGNO (old))
            return new;
        }

      return x;

    default:
      return x;
    }
  return x;
}
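
/* For instance, replacing (reg R) by (const_int 4) in
   (plus:SI (reg R) (const_int 3)) substitutes recursively and then
   folds the addition, producing (const_int 7).  */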
\f
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && !VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE (trueop))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
        abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
          || GET_CODE (trueop) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (trueop) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = trueop;
          else
            {
              enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              if (in_n_elts >= n_elts || n_elts % in_n_elts)
                abort ();
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (trueop, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          abort ();
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            abort ();

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;

        default:
          abort ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT i;
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
      switch (code)
        {
        case FIX: i = REAL_VALUE_FIX (d); break;
        case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
        default:
          abort ();
        }
      return gen_int_mode (i, mode);
    }

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (GET_RTX_CLASS (GET_CODE (op)) == '<'
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (plus X -1)) can become (neg X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == constm1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

          /* Similarly, (not (neg X)) is (plus X -1).  */
          if (GET_CODE (op) == NEG)
            return plus_constant (XEXP (op, 0), -1);

          /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == XOR
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);


          /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
             operands other than 1, but that is not valid.  We could do a
             similar simplification for (not (lshiftrt C X)) where C is
             just the sign bit, but this doesn't seem common enough to
             bother with.  */
          if (GET_CODE (op) == ASHIFT
              && XEXP (op, 0) == const1_rtx)
            {
              temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
              return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
            }

          /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
             by reversing the comparison code if valid.  */
          if (STORE_FLAG_VALUE == -1
              && GET_RTX_CLASS (GET_CODE (op)) == '<'
              && (reversed = reversed_comparison_code (op, NULL_RTX))
                 != UNKNOWN)
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (ashiftrt foo C)) where C is the number of bits in FOO
             minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
             so we can perform the above simplification.  */

          if (STORE_FLAG_VALUE == -1
              && GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_relational (GE, mode, VOIDmode,
                                            XEXP (op, 0), const0_rtx);

          break;

        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);

          /* (neg (plus X 1)) can become (not X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == const1_rtx)
            return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

          /* Similarly, (neg (not X)) is (plus X 1).  */
          if (GET_CODE (op) == NOT)
            return plus_constant (XEXP (op, 0), 1);

          /* (neg (minus X Y)) can become (minus Y X).  This transformation
             isn't safe for modes with signed zeros, since if X and Y are
             both +0, (minus Y X) is the same as (minus X Y).  If the
             rounding mode is towards +infinity (or -infinity) then the two
             expressions will be rounded differently.  */
          if (GET_CODE (op) == MINUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
                                        XEXP (op, 0));

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          if (GET_CODE (op) == PLUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
            }

          /* (neg (mult A B)) becomes (mult (neg A) B).
             This works even for floating-point values.  */
          if (GET_CODE (op) == MULT
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
            }

          /* NEG commutes with ASHIFT since it is multiplication.  Only do
             this if we can then eliminate the NEG (e.g., if the operand
             is a constant).  */
          if (GET_CODE (op) == ASHIFT)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
                                               mode);
              if (temp)
                return simplify_gen_binary (ASHIFT, mode, temp,
                                            XEXP (op, 1));
            }

          break;

        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
        case ZERO_EXTEND:
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
          break;
#endif

        default:
          break;
        }

      return 0;
    }
}
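
/* Two examples of the constant folding above:
   simplify_unary_operation (NOT, SImode, const0_rtx, SImode) yields
   (const_int -1), and ZERO_EXTEND of (const_int -1) from QImode to
   SImode yields (const_int 255).  */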
\f
/* Subroutine of simplify_associative_operation.  Return true if rtx OP
   is a suitable integer or floating point immediate constant.  */
static bool
associative_constant_p (rtx op)
{
  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE)
    return true;
  op = avoid_constant_pool_reference (op);
  return GET_CODE (op) == CONST_INT
         || GET_CODE (op) == CONST_DOUBLE;
}

/* Subroutine of simplify_binary_operation to simplify an associative
   binary operation CODE with result mode MODE, operating on OP0 and OP1.
   Return 0 if no simplification is possible.  */
static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Simplify (x op c1) op c2 as x op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && associative_constant_p (op1)
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (! tem)
        return tem;
      return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
    }

  /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && GET_CODE (op1) == code
      && associative_constant_p (XEXP (op0, 1))
      && associative_constant_p (XEXP (op1, 1)))
    {
      rtx c = simplify_binary_operation (code, mode,
                                         XEXP (op0, 1), XEXP (op1, 1));
      if (! c)
        return 0;
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, c);
    }

  /* Canonicalize (x op c) op y as (x op y) op c.  */
  if (GET_CODE (op0) == code
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
      return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  /* Canonicalize x op (y op c) as (x op y) op c.  */
  if (GET_CODE (op1) == code
      && associative_constant_p (XEXP (op1, 1)))
    {
      tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
    }

  return 0;
}
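
/* As an illustration of the first transformation above,
   (plus (plus X (const_int 4)) (const_int 8)) becomes
   (plus X (const_int 12)) because the two constants fold.  */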

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (trueop0, i),
                                             CONST_VECTOR_ELT (trueop1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (HONOR_SNANS (mode)
          && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
        return 0;

      if (code == DIV
          && REAL_VALUES_EQUAL (f1, dconst0)
          && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
        return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV: case MOD: case UDIV: case UMOD:
          /* We'd need to include tree.h to do this and it doesn't seem worth
             it.  */
          return 0;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT: case ASHIFTRT:
        case ASHIFT:
        case ROTATE: case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and nonzero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Reassociate floating point addition only when the user
             specifies unsafe math optimizations.  */
          if (FLOAT_MODE_P (mode)
              && flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case COMPARE:
#ifdef HAVE_cc0
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;
#endif

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }
          break;

        case MINUS:
          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);

          /* Change subtraction from zero into negation.  (0 - x) is the
             same as -x when x is NaN, infinite, or finite and nonzero.
             But if the mode has signed zeros, and does not round towards
             -infinity, then 0 - 0 is 0, not -0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
            return simplify_gen_unary (NEG, mode, op1, mode);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return simplify_gen_unary (NOT, mode, op1, mode);

          /* Subtracting 0 has no effect unless the mode has signed zeros
             and supports rounding towards -infinity.  In such a case,
             0 - 0 is -0.  */
          if (!(HONOR_SIGNED_ZEROS (mode)
                && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* (a - (-b)) -> (a + b).  True even for IEEE.  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                            GET_MODE (XEXP (op1, 1)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                            GET_MODE (XEXP (op1, 0)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
            }
          break;

        case MULT:
          if (trueop1 == constm1_rtx)
            return simplify_gen_unary (NEG, mode, op0, mode);

          /* Maybe simplify x * 0 to 0.  The reduction is not valid if
             x is NaN, since x * 0 is then also NaN.  Nor is it valid
             when the mode has signed zeros, since multiplying a negative
             number by 0 will give -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for
             signalling NaNs.  */
          if (!HONOR_SNANS (mode)
              && trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1)
              && ! rtx_equal_function_value_matters)
            return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

          /* x*2 is x+x and x*(-1) is -x */
          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
              && GET_MODE (op0) == mode)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (REAL_VALUES_EQUAL (d, dconst2))
                return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

              if (REAL_VALUES_EQUAL (d, dconstm1))
                return simplify_gen_unary (NEG, mode, op0, mode);
            }

          /* Reassociate multiplication, but for floating point MULTs
             only when the user specifies unsafe math optimizations.  */
          if (! FLOAT_MODE_P (mode)
              || flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case IOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case XOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return simplify_gen_unary (NOT, mode, op0, mode);
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case AND:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op0;
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UDIV:
          /* Convert divide by power of two into shift (divide by 1 handled
             below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));

          /* Fall through....  */

        case DIV:
          if (trueop1 == CONST1_RTX (mode))
            {
              /* On some platforms DIV uses narrower mode than its
                 operands.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              else
                return op0;
            }

          /* Maybe change 0 / x to 0.  This transformation isn't safe for
             modes with NaNs, since 0 / 0 will then be NaN rather than 0.
             Nor is it safe for modes with signed zeros, since dividing
             0 by a negative number gives -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop0 == CONST0_RTX (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Change division by a constant into multiplication.  Only do
             this with -funsafe-math-optimizations.  */
          else if (GET_CODE (trueop1) == CONST_DOUBLE
                   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
                   && trueop1 != CONST0_RTX (mode)
                   && flag_unsafe_math_optimizations)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (! REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
          break;

        case UMOD:
          /* Handle modulus by power of two (mod with 1 handled below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && exact_log2 (INTVAL (trueop1)) > 0)
            return simplify_gen_binary (AND, mode, op0,
                                        GEN_INT (INTVAL (op1) - 1));

          /* Fall through....  */

        case MOD:
          if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
              && ! side_effects_p (op0) && ! side_effects_p (op1))
            return const0_rtx;
          break;

        case ROTATERT:
        case ROTATE:
        case ASHIFTRT:
          /* Rotating ~0 always results in ~0.  */
          if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Fall through....  */

        case ASHIFT:
        case LSHIFTRT:
          if (trueop1 == const0_rtx)
            return op0;
          if (trueop0 == const0_rtx && ! side_effects_p (op1))
            return op0;
          break;

        case SMIN:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SMAX:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
                  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMIN:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMAX:
          if (trueop1 == constm1_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
          /* ??? There are simplifications that can be done.  */
          return 0;

        case VEC_SELECT:
          if (!VECTOR_MODE_P (mode))
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (mode
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL
                  || XVECLEN (trueop1, 0) != 1
                  || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
                abort ();

              if (GET_CODE (trueop0) == CONST_VECTOR)
                return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
            }
          else
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (GET_MODE_INNER (mode)
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL)
                abort ();

              if (GET_CODE (trueop0) == CONST_VECTOR)
                {
                  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                  rtvec v = rtvec_alloc (n_elts);
                  unsigned int i;

                  if (XVECLEN (trueop1, 0) != (int) n_elts)
                    abort ();
                  for (i = 0; i < n_elts; i++)
                    {
                      rtx x = XVECEXP (trueop1, 0, i);

                      if (GET_CODE (x) != CONST_INT)
                        abort ();
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
                    }

                  return gen_rtx_CONST_VECTOR (mode, v);
                }
            }
          return 0;
        case VEC_CONCAT:
          {
            enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                          ? GET_MODE (trueop0)
                                          : GET_MODE_INNER (mode));
            enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                          ? GET_MODE (trueop1)
                                          : GET_MODE_INNER (mode));

            if (!VECTOR_MODE_P (mode)
                || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    != GET_MODE_SIZE (mode)))
              abort ();

            if ((VECTOR_MODE_P (op0_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op0_mode)))
                || (!VECTOR_MODE_P (op0_mode)
                    && GET_MODE_INNER (mode) != op0_mode))
              abort ();

            if ((VECTOR_MODE_P (op1_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op1_mode)))
                || (!VECTOR_MODE_P (op1_mode)
                    && GET_MODE_INNER (mode) != op1_mode))
              abort ();

            if ((GET_CODE (trueop0) == CONST_VECTOR
                 || GET_CODE (trueop0) == CONST_INT
                 || GET_CODE (trueop0) == CONST_DOUBLE)
                && (GET_CODE (trueop1) == CONST_VECTOR
                    || GET_CODE (trueop1) == CONST_INT
                    || GET_CODE (trueop1) == CONST_DOUBLE))
              {
                int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                rtvec v = rtvec_alloc (n_elts);
                unsigned int i;
                unsigned in_n_elts = 1;

                if (VECTOR_MODE_P (op0_mode))
                  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
                for (i = 0; i < n_elts; i++)
                  {
                    if (i < in_n_elts)
                      {
                        if (!VECTOR_MODE_P (op0_mode))
                          RTVEC_ELT (v, i) = trueop0;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                      }
                    else
                      {
                        if (!VECTOR_MODE_P (op1_mode))
                          RTVEC_ELT (v, i) = trueop1;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                               i - in_n_elts);
                      }
                  }

                return gen_rtx_CONST_VECTOR (mode, v);
              }
          }
          return 0;

        default:
          abort ();
        }

      return 0;
    }
1905
1906 /* Get the integer argument values in two forms:
1907 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
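/* For example, with width == 8 and trueop0 == (const_int 0xff),
   ARG0 is 255 (zero-extended) while ARG0S is -1 (sign-extended),
   so DIV below sees -1 where UDIV sees 255.  */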
1908
1909 arg0 = INTVAL (trueop0);
1910 arg1 = INTVAL (trueop1);
1911
1912 if (width < HOST_BITS_PER_WIDE_INT)
1913 {
1914 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1915 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1916
1917 arg0s = arg0;
1918 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1919 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1920
1921 arg1s = arg1;
1922 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1923 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1924 }
1925 else
1926 {
1927 arg0s = arg0;
1928 arg1s = arg1;
1929 }
1930
1931 /* Compute the value of the arithmetic. */
1932
1933 switch (code)
1934 {
1935 case PLUS:
1936 val = arg0s + arg1s;
1937 break;
1938
1939 case MINUS:
1940 val = arg0s - arg1s;
1941 break;
1942
1943 case MULT:
1944 val = arg0s * arg1s;
1945 break;
1946
1947 case DIV:
1948 if (arg1s == 0
1949 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1950 && arg1s == -1))
1951 return 0;
1952 val = arg0s / arg1s;
1953 break;
1954
1955 case MOD:
1956 if (arg1s == 0
1957 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1958 && arg1s == -1))
1959 return 0;
1960 val = arg0s % arg1s;
1961 break;
1962
1963 case UDIV:
1964 if (arg1 == 0
1965 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1966 && arg1s == -1))
1967 return 0;
1968 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1969 break;
1970
1971 case UMOD:
1972 if (arg1 == 0
1973 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1974 && arg1s == -1))
1975 return 0;
1976 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1977 break;
1978
1979 case AND:
1980 val = arg0 & arg1;
1981 break;
1982
1983 case IOR:
1984 val = arg0 | arg1;
1985 break;
1986
1987 case XOR:
1988 val = arg0 ^ arg1;
1989 break;
1990
1991 case LSHIFTRT:
1992 /* If the shift count is undefined, don't fold it; let the machine do
1993 what it wants. But truncate it if the machine will do that. */
1994 if (arg1 < 0)
1995 return 0;
1996
1997 #ifdef SHIFT_COUNT_TRUNCATED
1998 if (SHIFT_COUNT_TRUNCATED)
1999 arg1 %= width;
2000 #endif
2001
2002 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2003 break;
2004
2005 case ASHIFT:
2006 if (arg1 < 0)
2007 return 0;
2008
2009 #ifdef SHIFT_COUNT_TRUNCATED
2010 if (SHIFT_COUNT_TRUNCATED)
2011 arg1 %= width;
2012 #endif
2013
2014 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2015 break;
2016
2017 case ASHIFTRT:
2018 if (arg1 < 0)
2019 return 0;
2020
2021 #ifdef SHIFT_COUNT_TRUNCATED
2022 if (SHIFT_COUNT_TRUNCATED)
2023 arg1 %= width;
2024 #endif
2025
2026 val = arg0s >> arg1;
2027
2028 /* The bootstrap compiler may not have sign-extended the right shift.
2029 Manually extend the sign to ensure bootstrap cc matches gcc. */
2030 if (arg0s < 0 && arg1 > 0)
2031 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2032
2033 break;
2034
2035 case ROTATERT:
2036 if (arg1 < 0)
2037 return 0;
2038
2039 arg1 %= width;
2040 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2041 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2042 break;
2043
2044 case ROTATE:
2045 if (arg1 < 0)
2046 return 0;
2047
2048 arg1 %= width;
2049 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2050 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2051 break;
2052
2053 case COMPARE:
2054 /* Do nothing here. */
2055 return 0;
2056
2057 case SMIN:
2058 val = arg0s <= arg1s ? arg0s : arg1s;
2059 break;
2060
2061 case UMIN:
2062 val = ((unsigned HOST_WIDE_INT) arg0
2063 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2064 break;
2065
2066 case SMAX:
2067 val = arg0s > arg1s ? arg0s : arg1s;
2068 break;
2069
2070 case UMAX:
2071 val = ((unsigned HOST_WIDE_INT) arg0
2072 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2073 break;
2074
2075 case SS_PLUS:
2076 case US_PLUS:
2077 case SS_MINUS:
2078 case US_MINUS:
2079 /* ??? There are simplifications that can be done. */
2080 return 0;
2081
2082 default:
2083 abort ();
2084 }
2085
2086 val = trunc_int_for_mode (val, mode);
2087
2088 return GEN_INT (val);
2089 }
2090 \f
2091 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2092 PLUS or MINUS.
2093
2094 Rather than testing for specific cases, we do this by a brute-force method
2095 and do all possible simplifications until no more changes occur. Then
2096 we rebuild the operation.
2097
2098 If FORCE is true, then always generate the rtx. This is used to
2099 canonicalize stuff emitted from simplify_gen_binary. Note that this
2100 can still fail if the rtx is too complex. It won't fail just because
2101 the result is not 'simpler' than the input, however. */
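/* A sketch of the expansion: (minus (plus A B) (neg C)), where A, B
   and C stand for arbitrary operands, is first flattened into the ops
   array as { +A, +B, +C } (subtracting a NEG becomes an addition);
   pairs are then simplified against each other, and the survivors are
   rebuilt into a PLUS/MINUS chain.  */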
2102
2103 struct simplify_plus_minus_op_data
2104 {
2105 rtx op;
2106 int neg;
2107 };
2108
2109 static int
2110 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2111 {
2112 const struct simplify_plus_minus_op_data *d1 = p1;
2113 const struct simplify_plus_minus_op_data *d2 = p2;
2114
2115 return (commutative_operand_precedence (d2->op)
2116 - commutative_operand_precedence (d1->op));
2117 }
2118
2119 static rtx
2120 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2121 rtx op1, int force)
2122 {
2123 struct simplify_plus_minus_op_data ops[8];
2124 rtx result, tem;
2125 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2126 int first, negate, changed;
2127 int i, j;
2128
2129 memset (ops, 0, sizeof ops);
2130
2131 /* Set up the two operands and then expand them until nothing has been
2132 changed. If we run out of room in our array, give up; this should
2133 almost never happen. */
2134
2135 ops[0].op = op0;
2136 ops[0].neg = 0;
2137 ops[1].op = op1;
2138 ops[1].neg = (code == MINUS);
2139
2140 do
2141 {
2142 changed = 0;
2143
2144 for (i = 0; i < n_ops; i++)
2145 {
2146 rtx this_op = ops[i].op;
2147 int this_neg = ops[i].neg;
2148 enum rtx_code this_code = GET_CODE (this_op);
2149
2150 switch (this_code)
2151 {
2152 case PLUS:
2153 case MINUS:
2154 if (n_ops == 7)
2155 return NULL_RTX;
2156
2157 ops[n_ops].op = XEXP (this_op, 1);
2158 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2159 n_ops++;
2160
2161 ops[i].op = XEXP (this_op, 0);
2162 input_ops++;
2163 changed = 1;
2164 break;
2165
2166 case NEG:
2167 ops[i].op = XEXP (this_op, 0);
2168 ops[i].neg = ! this_neg;
2169 changed = 1;
2170 break;
2171
2172 case CONST:
2173 if (n_ops < 7
2174 && GET_CODE (XEXP (this_op, 0)) == PLUS
2175 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2176 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2177 {
2178 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2179 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2180 ops[n_ops].neg = this_neg;
2181 n_ops++;
2182 input_consts++;
2183 changed = 1;
2184 }
2185 break;
2186
2187 case NOT:
2188 /* ~a -> (-a - 1) */
2189 if (n_ops != 7)
2190 {
2191 ops[n_ops].op = constm1_rtx;
2192 ops[n_ops++].neg = this_neg;
2193 ops[i].op = XEXP (this_op, 0);
2194 ops[i].neg = !this_neg;
2195 changed = 1;
2196 }
2197 break;
2198
2199 case CONST_INT:
2200 if (this_neg)
2201 {
2202 ops[i].op = neg_const_int (mode, this_op);
2203 ops[i].neg = 0;
2204 changed = 1;
2205 }
2206 break;
2207
2208 default:
2209 break;
2210 }
2211 }
2212 }
2213 while (changed);
2214
2215 /* If we only have two operands, we can't do anything. */
2216 if (n_ops <= 2 && !force)
2217 return NULL_RTX;
2218
2219 /* Count the number of CONSTs we didn't split above. */
2220 for (i = 0; i < n_ops; i++)
2221 if (GET_CODE (ops[i].op) == CONST)
2222 input_consts++;
2223
2224 /* Now simplify each pair of operands until nothing changes. The first
2225 time through just simplify constants against each other. */
2226
2227 first = 1;
2228 do
2229 {
2230 changed = first;
2231
2232 for (i = 0; i < n_ops - 1; i++)
2233 for (j = i + 1; j < n_ops; j++)
2234 {
2235 rtx lhs = ops[i].op, rhs = ops[j].op;
2236 int lneg = ops[i].neg, rneg = ops[j].neg;
2237
2238 if (lhs != 0 && rhs != 0
2239 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2240 {
2241 enum rtx_code ncode = PLUS;
2242
2243 if (lneg != rneg)
2244 {
2245 ncode = MINUS;
2246 if (lneg)
2247 tem = lhs, lhs = rhs, rhs = tem;
2248 }
2249 else if (swap_commutative_operands_p (lhs, rhs))
2250 tem = lhs, lhs = rhs, rhs = tem;
2251
2252 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2253
2254 /* Reject "simplifications" that just wrap the two
2255 arguments in a CONST. Failure to do so can result
2256 in infinite recursion with simplify_binary_operation
2257 when it calls us to simplify CONST operations. */
2258 if (tem
2259 && ! (GET_CODE (tem) == CONST
2260 && GET_CODE (XEXP (tem, 0)) == ncode
2261 && XEXP (XEXP (tem, 0), 0) == lhs
2262 && XEXP (XEXP (tem, 0), 1) == rhs)
2263 /* Don't allow -x + -1 -> ~x simplifications in the
2264 first pass. This allows us the chance to combine
2265 the -1 with other constants. */
2266 && ! (first
2267 && GET_CODE (tem) == NOT
2268 && XEXP (tem, 0) == rhs))
2269 {
2270 lneg &= rneg;
2271 if (GET_CODE (tem) == NEG)
2272 tem = XEXP (tem, 0), lneg = !lneg;
2273 if (GET_CODE (tem) == CONST_INT && lneg)
2274 tem = neg_const_int (mode, tem), lneg = 0;
2275
2276 ops[i].op = tem;
2277 ops[i].neg = lneg;
2278 ops[j].op = NULL_RTX;
2279 changed = 1;
2280 }
2281 }
2282 }
2283
2284 first = 0;
2285 }
2286 while (changed);
2287
2288 /* Pack all the operands to the lower-numbered entries. */
2289 for (i = 0, j = 0; j < n_ops; j++)
2290 if (ops[j].op)
2291 ops[i++] = ops[j];
2292 n_ops = i;
2293
2294 /* Sort the operations based on swap_commutative_operands_p. */
2295 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2296
2297 /* We suppressed creation of trivial CONST expressions in the
2298 combination loop to avoid recursion. Create one manually now.
2299 The combination loop should have ensured that there is exactly
2300 one CONST_INT, and the sort will have ensured that it is last
2301 in the array and that any other constant will be next-to-last. */
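/* For example, if the array ends with { (symbol_ref X), (const_int 4) },
   plus_constant folds the pair into (const (plus (symbol_ref X)
   (const_int 4))) and n_ops drops by one.  */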
2302
2303 if (n_ops > 1
2304 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2305 && CONSTANT_P (ops[n_ops - 2].op))
2306 {
2307 rtx value = ops[n_ops - 1].op;
2308 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2309 value = neg_const_int (mode, value);
2310 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2311 n_ops--;
2312 }
2313
2314 /* Count the number of CONSTs that we generated. */
2315 n_consts = 0;
2316 for (i = 0; i < n_ops; i++)
2317 if (GET_CODE (ops[i].op) == CONST)
2318 n_consts++;
2319
2320 /* Give up if we didn't reduce the number of operands we had. Make
2321 sure we count a CONST as two operands. If we have the same
2322 number of operands, but have made more CONSTs than before, this
2323 is also an improvement, so accept it. */
2324 if (!force
2325 && (n_ops + n_consts > input_ops
2326 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2327 return NULL_RTX;
2328
2329 /* Put a non-negated operand first. If there aren't any, make all
2330 operands positive and negate the whole thing later. */
2331
2332 negate = 0;
2333 for (i = 0; i < n_ops && ops[i].neg; i++)
2334 continue;
2335 if (i == n_ops)
2336 {
2337 for (i = 0; i < n_ops; i++)
2338 ops[i].neg = 0;
2339 negate = 1;
2340 }
2341 else if (i != 0)
2342 {
2343 tem = ops[0].op;
2344 ops[0] = ops[i];
2345 ops[i].op = tem;
2346 ops[i].neg = 1;
2347 }
2348
2349 /* Now make the result by performing the requested operations. */
2350 result = ops[0].op;
2351 for (i = 1; i < n_ops; i++)
2352 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2353 mode, result, ops[i].op);
2354
2355 return negate ? gen_rtx_NEG (mode, result) : result;
2356 }
2357
2358 /* Like simplify_binary_operation except used for relational operators.
2359 MODE is the mode of the operands, not that of the result. If MODE
2360 is VOIDmode, both operands must also be VOIDmode and we compare the
2361 operands in "infinite precision".
2362
2363 If no simplification is possible, this function returns zero. Otherwise,
2364 it returns either const_true_rtx or const0_rtx. */
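/* For example, (eq (const_int 4) (const_int 4)) in VOIDmode folds to
   const_true_rtx, and an LTU comparison against (const_int 0) folds
   to const0_rtx, since no unsigned value is less than zero.  */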
2365
2366 rtx
2367 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2368 rtx op0, rtx op1)
2369 {
2370 int equal, op0lt, op0ltu, op1lt, op1ltu;
2371 rtx tem;
2372 rtx trueop0;
2373 rtx trueop1;
2374
2375 if (mode == VOIDmode
2376 && (GET_MODE (op0) != VOIDmode
2377 || GET_MODE (op1) != VOIDmode))
2378 abort ();
2379
2380 /* If op0 is a compare, extract the comparison arguments from it. */
2381 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2382 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2383
2384 trueop0 = avoid_constant_pool_reference (op0);
2385 trueop1 = avoid_constant_pool_reference (op1);
2386
2387 /* We can't simplify MODE_CC values since we don't know what the
2388 actual comparison is. */
2389 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2390 return 0;
2391
2392 /* Make sure the constant is second. */
2393 if (swap_commutative_operands_p (trueop0, trueop1))
2394 {
2395 tem = op0, op0 = op1, op1 = tem;
2396 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2397 code = swap_condition (code);
2398 }
2399
2400 /* For integer comparisons of A and B, we may be able to simplify A - B and
2401 then simplify a comparison of that with zero. If A and B are both either
2402 a register or a CONST_INT, this can't help; testing for these cases will
2403 prevent infinite recursion here and speed things up.
2404
2405 If CODE is an unsigned comparison, then we can never do this optimization,
2406 because it gives an incorrect result if the subtraction wraps around zero.
2407 ANSI C defines unsigned operations such that they never overflow, and
2408 thus such cases cannot be ignored. */
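/* Concretely, in QImode (ltu 1 255) is true, but 1 - 255 wraps to 2
   and the signed test 2 < 0 is false, so GTU/GEU/LTU/LEU are excluded
   below.  */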
2409
2410 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2411 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2412 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2413 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2414 && code != GTU && code != GEU && code != LTU && code != LEU)
2415 return simplify_relational_operation (signed_condition (code),
2416 mode, tem, const0_rtx);
2417
2418 if (flag_unsafe_math_optimizations && code == ORDERED)
2419 return const_true_rtx;
2420
2421 if (flag_unsafe_math_optimizations && code == UNORDERED)
2422 return const0_rtx;
2423
2424 /* For modes without NaNs, if the two operands are equal, we know the
2425 result except if they have side-effects. */
2426 if (! HONOR_NANS (GET_MODE (trueop0))
2427 && rtx_equal_p (trueop0, trueop1)
2428 && ! side_effects_p (trueop0))
2429 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2430
2431 /* If the operands are floating-point constants, see if we can fold
2432 the result. */
2433 else if (GET_CODE (trueop0) == CONST_DOUBLE
2434 && GET_CODE (trueop1) == CONST_DOUBLE
2435 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2436 {
2437 REAL_VALUE_TYPE d0, d1;
2438
2439 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2440 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2441
2442 /* Comparisons are unordered iff at least one of the values is NaN. */
2443 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2444 switch (code)
2445 {
2446 case UNEQ:
2447 case UNLT:
2448 case UNGT:
2449 case UNLE:
2450 case UNGE:
2451 case NE:
2452 case UNORDERED:
2453 return const_true_rtx;
2454 case EQ:
2455 case LT:
2456 case GT:
2457 case LE:
2458 case GE:
2459 case LTGT:
2460 case ORDERED:
2461 return const0_rtx;
2462 default:
2463 return 0;
2464 }
2465
2466 equal = REAL_VALUES_EQUAL (d0, d1);
2467 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2468 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2469 }
2470
2471 /* Otherwise, see if the operands are both integers. */
2472 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2473 && (GET_CODE (trueop0) == CONST_DOUBLE
2474 || GET_CODE (trueop0) == CONST_INT)
2475 && (GET_CODE (trueop1) == CONST_DOUBLE
2476 || GET_CODE (trueop1) == CONST_INT))
2477 {
2478 int width = GET_MODE_BITSIZE (mode);
2479 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2480 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2481
2482 /* Get the two words comprising each integer constant. */
2483 if (GET_CODE (trueop0) == CONST_DOUBLE)
2484 {
2485 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2486 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2487 }
2488 else
2489 {
2490 l0u = l0s = INTVAL (trueop0);
2491 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2492 }
2493
2494 if (GET_CODE (trueop1) == CONST_DOUBLE)
2495 {
2496 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2497 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2498 }
2499 else
2500 {
2501 l1u = l1s = INTVAL (trueop1);
2502 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2503 }
2504
2505 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2506 we have to sign or zero-extend the values. */
2507 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2508 {
2509 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2510 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2511
2512 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2513 l0s |= ((HOST_WIDE_INT) (-1) << width);
2514
2515 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2516 l1s |= ((HOST_WIDE_INT) (-1) << width);
2517 }
2518 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2519 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2520
2521 equal = (h0u == h1u && l0u == l1u);
2522 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2523 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2524 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2525 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2526 }
2527
2528 /* Otherwise, there are some code-specific tests we can make. */
2529 else
2530 {
2531 switch (code)
2532 {
2533 case EQ:
2534 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2535 return const0_rtx;
2536 break;
2537
2538 case NE:
2539 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2540 return const_true_rtx;
2541 break;
2542
2543 case GEU:
2544 /* Unsigned values are never negative. */
2545 if (trueop1 == const0_rtx)
2546 return const_true_rtx;
2547 break;
2548
2549 case LTU:
2550 if (trueop1 == const0_rtx)
2551 return const0_rtx;
2552 break;
2553
2554 case LEU:
2555 /* Unsigned values are never greater than the largest
2556 unsigned value. */
2557 if (GET_CODE (trueop1) == CONST_INT
2558 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2559 && INTEGRAL_MODE_P (mode))
2560 return const_true_rtx;
2561 break;
2562
2563 case GTU:
2564 if (GET_CODE (trueop1) == CONST_INT
2565 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2566 && INTEGRAL_MODE_P (mode))
2567 return const0_rtx;
2568 break;
2569
2570 case LT:
2571 /* Optimize abs(x) < 0.0. */
2572 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2573 {
2574 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2575 : trueop0;
2576 if (GET_CODE (tem) == ABS)
2577 return const0_rtx;
2578 }
2579 break;
2580
2581 case GE:
2582 /* Optimize abs(x) >= 0.0. */
2583 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2584 {
2585 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2586 : trueop0;
2587 if (GET_CODE (tem) == ABS)
2588 return const_true_rtx;
2589 }
2590 break;
2591
2592 case UNGE:
2593 /* Optimize ! (abs(x) < 0.0). */
2594 if (trueop1 == CONST0_RTX (mode))
2595 {
2596 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2597 : trueop0;
2598 if (GET_CODE (tem) == ABS)
2599 return const_true_rtx;
2600 }
2601 break;
2602
2603 default:
2604 break;
2605 }
2606
2607 return 0;
2608 }
2609
2610 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2611 as appropriate. */
2612 switch (code)
2613 {
2614 case EQ:
2615 case UNEQ:
2616 return equal ? const_true_rtx : const0_rtx;
2617 case NE:
2618 case LTGT:
2619 return ! equal ? const_true_rtx : const0_rtx;
2620 case LT:
2621 case UNLT:
2622 return op0lt ? const_true_rtx : const0_rtx;
2623 case GT:
2624 case UNGT:
2625 return op1lt ? const_true_rtx : const0_rtx;
2626 case LTU:
2627 return op0ltu ? const_true_rtx : const0_rtx;
2628 case GTU:
2629 return op1ltu ? const_true_rtx : const0_rtx;
2630 case LE:
2631 case UNLE:
2632 return equal || op0lt ? const_true_rtx : const0_rtx;
2633 case GE:
2634 case UNGE:
2635 return equal || op1lt ? const_true_rtx : const0_rtx;
2636 case LEU:
2637 return equal || op0ltu ? const_true_rtx : const0_rtx;
2638 case GEU:
2639 return equal || op1ltu ? const_true_rtx : const0_rtx;
2640 case ORDERED:
2641 return const_true_rtx;
2642 case UNORDERED:
2643 return const0_rtx;
2644 default:
2645 abort ();
2646 }
2647 }
2648 \f
2649 /* Simplify CODE, an operation with result mode MODE and three operands,
2650 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2651 a constant. Return 0 if no simplification is possible. */
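/* For example, (if_then_else (const_int 1) A B) folds to A, and
   (if_then_else (eq X Y) Y X) folds to X when the mode cannot hold
   a NaN and the comparison has no side effects.  */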
2652
2653 rtx
2654 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2655 enum machine_mode op0_mode, rtx op0, rtx op1,
2656 rtx op2)
2657 {
2658 unsigned int width = GET_MODE_BITSIZE (mode);
2659
2660 /* VOIDmode means "infinite" precision. */
2661 if (width == 0)
2662 width = HOST_BITS_PER_WIDE_INT;
2663
2664 switch (code)
2665 {
2666 case SIGN_EXTRACT:
2667 case ZERO_EXTRACT:
2668 if (GET_CODE (op0) == CONST_INT
2669 && GET_CODE (op1) == CONST_INT
2670 && GET_CODE (op2) == CONST_INT
2671 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2672 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2673 {
2674 /* Extracting a bit-field from a constant.  */
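/* Worked example for ZERO_EXTRACT on a !BITS_BIG_ENDIAN target:
   extracting INTVAL (op1) == 4 bits at position INTVAL (op2) == 4
   from (const_int 0x1234) shifts VAL right to 0x123 and masks it
   down to (const_int 3).  */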
2675 HOST_WIDE_INT val = INTVAL (op0);
2676
2677 if (BITS_BIG_ENDIAN)
2678 val >>= (GET_MODE_BITSIZE (op0_mode)
2679 - INTVAL (op2) - INTVAL (op1));
2680 else
2681 val >>= INTVAL (op2);
2682
2683 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2684 {
2685 /* First zero-extend. */
2686 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2687 /* If desired, propagate sign bit. */
2688 if (code == SIGN_EXTRACT
2689 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2690 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2691 }
2692
2693 /* Clear the bits that don't belong in our mode,
2694 unless they and our sign bit are all one.
2695 So we get either a reasonable negative value or a reasonable
2696 unsigned value for this mode. */
2697 if (width < HOST_BITS_PER_WIDE_INT
2698 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2699 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2700 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2701
2702 return GEN_INT (val);
2703 }
2704 break;
2705
2706 case IF_THEN_ELSE:
2707 if (GET_CODE (op0) == CONST_INT)
2708 return op0 != const0_rtx ? op1 : op2;
2709
2710 /* Convert a == b ? b : a to "a". */
2711 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2712 && !HONOR_NANS (mode)
2713 && rtx_equal_p (XEXP (op0, 0), op1)
2714 && rtx_equal_p (XEXP (op0, 1), op2))
2715 return op1;
2716 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2717 && !HONOR_NANS (mode)
2718 && rtx_equal_p (XEXP (op0, 1), op1)
2719 && rtx_equal_p (XEXP (op0, 0), op2))
2720 return op2;
2721 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2722 {
2723 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2724 ? GET_MODE (XEXP (op0, 1))
2725 : GET_MODE (XEXP (op0, 0)));
2726 rtx temp;
2727 if (cmp_mode == VOIDmode)
2728 cmp_mode = op0_mode;
2729 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2730 XEXP (op0, 0), XEXP (op0, 1));
2731
2732 /* See if any simplifications were possible. */
2733 if (temp == const0_rtx)
2734 return op2;
2735 else if (temp == const_true_rtx)
2736 return op1;
2737 else if (temp)
2738 abort ();
2739
2740 /* Look for happy constants in op1 and op2. */
2741 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2742 {
2743 HOST_WIDE_INT t = INTVAL (op1);
2744 HOST_WIDE_INT f = INTVAL (op2);
2745
2746 if (t == STORE_FLAG_VALUE && f == 0)
2747 code = GET_CODE (op0);
2748 else if (t == 0 && f == STORE_FLAG_VALUE)
2749 {
2750 enum rtx_code tmp;
2751 tmp = reversed_comparison_code (op0, NULL_RTX);
2752 if (tmp == UNKNOWN)
2753 break;
2754 code = tmp;
2755 }
2756 else
2757 break;
2758
2759 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2760 }
2761 }
2762 break;
2763 case VEC_MERGE:
2764 if (GET_MODE (op0) != mode
2765 || GET_MODE (op1) != mode
2766 || !VECTOR_MODE_P (mode))
2767 abort ();
2768 op2 = avoid_constant_pool_reference (op2);
2769 if (GET_CODE (op2) == CONST_INT)
2770 {
2771 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2772 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2773 int mask = (1 << n_elts) - 1;
2774
2775 if (!(INTVAL (op2) & mask))
2776 return op1;
2777 if ((INTVAL (op2) & mask) == mask)
2778 return op0;
2779
2780 op0 = avoid_constant_pool_reference (op0);
2781 op1 = avoid_constant_pool_reference (op1);
2782 if (GET_CODE (op0) == CONST_VECTOR
2783 && GET_CODE (op1) == CONST_VECTOR)
2784 {
2785 rtvec v = rtvec_alloc (n_elts);
2786 unsigned int i;
2787
2788 for (i = 0; i < n_elts; i++)
2789 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2790 ? CONST_VECTOR_ELT (op0, i)
2791 : CONST_VECTOR_ELT (op1, i));
2792 return gen_rtx_CONST_VECTOR (mode, v);
2793 }
2794 }
2795 break;
2796
2797 default:
2798 abort ();
2799 }
2800
2801 return 0;
2802 }
2803
2804 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2805 Return 0 if no simplification is possible. */
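/* For example, with OP == (const_int 0x1234) and innermode SImode,
   a QImode subreg at byte 0 folds to (const_int 0x34) on a
   little-endian target and to (const_int 0) on a big-endian one.  */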
2806 rtx
2807 simplify_subreg (enum machine_mode outermode, rtx op,
2808 enum machine_mode innermode, unsigned int byte)
2809 {
2810 /* Little bit of sanity checking. */
2811 if (innermode == VOIDmode || outermode == VOIDmode
2812 || innermode == BLKmode || outermode == BLKmode)
2813 abort ();
2814
2815 if (GET_MODE (op) != innermode
2816 && GET_MODE (op) != VOIDmode)
2817 abort ();
2818
2819 if (byte % GET_MODE_SIZE (outermode)
2820 || byte >= GET_MODE_SIZE (innermode))
2821 abort ();
2822
2823 if (outermode == innermode && !byte)
2824 return op;
2825
2826 /* Simplify subregs of vector constants. */
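/* E.g. (subreg:SI (const_vector:V4SI [A B C D]) 4) is just the
   second element B, since each element here is 4 bytes wide.  */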
2827 if (GET_CODE (op) == CONST_VECTOR)
2828 {
2829 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
2830 const unsigned int offset = byte / elt_size;
2831 rtx elt;
2832
2833 if (GET_MODE_INNER (innermode) == outermode)
2834 {
2835 elt = CONST_VECTOR_ELT (op, offset);
2836
2837 /* ?? We probably don't need this copy_rtx because constants
2838 can be shared. ?? */
2839
2840 return copy_rtx (elt);
2841 }
2842 else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
2843 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
2844 {
2845 return (gen_rtx_CONST_VECTOR
2846 (outermode,
2847 gen_rtvec_v (GET_MODE_NUNITS (outermode),
2848 &CONST_VECTOR_ELT (op, offset))));
2849 }
2850 else if (GET_MODE_CLASS (outermode) == MODE_INT
2851 && (GET_MODE_SIZE (outermode) % elt_size == 0))
2852 {
2853 /* This happens when the target register size is smaller than
2854 the vector mode, and we synthesize operations with vectors
2855 of elements that are smaller than the register size. */
2856 HOST_WIDE_INT sum = 0, high = 0;
2857 unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
2858 unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
2859 unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
2860 int shift = BITS_PER_UNIT * elt_size;
2861 unsigned HOST_WIDE_INT unit_mask;
2862
2863 unit_mask = (unsigned HOST_WIDE_INT) -1
2864 >> (sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - shift);
2865
2866 for (; n_elts--; i += step)
2867 {
2868 elt = CONST_VECTOR_ELT (op, i);
2869 if (GET_CODE (elt) == CONST_DOUBLE
2870 && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
2871 {
2872 elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
2873 elt);
2874 if (! elt)
2875 return NULL_RTX;
2876 }
2877 if (GET_CODE (elt) != CONST_INT)
2878 return NULL_RTX;
2879 /* Avoid overflow. */
2880 if (high >> (HOST_BITS_PER_WIDE_INT - shift))
2881 return NULL_RTX;
2882 high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
2883 sum = (sum << shift) + (INTVAL (elt) & unit_mask);
2884 }
2885 if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
2886 return GEN_INT (trunc_int_for_mode (sum, outermode));
2887 else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
2888 return immed_double_const (sum, high, outermode);
2889 else
2890 return NULL_RTX;
2891 }
2892 else if (GET_MODE_CLASS (outermode) == MODE_INT
2893 && (elt_size % GET_MODE_SIZE (outermode) == 0))
2894 {
2895 enum machine_mode new_mode
2896 = int_mode_for_mode (GET_MODE_INNER (innermode));
2897 int subbyte = byte % elt_size;
2898
2899 op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
2900 if (! op)
2901 return NULL_RTX;
2902 return simplify_subreg (outermode, op, new_mode, subbyte);
2903 }
2904 else if (GET_MODE_CLASS (outermode) == MODE_INT)
2905 /* This shouldn't happen, but let's not do anything stupid. */
2906 return NULL_RTX;
2907 }
2908
2909 /* Attempt to simplify constant to non-SUBREG expression. */
2910 if (CONSTANT_P (op))
2911 {
2912 int offset, part;
2913 unsigned HOST_WIDE_INT val = 0;
2914
2915 if (VECTOR_MODE_P (outermode))
2916 {
2917 /* Construct a CONST_VECTOR from individual subregs. */
2918 enum machine_mode submode = GET_MODE_INNER (outermode);
2919 int subsize = GET_MODE_UNIT_SIZE (outermode);
2920 int i, elts = GET_MODE_NUNITS (outermode);
2921 rtvec v = rtvec_alloc (elts);
2922 rtx elt;
2923
2924 for (i = 0; i < elts; i++, byte += subsize)
2925 {
2926 /* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
2927 /* ??? It would be nice if we could actually make such subregs
2928 on targets that allow such relocations. */
2929 if (byte >= GET_MODE_SIZE (innermode))
2930 elt = CONST0_RTX (submode);
2931 else
2932 elt = simplify_subreg (submode, op, innermode, byte);
2933 if (! elt)
2934 return NULL_RTX;
2935 RTVEC_ELT (v, i) = elt;
2936 }
2937 return gen_rtx_CONST_VECTOR (outermode, v);
2938 }
2939
2940 /* ??? This code is partly redundant with code below, but can handle
2941 the subregs of floats and similar corner cases.
2942 Later we should move all simplification code here and rewrite
2943 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2944 using SIMPLIFY_SUBREG. */
2945 if (subreg_lowpart_offset (outermode, innermode) == byte
2946 && GET_CODE (op) != CONST_VECTOR)
2947 {
2948 rtx new = gen_lowpart_if_possible (outermode, op);
2949 if (new)
2950 return new;
2951 }
2952
2953 /* A similar comment to the one above applies here.  */
2954 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2955 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2956 && GET_MODE_CLASS (outermode) == MODE_INT)
2957 {
2958 rtx new = constant_subword (op,
2959 (byte / UNITS_PER_WORD),
2960 innermode);
2961 if (new)
2962 return new;
2963 }
2964
2965 if (GET_MODE_CLASS (outermode) != MODE_INT
2966 && GET_MODE_CLASS (outermode) != MODE_CC)
2967 {
2968 enum machine_mode new_mode = int_mode_for_mode (outermode);
2969
2970 if (new_mode != innermode || byte != 0)
2971 {
2972 op = simplify_subreg (new_mode, op, innermode, byte);
2973 if (! op)
2974 return NULL_RTX;
2975 return simplify_subreg (outermode, op, new_mode, 0);
2976 }
2977 }
2978
2979 offset = byte * BITS_PER_UNIT;
2980 switch (GET_CODE (op))
2981 {
2982 case CONST_DOUBLE:
2983 if (GET_MODE (op) != VOIDmode)
2984 break;
2985
2986 /* We can't handle this case yet. */
2987 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2988 return NULL_RTX;
2989
2990 part = offset >= HOST_BITS_PER_WIDE_INT;
2991 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2992 && BYTES_BIG_ENDIAN)
2993 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2994 && WORDS_BIG_ENDIAN))
2995 part = !part;
2996 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2997 offset %= HOST_BITS_PER_WIDE_INT;
2998
2999 /* We've already picked the word we want from a double, so
3000 pretend this is actually an integer. */
3001 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
3002
3003 /* FALLTHROUGH */
3004 case CONST_INT:
3005 if (GET_CODE (op) == CONST_INT)
3006 val = INTVAL (op);
3007
3008 /* We don't handle synthesizing of non-integral constants yet. */
3009 if (GET_MODE_CLASS (outermode) != MODE_INT)
3010 return NULL_RTX;
3011
3012 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
3013 {
3014 if (WORDS_BIG_ENDIAN)
3015 offset = (GET_MODE_BITSIZE (innermode)
3016 - GET_MODE_BITSIZE (outermode) - offset);
3017 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
3018 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
3019 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
3020 - 2 * (offset % BITS_PER_WORD));
3021 }
3022
3023 if (offset >= HOST_BITS_PER_WIDE_INT)
3024 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
3025 else
3026 {
3027 val >>= offset;
3028 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
3029 val = trunc_int_for_mode (val, outermode);
3030 return GEN_INT (val);
3031 }
3032 default:
3033 break;
3034 }
3035 }
3036
3037 /* Changing mode twice with SUBREG => just change it once,
3038 or not at all if changing back to the op's starting mode. */
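/* For example, (subreg:QI (subreg:HI (reg:SI R) 0) 0) is rewritten
   as the single (subreg:QI (reg:SI R) 0).  */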
3039 if (GET_CODE (op) == SUBREG)
3040 {
3041 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3042 int final_offset = byte + SUBREG_BYTE (op);
3043 rtx new;
3044
3045 if (outermode == innermostmode
3046 && byte == 0 && SUBREG_BYTE (op) == 0)
3047 return SUBREG_REG (op);
3048
3049 /* The SUBREG_BYTE represents the offset, as if the value were stored
3050 in memory. An irritating exception is a paradoxical subreg, where
3051 we define SUBREG_BYTE to be 0. On big endian machines, this
3052 value should be negative. For a moment, undo this exception. */
3053 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3054 {
3055 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3056 if (WORDS_BIG_ENDIAN)
3057 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3058 if (BYTES_BIG_ENDIAN)
3059 final_offset += difference % UNITS_PER_WORD;
3060 }
3061 if (SUBREG_BYTE (op) == 0
3062 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3063 {
3064 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3065 if (WORDS_BIG_ENDIAN)
3066 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3067 if (BYTES_BIG_ENDIAN)
3068 final_offset += difference % UNITS_PER_WORD;
3069 }
3070
3071 /* See whether resulting subreg will be paradoxical. */
3072 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3073 {
3074 /* In nonparadoxical subregs we can't handle negative offsets. */
3075 if (final_offset < 0)
3076 return NULL_RTX;
3077 /* Bail out in case resulting subreg would be incorrect. */
3078 if (final_offset % GET_MODE_SIZE (outermode)
3079 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3080 return NULL_RTX;
3081 }
3082 else
3083 {
3084 int offset = 0;
3085 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3086
3087 /* In a paradoxical subreg, see if we are still looking at the lower part.
3088 If so, our SUBREG_BYTE will be 0. */
3089 if (WORDS_BIG_ENDIAN)
3090 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3091 if (BYTES_BIG_ENDIAN)
3092 offset += difference % UNITS_PER_WORD;
3093 if (offset == final_offset)
3094 final_offset = 0;
3095 else
3096 return NULL_RTX;
3097 }
3098
3099 /* Recurse for further possible simplifications. */
3100 new = simplify_subreg (outermode, SUBREG_REG (op),
3101 GET_MODE (SUBREG_REG (op)),
3102 final_offset);
3103 if (new)
3104 return new;
3105 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3106 }
3107
3108 /* SUBREG of a hard register => just change the register number
3109 and/or mode. If the hard register is not valid in that mode,
3110 suppress this simplification. If the hard register is the stack,
3111 frame, or argument pointer, leave this as a SUBREG. */
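/* A sketch, assuming hard register 2 is valid in both SImode and
   HImode: (subreg:HI (reg:SI 2) 0) becomes (reg:HI 2) directly,
   with subreg_hard_regno computing which hard register actually
   holds the requested bytes.  */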
3112
3113 if (REG_P (op)
3114 && (! REG_FUNCTION_VALUE_P (op)
3115 || ! rtx_equal_function_value_matters)
3116 && REGNO (op) < FIRST_PSEUDO_REGISTER
3117 #ifdef CANNOT_CHANGE_MODE_CLASS
3118 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3119 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3120 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3121 #endif
3122 && ((reload_completed && !frame_pointer_needed)
3123 || (REGNO (op) != FRAME_POINTER_REGNUM
3124 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3125 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3126 #endif
3127 ))
3128 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3129 && REGNO (op) != ARG_POINTER_REGNUM
3130 #endif
3131 && REGNO (op) != STACK_POINTER_REGNUM
3132 && subreg_offset_representable_p (REGNO (op), innermode,
3133 byte, outermode))
3134 {
3135 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3136 int final_regno = subreg_hard_regno (tem, 0);
3137
3138 /* ??? We do allow it if the current REG is not valid for
3139 its mode. This is a kludge to work around how float/complex
3140 arguments are passed on 32-bit SPARC and should be fixed. */
3141 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3142 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3143 {
3144 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3145
3146 /* Propagate the original regno. We don't have any way to specify
3147 the offset inside the original regno, so do so only for the lowpart.
3148 The information is used only by alias analysis, which cannot
3149 grok partial registers anyway. */
3150
3151 if (subreg_lowpart_offset (outermode, innermode) == byte)
3152 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3153 return x;
3154 }
3155 }
3156
3157 /* If we have a SUBREG of a register that we are replacing and we are
3158 replacing it with a MEM, make a new MEM and try replacing the
3159 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3160 or if we would be widening it. */
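/* For instance, (subreg:SI (mem:DI ADDR) 4) becomes a narrower MEM
   at ADDR plus 4 via adjust_address_nv, provided ADDR is not
   mode-dependent and the reference is safe to split.  */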
3161
3162 if (GET_CODE (op) == MEM
3163 && ! mode_dependent_address_p (XEXP (op, 0))
3164 /* Allow splitting of volatile memory references in case we don't
3165 have an instruction to move the whole thing. */
3166 && (! MEM_VOLATILE_P (op)
3167 || ! have_insn_for (SET, innermode))
3168 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3169 return adjust_address_nv (op, outermode, byte);
3170
3171 /* Handle complex values represented as CONCAT
3172 of real and imaginary part. */
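/* E.g. with SFmode parts, (subreg:SF (concat:SC R I) 4) lies
   entirely within the imaginary half and simplifies to I.  */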
3173 if (GET_CODE (op) == CONCAT)
3174 {
3175 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
3176 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3177 unsigned int final_offset;
3178 rtx res;
3179
3180 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3181 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3182 if (res)
3183 return res;
3184 /* We can at least simplify it by referring directly to the relevant part. */
3185 return gen_rtx_SUBREG (outermode, part, final_offset);
3186 }
3187
3188 return NULL_RTX;
3189 }
3190 /* Make a SUBREG operation or equivalent if it folds. */
3191
3192 rtx
3193 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3194 enum machine_mode innermode, unsigned int byte)
3195 {
3196 rtx new;
3197 /* Little bit of sanity checking. */
3198 if (innermode == VOIDmode || outermode == VOIDmode
3199 || innermode == BLKmode || outermode == BLKmode)
3200 abort ();
3201
3202 if (GET_MODE (op) != innermode
3203 && GET_MODE (op) != VOIDmode)
3204 abort ();
3205
3206 if (byte % GET_MODE_SIZE (outermode)
3207 || byte >= GET_MODE_SIZE (innermode))
3208 abort ();
3209
3210 if (GET_CODE (op) == QUEUED)
3211 return NULL_RTX;
3212
3213 new = simplify_subreg (outermode, op, innermode, byte);
3214 if (new)
3215 return new;
3216
3217 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3218 return NULL_RTX;
3219
3220 return gen_rtx_SUBREG (outermode, op, byte);
3221 }
3222 /* Simplify X, an rtx expression.
3223
3224 Return the simplified expression or NULL if no simplifications
3225 were possible.
3226
3227 This is the preferred entry point into the simplification routines;
3228 however, we still allow passes to call the more specific routines.
3229
3230 Right now GCC has three (yes, three) major bodies of RTL simplification
3231 code that need to be unified.
3232
3233 1. fold_rtx in cse.c. This code uses various CSE specific
3234 information to aid in RTL simplification.
3235
3236 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3237 it uses combine specific information to aid in RTL
3238 simplification.
3239
3240 3. The routines in this file.
3241
3242
3243 Long term we want to have only one body of simplification code; to
3244 get to that state I recommend the following steps:
3245
3246 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3247 which do not rely on pass-dependent state into these routines.
3248
3249 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3250 use this routine whenever possible.
3251
3252 3. Allow for pass-dependent state to be provided to these
3253 routines and add simplifications based on the pass-dependent
3254 state. Remove code from cse.c & combine.c that becomes
3255 redundant/dead.
3256
3257 It will take time, but ultimately the compiler will be easier to
3258 maintain and improve. It's totally silly that when we add a
3259 simplification it needs to be added to 4 places (3 for RTL
3260 simplification and 1 for tree simplification). */
3261
3262 rtx
3263 simplify_rtx (rtx x)
3264 {
3265 enum rtx_code code = GET_CODE (x);
3266 enum machine_mode mode = GET_MODE (x);
3267 rtx temp;
3268
3269 switch (GET_RTX_CLASS (code))
3270 {
3271 case '1':
3272 return simplify_unary_operation (code, mode,
3273 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3274 case 'c':
3275 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3276 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3277
3278 /* Fall through.... */
3279
3280 case '2':
3281 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3282
3283 case '3':
3284 case 'b':
3285 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3286 XEXP (x, 0), XEXP (x, 1),
3287 XEXP (x, 2));
3288
3289 case '<':
3290 temp = simplify_relational_operation (code,
3291 ((GET_MODE (XEXP (x, 0))
3292 != VOIDmode)
3293 ? GET_MODE (XEXP (x, 0))
3294 : GET_MODE (XEXP (x, 1))),
3295 XEXP (x, 0), XEXP (x, 1));
3296 #ifdef FLOAT_STORE_FLAG_VALUE
3297 if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3298 {
3299 if (temp == const0_rtx)
3300 temp = CONST0_RTX (mode);
3301 else
3302 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3303 mode);
3304 }
3305 #endif
3306 return temp;
3307
3308 case 'x':
3309 if (code == SUBREG)
3310 return simplify_gen_subreg (mode, SUBREG_REG (x),
3311 GET_MODE (SUBREG_REG (x)),
3312 SUBREG_BYTE (x));
3313 if (code == CONSTANT_P_RTX)
3314 {
3315 if (CONSTANT_P (XEXP (x, 0)))
3316 return const1_rtx;
3317 }
3318 break;
3319
3320 case 'o':
3321 if (code == LO_SUM)
3322 {
3323 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3324 if (GET_CODE (XEXP (x, 0)) == HIGH
3325 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3326 return XEXP (x, 1);
3327 }
3328 break;
3329
3330 default:
3331 break;
3332 }
3333 return NULL;
3334 }