1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
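/* For example, HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5) yields -1 (all
   ones) and HWI_SIGN_EXTEND (5) yields 0, i.e. the high word implied
   by the sign bit of the low word.  */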
51
52 static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
53 static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
54 const void *));
55 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
56 enum machine_mode, rtx,
57 rtx, int));
58 \f
59 /* Negate a CONST_INT rtx, truncating (because a conversion from a
60 maximally negative number can overflow). */
61 static rtx
62 neg_const_int (mode, i)
63 enum machine_mode mode;
64 rtx i;
65 {
66 return gen_int_mode (- INTVAL (i), mode);
67 }
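/* A worked example: in QImode, negating the CONST_INT -128 gives 128,
   which is out of range for the mode; gen_int_mode truncates it back
   into the mode, so the result wraps to -128 instead of producing an
   invalid constant.  */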
68
69 \f
70 /* Make a binary operation by properly ordering the operands and
71 seeing if the expression folds. */
72
73 rtx
74 simplify_gen_binary (code, mode, op0, op1)
75 enum rtx_code code;
76 enum machine_mode mode;
77 rtx op0, op1;
78 {
79 rtx tem;
80
81 /* Put complex operands first and constants second if commutative. */
82 if (GET_RTX_CLASS (code) == 'c'
83 && swap_commutative_operands_p (op0, op1))
84 tem = op0, op0 = op1, op1 = tem;
85
86 /* If this simplifies, do it. */
87 tem = simplify_binary_operation (code, mode, op0, op1);
88 if (tem)
89 return tem;
90
91 /* Handle addition and subtraction specially. Otherwise, just form
92 the operation. */
93
94 if (code == PLUS || code == MINUS)
95 {
96 tem = simplify_plus_minus (code, mode, op0, op1, 1);
97 if (tem)
98 return tem;
99 }
100
101 return gen_rtx_fmt_ee (code, mode, op0, op1);
102 }
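/* A usage sketch (X is assumed to be an SImode REG):

     rtx tem = simplify_gen_binary (PLUS, SImode, x, const0_rtx);

   folds to X itself through simplify_binary_operation, whereas two
   distinct pseudo registers would simply come back wrapped in a new
   (plus:SI ...) rtx.  */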
103 \f
104 /* If X is a MEM referencing the constant pool, return the real value.
105 Otherwise return X. */
106 rtx
107 avoid_constant_pool_reference (x)
108 rtx x;
109 {
110 rtx c, tmp, addr;
111 enum machine_mode cmode;
112
113 switch (GET_CODE (x))
114 {
115 case MEM:
116 break;
117
118 case FLOAT_EXTEND:
119 /* Handle float extensions of constant pool references. */
120 tmp = XEXP (x, 0);
121 c = avoid_constant_pool_reference (tmp);
122 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
123 {
124 REAL_VALUE_TYPE d;
125
126 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
127 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
128 }
129 return x;
130
131 default:
132 return x;
133 }
134
135 addr = XEXP (x, 0);
136
137 /* Call target hook to avoid the effects of -fpic etc... */
138 addr = (*targetm.delegitimize_address) (addr);
139
140 if (GET_CODE (addr) == LO_SUM)
141 addr = XEXP (addr, 1);
142
143 if (GET_CODE (addr) != SYMBOL_REF
144 || ! CONSTANT_POOL_ADDRESS_P (addr))
145 return x;
146
147 c = get_pool_constant (addr);
148 cmode = get_pool_mode (addr);
149
150 /* If we're accessing the constant in a different mode than it was
151 originally stored, attempt to fix that up via subreg simplifications.
152 If that fails we have no choice but to return the original memory. */
153 if (cmode != GET_MODE (x))
154 {
155 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
156 return c ? c : x;
157 }
158
159 return c;
160 }
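/* For instance, if X is (mem (symbol_ref ...)) whose address satisfies
   CONSTANT_POOL_ADDRESS_P and whose pool entry is an SFmode
   CONST_DOUBLE, that CONST_DOUBLE is returned directly; when X reads
   the entry in some other mode, simplify_subreg above tries to
   convert it first.  */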
161 \f
162 /* Make a unary operation by first seeing if it folds and otherwise making
163 the specified operation. */
164
165 rtx
166 simplify_gen_unary (code, mode, op, op_mode)
167 enum rtx_code code;
168 enum machine_mode mode;
169 rtx op;
170 enum machine_mode op_mode;
171 {
172 rtx tem;
173
174 /* If this simplifies, use it. */
175 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
176 return tem;
177
178 return gen_rtx_fmt_e (code, mode, op);
179 }
180
181 /* Likewise for ternary operations. */
182
183 rtx
184 simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
185 enum rtx_code code;
186 enum machine_mode mode, op0_mode;
187 rtx op0, op1, op2;
188 {
189 rtx tem;
190
191 /* If this simplifies, use it. */
192 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
193 op0, op1, op2)))
194 return tem;
195
196 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
197 }
198 \f
199 /* Likewise, for relational operations.
200 CMP_MODE specifies the mode in which the comparison is done.
201 */
202
203 rtx
204 simplify_gen_relational (code, mode, cmp_mode, op0, op1)
205 enum rtx_code code;
206 enum machine_mode mode;
207 enum machine_mode cmp_mode;
208 rtx op0, op1;
209 {
210 rtx tem;
211
212 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
213 return tem;
214
215 /* For the following tests, ensure const0_rtx is op1. */
216 if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
217 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
218
219 /* If op0 is a compare, extract the comparison arguments from it. */
220 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
221 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
222
223 /* If op0 is a comparison, extract the comparison arguments from it. */
224 if (code == NE && op1 == const0_rtx
225 && GET_RTX_CLASS (GET_CODE (op0)) == '<')
226 return op0;
227 else if (code == EQ && op1 == const0_rtx)
228 {
229 /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'. */
230 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
231 if (new != UNKNOWN)
232 {
233 code = new;
234 mode = cmp_mode;
235 op1 = XEXP (op0, 1);
236 op0 = XEXP (op0, 0);
237 }
238 }
239
240 /* Put complex operands first and constants second. */
241 if (swap_commutative_operands_p (op0, op1))
242 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
243
244 return gen_rtx_fmt_ee (code, mode, op0, op1);
245 }
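/* E.g. with CODE == EQ and OP1 == const0_rtx, a comparison operand
   such as (ne X Y) folds into the reversed comparison (eq X Y) via
   reversed_comparison_code instead of nesting one comparison inside
   another.  */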
246 \f
247 /* Replace all occurrences of OLD in X with NEW and try to simplify the
248 resulting RTX. Return a new RTX which is as simplified as possible. */
249
250 rtx
251 simplify_replace_rtx (x, old, new)
252 rtx x;
253 rtx old;
254 rtx new;
255 {
256 enum rtx_code code = GET_CODE (x);
257 enum machine_mode mode = GET_MODE (x);
258
259 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
260 to build a new expression substituting recursively. If we can't do
261 anything, return our input. */
262
263 if (x == old)
264 return new;
265
266 switch (GET_RTX_CLASS (code))
267 {
268 case '1':
269 {
270 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
271 rtx op = (XEXP (x, 0) == old
272 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
273
274 return simplify_gen_unary (code, mode, op, op_mode);
275 }
276
277 case '2':
278 case 'c':
279 return
280 simplify_gen_binary (code, mode,
281 simplify_replace_rtx (XEXP (x, 0), old, new),
282 simplify_replace_rtx (XEXP (x, 1), old, new));
283 case '<':
284 {
285 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
286 ? GET_MODE (XEXP (x, 0))
287 : GET_MODE (XEXP (x, 1)));
288 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
289 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
290
291 return
292 simplify_gen_relational (code, mode,
293 (op_mode != VOIDmode
294 ? op_mode
295 : GET_MODE (op0) != VOIDmode
296 ? GET_MODE (op0)
297 : GET_MODE (op1)),
298 op0, op1);
299 }
300
301 case '3':
302 case 'b':
303 {
304 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
305 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
306
307 return
308 simplify_gen_ternary (code, mode,
309 (op_mode != VOIDmode
310 ? op_mode
311 : GET_MODE (op0)),
312 op0,
313 simplify_replace_rtx (XEXP (x, 1), old, new),
314 simplify_replace_rtx (XEXP (x, 2), old, new));
315 }
316
317 case 'x':
318 /* The only case we try to handle is a SUBREG. */
319 if (code == SUBREG)
320 {
321 rtx exp;
322 exp = simplify_gen_subreg (GET_MODE (x),
323 simplify_replace_rtx (SUBREG_REG (x),
324 old, new),
325 GET_MODE (SUBREG_REG (x)),
326 SUBREG_BYTE (x));
327 if (exp)
328 x = exp;
329 }
330 return x;
331
332 case 'o':
333 if (code == MEM)
334 return replace_equiv_address_nv (x,
335 simplify_replace_rtx (XEXP (x, 0),
336 old, new));
337 else if (code == LO_SUM)
338 {
339 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
340 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
341
342 /* (lo_sum (high x) x) -> x */
343 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
344 return op1;
345
346 return gen_rtx_LO_SUM (mode, op0, op1);
347 }
348 else if (code == REG)
349 {
350 if (REG_P (old) && REGNO (x) == REGNO (old))
351 return new;
352 }
353
354 return x;
355
356 default:
357 return x;
358 }
359 return x;
360 }
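/* As a sketch: replacing (reg R) by (const_int 4) inside
   (plus:SI (reg R) (const_int 1)) recurses into both operands and,
   through simplify_gen_binary, folds the whole expression to
   (const_int 5).  */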
361 \f
362 /* Try to simplify a unary operation CODE whose output mode is to be
363 MODE with input operand OP whose mode was originally OP_MODE.
364 Return zero if no simplification can be made. */
365 rtx
366 simplify_unary_operation (code, mode, op, op_mode)
367 enum rtx_code code;
368 enum machine_mode mode;
369 rtx op;
370 enum machine_mode op_mode;
371 {
372 unsigned int width = GET_MODE_BITSIZE (mode);
373 rtx trueop = avoid_constant_pool_reference (op);
374
375 if (code == VEC_DUPLICATE)
376 {
377 if (!VECTOR_MODE_P (mode))
378 abort ();
379 if (GET_MODE (trueop) != VOIDmode
380 && !VECTOR_MODE_P (GET_MODE (trueop))
381 && GET_MODE_INNER (mode) != GET_MODE (trueop))
382 abort ();
383 if (GET_MODE (trueop) != VOIDmode
384 && VECTOR_MODE_P (GET_MODE (trueop))
385 && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
386 abort ();
387 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
388 || GET_CODE (trueop) == CONST_VECTOR)
389 {
390 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
391 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
392 rtvec v = rtvec_alloc (n_elts);
393 unsigned int i;
394
395 if (GET_CODE (trueop) != CONST_VECTOR)
396 for (i = 0; i < n_elts; i++)
397 RTVEC_ELT (v, i) = trueop;
398 else
399 {
400 enum machine_mode inmode = GET_MODE (trueop);
401 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
402 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
403
404 if (in_n_elts >= n_elts || n_elts % in_n_elts)
405 abort ();
406 for (i = 0; i < n_elts; i++)
407 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
408 }
409 return gen_rtx_CONST_VECTOR (mode, v);
410 }
411 }
412
413 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
414 {
415 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
416 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
417 enum machine_mode opmode = GET_MODE (trueop);
418 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
419 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
420 rtvec v = rtvec_alloc (n_elts);
421 unsigned int i;
422
423 if (op_n_elts != n_elts)
424 abort ();
425
426 for (i = 0; i < n_elts; i++)
427 {
428 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
429 CONST_VECTOR_ELT (trueop, i),
430 GET_MODE_INNER (opmode));
431 if (!x)
432 return 0;
433 RTVEC_ELT (v, i) = x;
434 }
435 return gen_rtx_CONST_VECTOR (mode, v);
436 }
437
438 /* The order of these tests is critical so that, for example, we don't
439 check the wrong mode (input vs. output) for a conversion operation,
440 such as FIX. At some point, this should be simplified. */
441
442 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
443 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
444 {
445 HOST_WIDE_INT hv, lv;
446 REAL_VALUE_TYPE d;
447
448 if (GET_CODE (trueop) == CONST_INT)
449 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
450 else
451 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
452
453 REAL_VALUE_FROM_INT (d, lv, hv, mode);
454 d = real_value_truncate (mode, d);
455 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
456 }
457 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
458 && (GET_CODE (trueop) == CONST_DOUBLE
459 || GET_CODE (trueop) == CONST_INT))
460 {
461 HOST_WIDE_INT hv, lv;
462 REAL_VALUE_TYPE d;
463
464 if (GET_CODE (trueop) == CONST_INT)
465 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
466 else
467 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
468
469 if (op_mode == VOIDmode)
470 {
471 /* We don't know how to interpret negative-looking numbers in
472 this case, so don't try to fold those. */
473 if (hv < 0)
474 return 0;
475 }
476 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
477 ;
478 else
479 hv = 0, lv &= GET_MODE_MASK (op_mode);
480
481 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
482 d = real_value_truncate (mode, d);
483 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
484 }
485
486 if (GET_CODE (trueop) == CONST_INT
487 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
488 {
489 HOST_WIDE_INT arg0 = INTVAL (trueop);
490 HOST_WIDE_INT val;
491
492 switch (code)
493 {
494 case NOT:
495 val = ~ arg0;
496 break;
497
498 case NEG:
499 val = - arg0;
500 break;
501
502 case ABS:
503 val = (arg0 >= 0 ? arg0 : - arg0);
504 break;
505
506 case FFS:
507 /* Don't use ffs here. Instead, get low order bit and then its
508 number. If arg0 is zero, this will return 0, as desired. */
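	  /* E.g. arg0 == 12 (binary 1100): arg0 & -arg0 isolates the
	     lowest set bit, 4; exact_log2 of that is 2, so FFS yields 3.  */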
509 arg0 &= GET_MODE_MASK (mode);
510 val = exact_log2 (arg0 & (- arg0)) + 1;
511 break;
512
513 case CLZ:
514 arg0 &= GET_MODE_MASK (mode);
515 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
516 ;
517 else
518 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
519 break;
520
521 case CTZ:
522 arg0 &= GET_MODE_MASK (mode);
523 if (arg0 == 0)
524 {
525 /* Even if the value at zero is undefined, we have to come
526 up with some replacement. Seems good enough. */
527 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
528 val = GET_MODE_BITSIZE (mode);
529 }
530 else
531 val = exact_log2 (arg0 & -arg0);
532 break;
533
534 case POPCOUNT:
535 arg0 &= GET_MODE_MASK (mode);
536 val = 0;
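	  /* Each `arg0 &= arg0 - 1' below clears the lowest set bit, so
	     the loop runs once per set bit (Kernighan's bit-counting
	     method).  */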
537 while (arg0)
538 val++, arg0 &= arg0 - 1;
539 break;
540
541 case PARITY:
542 arg0 &= GET_MODE_MASK (mode);
543 val = 0;
544 while (arg0)
545 val++, arg0 &= arg0 - 1;
546 val &= 1;
547 break;
548
549 case TRUNCATE:
550 val = arg0;
551 break;
552
553 case ZERO_EXTEND:
554 /* When zero-extending a CONST_INT, we need to know its
555 original mode. */
556 if (op_mode == VOIDmode)
557 abort ();
558 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
559 {
560 /* If we were really extending the mode,
561 we would have to distinguish between zero-extension
562 and sign-extension. */
563 if (width != GET_MODE_BITSIZE (op_mode))
564 abort ();
565 val = arg0;
566 }
567 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
568 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
569 else
570 return 0;
571 break;
572
573 case SIGN_EXTEND:
574 if (op_mode == VOIDmode)
575 op_mode = mode;
576 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
577 {
578 /* If we were really extending the mode,
579 we would have to distinguish between zero-extension
580 and sign-extension. */
581 if (width != GET_MODE_BITSIZE (op_mode))
582 abort ();
583 val = arg0;
584 }
585 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
586 {
587 val
588 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
589 if (val
590 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
591 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
592 }
593 else
594 return 0;
595 break;
596
597 case SQRT:
598 case FLOAT_EXTEND:
599 case FLOAT_TRUNCATE:
600 case SS_TRUNCATE:
601 case US_TRUNCATE:
602 return 0;
603
604 default:
605 abort ();
606 }
607
608 val = trunc_int_for_mode (val, mode);
609
610 return GEN_INT (val);
611 }
612
613 /* We can do some operations on integer CONST_DOUBLEs. Also allow
614 for a DImode operation on a CONST_INT. */
615 else if (GET_MODE (trueop) == VOIDmode
616 && width <= HOST_BITS_PER_WIDE_INT * 2
617 && (GET_CODE (trueop) == CONST_DOUBLE
618 || GET_CODE (trueop) == CONST_INT))
619 {
620 unsigned HOST_WIDE_INT l1, lv;
621 HOST_WIDE_INT h1, hv;
622
623 if (GET_CODE (trueop) == CONST_DOUBLE)
624 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
625 else
626 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
627
628 switch (code)
629 {
630 case NOT:
631 lv = ~ l1;
632 hv = ~ h1;
633 break;
634
635 case NEG:
636 neg_double (l1, h1, &lv, &hv);
637 break;
638
639 case ABS:
640 if (h1 < 0)
641 neg_double (l1, h1, &lv, &hv);
642 else
643 lv = l1, hv = h1;
644 break;
645
646 case FFS:
647 hv = 0;
648 if (l1 == 0)
649 {
650 if (h1 == 0)
651 lv = 0;
652 else
653 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
654 }
655 else
656 lv = exact_log2 (l1 & -l1) + 1;
657 break;
658
659 case CLZ:
660 hv = 0;
661 if (h1 == 0)
662 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
663 else
664 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
665 - HOST_BITS_PER_WIDE_INT;
666 break;
667
668 case CTZ:
669 hv = 0;
670 if (l1 == 0)
671 {
672 if (h1 == 0)
673 lv = GET_MODE_BITSIZE (mode);
674 else
675 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
676 }
677 else
678 lv = exact_log2 (l1 & -l1);
679 break;
680
681 case POPCOUNT:
682 hv = 0;
683 lv = 0;
684 while (l1)
685 lv++, l1 &= l1 - 1;
686 while (h1)
687 lv++, h1 &= h1 - 1;
688 break;
689
690 case PARITY:
691 hv = 0;
692 lv = 0;
693 while (l1)
694 lv++, l1 &= l1 - 1;
695 while (h1)
696 lv++, h1 &= h1 - 1;
697 lv &= 1;
698 break;
699
700 case TRUNCATE:
701 /* This is just a change-of-mode, so do nothing. */
702 lv = l1, hv = h1;
703 break;
704
705 case ZERO_EXTEND:
706 if (op_mode == VOIDmode)
707 abort ();
708
709 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
710 return 0;
711
712 hv = 0;
713 lv = l1 & GET_MODE_MASK (op_mode);
714 break;
715
716 case SIGN_EXTEND:
717 if (op_mode == VOIDmode
718 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
719 return 0;
720 else
721 {
722 lv = l1 & GET_MODE_MASK (op_mode);
723 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
724 && (lv & ((HOST_WIDE_INT) 1
725 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
726 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
727
728 hv = HWI_SIGN_EXTEND (lv);
729 }
730 break;
731
732 case SQRT:
733 return 0;
734
735 default:
736 return 0;
737 }
738
739 return immed_double_const (lv, hv, mode);
740 }
741
742 else if (GET_CODE (trueop) == CONST_DOUBLE
743 && GET_MODE_CLASS (mode) == MODE_FLOAT)
744 {
745 REAL_VALUE_TYPE d, t;
746 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
747
748 switch (code)
749 {
750 case SQRT:
751 if (HONOR_SNANS (mode) && real_isnan (&d))
752 return 0;
753 real_sqrt (&t, mode, &d);
754 d = t;
755 break;
756 case ABS:
757 d = REAL_VALUE_ABS (d);
758 break;
759 case NEG:
760 d = REAL_VALUE_NEGATE (d);
761 break;
762 case FLOAT_TRUNCATE:
763 d = real_value_truncate (mode, d);
764 break;
765 case FLOAT_EXTEND:
766 /* All this does is change the mode. */
767 break;
768 case FIX:
769 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
770 break;
771
772 default:
773 abort ();
774 }
775 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
776 }
777
778 else if (GET_CODE (trueop) == CONST_DOUBLE
779 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
780 && GET_MODE_CLASS (mode) == MODE_INT
781 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
782 {
783 HOST_WIDE_INT i;
784 REAL_VALUE_TYPE d;
785 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
786 switch (code)
787 {
788 case FIX: i = REAL_VALUE_FIX (d); break;
789 case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
790 default:
791 abort ();
792 }
793 return gen_int_mode (i, mode);
794 }
795
796 /* This was formerly used only for non-IEEE float.
797 eggert@twinsun.com says it is safe for IEEE also. */
798 else
799 {
800 enum rtx_code reversed;
801 /* There are some simplifications we can do even if the operands
802 aren't constant. */
803 switch (code)
804 {
805 case NOT:
806 /* (not (not X)) == X. */
807 if (GET_CODE (op) == NOT)
808 return XEXP (op, 0);
809
810 /* (not (eq X Y)) == (ne X Y), etc. */
811 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
812 && ((reversed = reversed_comparison_code (op, NULL_RTX))
813 != UNKNOWN))
814 return gen_rtx_fmt_ee (reversed,
815 op_mode, XEXP (op, 0), XEXP (op, 1));
816 break;
817
818 case NEG:
819 /* (neg (neg X)) == X. */
820 if (GET_CODE (op) == NEG)
821 return XEXP (op, 0);
822 break;
823
824 case SIGN_EXTEND:
825 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
826 becomes just the MINUS if its mode is MODE. This allows
827 folding switch statements on machines using casesi (such as
828 the VAX). */
829 if (GET_CODE (op) == TRUNCATE
830 && GET_MODE (XEXP (op, 0)) == mode
831 && GET_CODE (XEXP (op, 0)) == MINUS
832 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
833 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
834 return XEXP (op, 0);
835
836 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
837 if (! POINTERS_EXTEND_UNSIGNED
838 && mode == Pmode && GET_MODE (op) == ptr_mode
839 && (CONSTANT_P (op)
840 || (GET_CODE (op) == SUBREG
841 && GET_CODE (SUBREG_REG (op)) == REG
842 && REG_POINTER (SUBREG_REG (op))
843 && GET_MODE (SUBREG_REG (op)) == Pmode)))
844 return convert_memory_address (Pmode, op);
845 #endif
846 break;
847
848 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
849 case ZERO_EXTEND:
850 if (POINTERS_EXTEND_UNSIGNED > 0
851 && mode == Pmode && GET_MODE (op) == ptr_mode
852 && (CONSTANT_P (op)
853 || (GET_CODE (op) == SUBREG
854 && GET_CODE (SUBREG_REG (op)) == REG
855 && REG_POINTER (SUBREG_REG (op))
856 && GET_MODE (SUBREG_REG (op)) == Pmode)))
857 return convert_memory_address (Pmode, op);
858 break;
859 #endif
860
861 default:
862 break;
863 }
864
865 return 0;
866 }
867 }
868 \f
869 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
870 and OP1. Return 0 if no simplification is possible.
871
872 Don't use this for relational operations such as EQ or LT.
873 Use simplify_relational_operation instead. */
874 rtx
875 simplify_binary_operation (code, mode, op0, op1)
876 enum rtx_code code;
877 enum machine_mode mode;
878 rtx op0, op1;
879 {
880 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
881 HOST_WIDE_INT val;
882 unsigned int width = GET_MODE_BITSIZE (mode);
883 rtx tem;
884 rtx trueop0 = avoid_constant_pool_reference (op0);
885 rtx trueop1 = avoid_constant_pool_reference (op1);
886
887 /* Relational operations don't work here. We must know the mode
888 of the operands in order to do the comparison correctly.
889 Assuming a full word can give incorrect results.
890 Consider comparing 128 with -128 in QImode. */
891
892 if (GET_RTX_CLASS (code) == '<')
893 abort ();
894
895 /* Make sure the constant is second. */
896 if (GET_RTX_CLASS (code) == 'c'
897 && swap_commutative_operands_p (trueop0, trueop1))
898 {
899 tem = op0, op0 = op1, op1 = tem;
900 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
901 }
902
903 if (VECTOR_MODE_P (mode)
904 && GET_CODE (trueop0) == CONST_VECTOR
905 && GET_CODE (trueop1) == CONST_VECTOR)
906 {
907 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
908 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
909 enum machine_mode op0mode = GET_MODE (trueop0);
910 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
911 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
912 enum machine_mode op1mode = GET_MODE (trueop1);
913 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
914 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
915 rtvec v = rtvec_alloc (n_elts);
916 unsigned int i;
917
918 if (op0_n_elts != n_elts || op1_n_elts != n_elts)
919 abort ();
920
921 for (i = 0; i < n_elts; i++)
922 {
923 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
924 CONST_VECTOR_ELT (trueop0, i),
925 CONST_VECTOR_ELT (trueop1, i));
926 if (!x)
927 return 0;
928 RTVEC_ELT (v, i) = x;
929 }
930
931 return gen_rtx_CONST_VECTOR (mode, v);
932 }
933
934 if (GET_MODE_CLASS (mode) == MODE_FLOAT
935 && GET_CODE (trueop0) == CONST_DOUBLE
936 && GET_CODE (trueop1) == CONST_DOUBLE
937 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
938 {
939 REAL_VALUE_TYPE f0, f1, value;
940
941 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
942 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
943 f0 = real_value_truncate (mode, f0);
944 f1 = real_value_truncate (mode, f1);
945
946 if (code == DIV
947 && !MODE_HAS_INFINITIES (mode)
948 && REAL_VALUES_EQUAL (f1, dconst0))
949 return 0;
950
951 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
952
953 value = real_value_truncate (mode, value);
954 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
955 }
956
957 /* We can fold some multi-word operations. */
958 if (GET_MODE_CLASS (mode) == MODE_INT
959 && width == HOST_BITS_PER_WIDE_INT * 2
960 && (GET_CODE (trueop0) == CONST_DOUBLE
961 || GET_CODE (trueop0) == CONST_INT)
962 && (GET_CODE (trueop1) == CONST_DOUBLE
963 || GET_CODE (trueop1) == CONST_INT))
964 {
965 unsigned HOST_WIDE_INT l1, l2, lv;
966 HOST_WIDE_INT h1, h2, hv;
967
968 if (GET_CODE (trueop0) == CONST_DOUBLE)
969 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
970 else
971 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
972
973 if (GET_CODE (trueop1) == CONST_DOUBLE)
974 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
975 else
976 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
977
978 switch (code)
979 {
980 case MINUS:
981 /* A - B == A + (-B). */
982 neg_double (l2, h2, &lv, &hv);
983 l2 = lv, h2 = hv;
984
985 /* ... fall through ... */
986
987 case PLUS:
988 add_double (l1, h1, l2, h2, &lv, &hv);
989 break;
990
991 case MULT:
992 mul_double (l1, h1, l2, h2, &lv, &hv);
993 break;
994
995 case DIV: case MOD: case UDIV: case UMOD:
996 /* We'd need to include tree.h to do this and it doesn't seem worth
997 it. */
998 return 0;
999
1000 case AND:
1001 lv = l1 & l2, hv = h1 & h2;
1002 break;
1003
1004 case IOR:
1005 lv = l1 | l2, hv = h1 | h2;
1006 break;
1007
1008 case XOR:
1009 lv = l1 ^ l2, hv = h1 ^ h2;
1010 break;
1011
1012 case SMIN:
1013 if (h1 < h2
1014 || (h1 == h2
1015 && ((unsigned HOST_WIDE_INT) l1
1016 < (unsigned HOST_WIDE_INT) l2)))
1017 lv = l1, hv = h1;
1018 else
1019 lv = l2, hv = h2;
1020 break;
1021
1022 case SMAX:
1023 if (h1 > h2
1024 || (h1 == h2
1025 && ((unsigned HOST_WIDE_INT) l1
1026 > (unsigned HOST_WIDE_INT) l2)))
1027 lv = l1, hv = h1;
1028 else
1029 lv = l2, hv = h2;
1030 break;
1031
1032 case UMIN:
1033 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1034 || (h1 == h2
1035 && ((unsigned HOST_WIDE_INT) l1
1036 < (unsigned HOST_WIDE_INT) l2)))
1037 lv = l1, hv = h1;
1038 else
1039 lv = l2, hv = h2;
1040 break;
1041
1042 case UMAX:
1043 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1044 || (h1 == h2
1045 && ((unsigned HOST_WIDE_INT) l1
1046 > (unsigned HOST_WIDE_INT) l2)))
1047 lv = l1, hv = h1;
1048 else
1049 lv = l2, hv = h2;
1050 break;
1051
1052 case LSHIFTRT: case ASHIFTRT:
1053 case ASHIFT:
1054 case ROTATE: case ROTATERT:
1055 #ifdef SHIFT_COUNT_TRUNCATED
1056 if (SHIFT_COUNT_TRUNCATED)
1057 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1058 #endif
1059
1060 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1061 return 0;
1062
1063 if (code == LSHIFTRT || code == ASHIFTRT)
1064 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1065 code == ASHIFTRT);
1066 else if (code == ASHIFT)
1067 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1068 else if (code == ROTATE)
1069 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1070 else /* code == ROTATERT */
1071 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1072 break;
1073
1074 default:
1075 return 0;
1076 }
1077
1078 return immed_double_const (lv, hv, mode);
1079 }
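/* E.g. with a 32-bit HOST_WIDE_INT and DImode (width == 64), adding
   a CONST_INT to a CONST_DOUBLE is carried out on the (low, high)
   word pairs by add_double, and immed_double_const re-wraps the
   result as a CONST_INT or CONST_DOUBLE as appropriate.  */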
1080
1081 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1082 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1083 {
1084 /* Even if we can't compute a constant result,
1085 there are some cases worth simplifying. */
1086
1087 switch (code)
1088 {
1089 case PLUS:
1090 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1091 when x is NaN, infinite, or finite and nonzero. They aren't
1092 when x is -0 and the rounding mode is not towards -infinity,
1093 since (-0) + 0 is then 0. */
1094 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1095 return op0;
1096
1097 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1098 transformations are safe even for IEEE. */
1099 if (GET_CODE (op0) == NEG)
1100 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1101 else if (GET_CODE (op1) == NEG)
1102 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1103
1104 /* (~a) + 1 -> -a */
1105 if (INTEGRAL_MODE_P (mode)
1106 && GET_CODE (op0) == NOT
1107 && trueop1 == const1_rtx)
1108 return gen_rtx_NEG (mode, XEXP (op0, 0));
1109
1110 /* Handle both-operands-constant cases. We can only add
1111 CONST_INTs to constants since the sum of relocatable symbols
1112 can't be handled by most assemblers. Don't add CONST_INT
1113 to CONST_INT since overflow won't be computed properly if wider
1114 than HOST_BITS_PER_WIDE_INT. */
1115
1116 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1117 && GET_CODE (op1) == CONST_INT)
1118 return plus_constant (op0, INTVAL (op1));
1119 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1120 && GET_CODE (op0) == CONST_INT)
1121 return plus_constant (op1, INTVAL (op0));
1122
1123 /* See if this is something like X * C - X or vice versa or
1124 if the multiplication is written as a shift. If so, we can
1125 distribute and make a new multiply, shift, or maybe just
1126 have X (if C is 2 in the example above). But don't make a
1127 real multiply if we didn't have one before. */
1128
1129 if (! FLOAT_MODE_P (mode))
1130 {
1131 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1132 rtx lhs = op0, rhs = op1;
1133 int had_mult = 0;
1134
1135 if (GET_CODE (lhs) == NEG)
1136 coeff0 = -1, lhs = XEXP (lhs, 0);
1137 else if (GET_CODE (lhs) == MULT
1138 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1139 {
1140 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1141 had_mult = 1;
1142 }
1143 else if (GET_CODE (lhs) == ASHIFT
1144 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1145 && INTVAL (XEXP (lhs, 1)) >= 0
1146 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1147 {
1148 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1149 lhs = XEXP (lhs, 0);
1150 }
1151
1152 if (GET_CODE (rhs) == NEG)
1153 coeff1 = -1, rhs = XEXP (rhs, 0);
1154 else if (GET_CODE (rhs) == MULT
1155 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1156 {
1157 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1158 had_mult = 1;
1159 }
1160 else if (GET_CODE (rhs) == ASHIFT
1161 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1162 && INTVAL (XEXP (rhs, 1)) >= 0
1163 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1164 {
1165 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1166 rhs = XEXP (rhs, 0);
1167 }
1168
1169 if (rtx_equal_p (lhs, rhs))
1170 {
1171 tem = simplify_gen_binary (MULT, mode, lhs,
1172 GEN_INT (coeff0 + coeff1));
1173 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1174 }
1175 }
1176
1177 /* If one of the operands is a PLUS or a MINUS, see if we can
1178 simplify this by the associative law.
1179 Don't use the associative law for floating point.
1180 The inaccuracy makes it nonassociative,
1181 and subtle programs can break if operations are associated. */
1182
1183 if (INTEGRAL_MODE_P (mode)
1184 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1185 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1186 || (GET_CODE (op0) == CONST
1187 && GET_CODE (XEXP (op0, 0)) == PLUS)
1188 || (GET_CODE (op1) == CONST
1189 && GET_CODE (XEXP (op1, 0)) == PLUS))
1190 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1191 return tem;
1192 break;
1193
1194 case COMPARE:
1195 #ifdef HAVE_cc0
1196 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1197 using cc0, in which case we want to leave it as a COMPARE
1198 so we can distinguish it from a register-register-copy.
1199
1200 In IEEE floating point, x-0 is not the same as x. */
1201
1202 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1203 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1204 && trueop1 == CONST0_RTX (mode))
1205 return op0;
1206 #endif
1207
1208 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1209 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1210 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1211 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1212 {
1213 rtx xop00 = XEXP (op0, 0);
1214 rtx xop10 = XEXP (op1, 0);
1215
1216 #ifdef HAVE_cc0
1217 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1218 #else
1219 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1220 && GET_MODE (xop00) == GET_MODE (xop10)
1221 && REGNO (xop00) == REGNO (xop10)
1222 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1223 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1224 #endif
1225 return xop00;
1226 }
1227 break;
1228
1229 case MINUS:
1230 /* We can't assume x-x is 0 even with non-IEEE floating point,
1231 but since it is zero except in very strange circumstances, we
1232 will treat it as zero with -funsafe-math-optimizations. */
1233 if (rtx_equal_p (trueop0, trueop1)
1234 && ! side_effects_p (op0)
1235 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1236 return CONST0_RTX (mode);
1237
1238 /* Change subtraction from zero into negation. (0 - x) is the
1239 same as -x when x is NaN, infinite, or finite and nonzero.
1240 But if the mode has signed zeros, and does not round towards
1241 -infinity, then 0 - 0 is 0, not -0. */
1242 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1243 return gen_rtx_NEG (mode, op1);
1244
1245 /* (-1 - a) is ~a. */
1246 if (trueop0 == constm1_rtx)
1247 return gen_rtx_NOT (mode, op1);
1248
1249 /* Subtracting 0 has no effect unless the mode has signed zeros
1250 and supports rounding towards -infinity. In such a case,
1251 0 - 0 is -0. */
1252 if (!(HONOR_SIGNED_ZEROS (mode)
1253 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1254 && trueop1 == CONST0_RTX (mode))
1255 return op0;
1256
1257 /* See if this is something like X * C - X or vice versa or
1258 if the multiplication is written as a shift. If so, we can
1259 distribute and make a new multiply, shift, or maybe just
1260 have X (if C is 2 in the example above). But don't make a
1261 real multiply if we didn't have one before. */
1262
1263 if (! FLOAT_MODE_P (mode))
1264 {
1265 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1266 rtx lhs = op0, rhs = op1;
1267 int had_mult = 0;
1268
1269 if (GET_CODE (lhs) == NEG)
1270 coeff0 = -1, lhs = XEXP (lhs, 0);
1271 else if (GET_CODE (lhs) == MULT
1272 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1273 {
1274 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1275 had_mult = 1;
1276 }
1277 else if (GET_CODE (lhs) == ASHIFT
1278 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1279 && INTVAL (XEXP (lhs, 1)) >= 0
1280 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1281 {
1282 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1283 lhs = XEXP (lhs, 0);
1284 }
1285
1286 if (GET_CODE (rhs) == NEG)
1287 coeff1 = -1, rhs = XEXP (rhs, 0);
1288 else if (GET_CODE (rhs) == MULT
1289 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1290 {
1291 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1292 had_mult = 1;
1293 }
1294 else if (GET_CODE (rhs) == ASHIFT
1295 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1296 && INTVAL (XEXP (rhs, 1)) >= 0
1297 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1298 {
1299 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1300 rhs = XEXP (rhs, 0);
1301 }
1302
1303 if (rtx_equal_p (lhs, rhs))
1304 {
1305 tem = simplify_gen_binary (MULT, mode, lhs,
1306 GEN_INT (coeff0 - coeff1));
1307 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1308 }
1309 }
1310
1311 /* (a - (-b)) -> (a + b). True even for IEEE. */
1312 if (GET_CODE (op1) == NEG)
1313 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1314
1315 /* If one of the operands is a PLUS or a MINUS, see if we can
1316 simplify this by the associative law.
1317 Don't use the associative law for floating point.
1318 The inaccuracy makes it nonassociative,
1319 and subtle programs can break if operations are associated. */
1320
1321 if (INTEGRAL_MODE_P (mode)
1322 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1323 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1324 || (GET_CODE (op0) == CONST
1325 && GET_CODE (XEXP (op0, 0)) == PLUS)
1326 || (GET_CODE (op1) == CONST
1327 && GET_CODE (XEXP (op1, 0)) == PLUS))
1328 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1329 return tem;
1330
1331 /* Don't let a relocatable value get a negative coeff. */
1332 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1333 return simplify_gen_binary (PLUS, mode,
1334 op0,
1335 neg_const_int (mode, op1));
1336
1337 /* (x - (x & y)) -> (x & ~y) */
1338 if (GET_CODE (op1) == AND)
1339 {
1340 if (rtx_equal_p (op0, XEXP (op1, 0)))
1341 {
1342 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1343 GET_MODE (XEXP (op1, 1)));
1344 return simplify_gen_binary (AND, mode, op0, tem);
1345 }
1346 if (rtx_equal_p (op0, XEXP (op1, 1)))
1347 {
1348 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1349 GET_MODE (XEXP (op1, 0)));
1350 return simplify_gen_binary (AND, mode, op0, tem);
1351 }
1352 }
1353 break;
1354
1355 case MULT:
1356 if (trueop1 == constm1_rtx)
1357 {
1358 tem = simplify_unary_operation (NEG, mode, op0, mode);
1359
1360 return tem ? tem : gen_rtx_NEG (mode, op0);
1361 }
1362
1363 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1364 x is NaN, since x * 0 is then also NaN. Nor is it valid
1365 when the mode has signed zeros, since multiplying a negative
1366 number by 0 will give -0, not 0. */
1367 if (!HONOR_NANS (mode)
1368 && !HONOR_SIGNED_ZEROS (mode)
1369 && trueop1 == CONST0_RTX (mode)
1370 && ! side_effects_p (op0))
1371 return op1;
1372
1373 /* In IEEE floating point, x*1 is not equivalent to x for
1374 signalling NaNs. */
1375 if (!HONOR_SNANS (mode)
1376 && trueop1 == CONST1_RTX (mode))
1377 return op0;
1378
1379 /* Convert multiply by constant power of two into shift unless
1380 we are still generating RTL. This test is a kludge. */
1381 if (GET_CODE (trueop1) == CONST_INT
1382 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1383 /* If the mode is larger than the host word size, and the
1384 uppermost bit is set, then this isn't a power of two due
1385 to implicit sign extension. */
1386 && (width <= HOST_BITS_PER_WIDE_INT
1387 || val != HOST_BITS_PER_WIDE_INT - 1)
1388 && ! rtx_equal_function_value_matters)
1389 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1390
1391 /* x*2 is x+x and x*(-1) is -x */
1392 if (GET_CODE (trueop1) == CONST_DOUBLE
1393 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1394 && GET_MODE (op0) == mode)
1395 {
1396 REAL_VALUE_TYPE d;
1397 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1398
1399 if (REAL_VALUES_EQUAL (d, dconst2))
1400 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1401
1402 if (REAL_VALUES_EQUAL (d, dconstm1))
1403 return gen_rtx_NEG (mode, op0);
1404 }
1405 break;
1406
1407 case IOR:
1408 if (trueop1 == const0_rtx)
1409 return op0;
1410 if (GET_CODE (trueop1) == CONST_INT
1411 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1412 == GET_MODE_MASK (mode)))
1413 return op1;
1414 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1415 return op0;
1416 /* A | (~A) -> -1 */
1417 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1418 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1419 && ! side_effects_p (op0)
1420 && GET_MODE_CLASS (mode) != MODE_CC)
1421 return constm1_rtx;
1422 break;
1423
1424 case XOR:
1425 if (trueop1 == const0_rtx)
1426 return op0;
1427 if (GET_CODE (trueop1) == CONST_INT
1428 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1429 == GET_MODE_MASK (mode)))
1430 return gen_rtx_NOT (mode, op0);
1431 if (trueop0 == trueop1 && ! side_effects_p (op0)
1432 && GET_MODE_CLASS (mode) != MODE_CC)
1433 return const0_rtx;
1434 break;
1435
1436 case AND:
1437 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1438 return const0_rtx;
1439 if (GET_CODE (trueop1) == CONST_INT
1440 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1441 == GET_MODE_MASK (mode)))
1442 return op0;
1443 if (trueop0 == trueop1 && ! side_effects_p (op0)
1444 && GET_MODE_CLASS (mode) != MODE_CC)
1445 return op0;
1446 /* A & (~A) -> 0 */
1447 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1448 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1449 && ! side_effects_p (op0)
1450 && GET_MODE_CLASS (mode) != MODE_CC)
1451 return const0_rtx;
1452 break;
1453
1454 case UDIV:
1455 /* Convert divide by power of two into shift (divide by 1 handled
1456 below). */
1457 if (GET_CODE (trueop1) == CONST_INT
1458 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1459 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1460
1461 /* ... fall through ... */
1462
1463 case DIV:
1464 if (trueop1 == CONST1_RTX (mode))
1465 {
1466 /* On some platforms DIV uses narrower mode than its
1467 operands. */
1468 rtx x = gen_lowpart_common (mode, op0);
1469 if (x)
1470 return x;
1471 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1472 return gen_lowpart_SUBREG (mode, op0);
1473 else
1474 return op0;
1475 }
1476
1477 /* Maybe change 0 / x to 0. This transformation isn't safe for
1478 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1479 Nor is it safe for modes with signed zeros, since dividing
1480 0 by a negative number gives -0, not 0. */
1481 if (!HONOR_NANS (mode)
1482 && !HONOR_SIGNED_ZEROS (mode)
1483 && trueop0 == CONST0_RTX (mode)
1484 && ! side_effects_p (op1))
1485 return op0;
1486
1487 /* Change division by a constant into multiplication. Only do
1488 this with -funsafe-math-optimizations. */
1489 else if (GET_CODE (trueop1) == CONST_DOUBLE
1490 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1491 && trueop1 != CONST0_RTX (mode)
1492 && flag_unsafe_math_optimizations)
1493 {
1494 REAL_VALUE_TYPE d;
1495 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1496
1497 if (! REAL_VALUES_EQUAL (d, dconst0))
1498 {
1499 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1500 return gen_rtx_MULT (mode, op0,
1501 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1502 }
1503 }
1504 break;
1505
1506 case UMOD:
1507 /* Handle modulus by power of two (mod with 1 handled below). */
1508 if (GET_CODE (trueop1) == CONST_INT
1509 && exact_log2 (INTVAL (trueop1)) > 0)
1510 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1511
1512 /* ... fall through ... */
1513
1514 case MOD:
1515 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1516 && ! side_effects_p (op0) && ! side_effects_p (op1))
1517 return const0_rtx;
1518 break;
1519
1520 case ROTATERT:
1521 case ROTATE:
1522 case ASHIFTRT:
1523 /* Rotating ~0 always results in ~0. */
1524 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1525 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1526 && ! side_effects_p (op1))
1527 return op0;
1528
1529 /* ... fall through ... */
1530
1531 case ASHIFT:
1532 case LSHIFTRT:
1533 if (trueop1 == const0_rtx)
1534 return op0;
1535 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1536 return op0;
1537 break;
1538
1539 case SMIN:
1540 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1541 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
1542 && ! side_effects_p (op0))
1543 return op1;
1544 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1545 return op0;
1546 break;
1547
1548 case SMAX:
1549 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1550 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1551 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1552 && ! side_effects_p (op0))
1553 return op1;
1554 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1555 return op0;
1556 break;
1557
1558 case UMIN:
1559 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1560 return op1;
1561 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1562 return op0;
1563 break;
1564
1565 case UMAX:
1566 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1567 return op1;
1568 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1569 return op0;
1570 break;
1571
1572 case SS_PLUS:
1573 case US_PLUS:
1574 case SS_MINUS:
1575 case US_MINUS:
1576 /* ??? There are simplifications that can be done. */
1577 return 0;
1578
1579 case VEC_SELECT:
1580 if (!VECTOR_MODE_P (mode))
1581 {
1582 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1583 || (mode
1584 != GET_MODE_INNER (GET_MODE (trueop0)))
1585 || GET_CODE (trueop1) != PARALLEL
1586 || XVECLEN (trueop1, 0) != 1
1587 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
1588 abort ();
1589
1590 if (GET_CODE (trueop0) == CONST_VECTOR)
1591 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
1592 }
1593 else
1594 {
1595 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1596 || (GET_MODE_INNER (mode)
1597 != GET_MODE_INNER (GET_MODE (trueop0)))
1598 || GET_CODE (trueop1) != PARALLEL)
1599 abort ();
1600
1601 if (GET_CODE (trueop0) == CONST_VECTOR)
1602 {
1603 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1604 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1605 rtvec v = rtvec_alloc (n_elts);
1606 unsigned int i;
1607
1608 if (XVECLEN (trueop1, 0) != (int) n_elts)
1609 abort ();
1610 for (i = 0; i < n_elts; i++)
1611 {
1612 rtx x = XVECEXP (trueop1, 0, i);
1613
1614 if (GET_CODE (x) != CONST_INT)
1615 abort ();
1616 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
1617 }
1618
1619 return gen_rtx_CONST_VECTOR (mode, v);
1620 }
1621 }
1622 return 0;
1623 case VEC_CONCAT:
1624 {
1625 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1626 ? GET_MODE (trueop0)
1627 : GET_MODE_INNER (mode));
1628 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
1629 ? GET_MODE (trueop1)
1630 : GET_MODE_INNER (mode));
1631
1632 if (!VECTOR_MODE_P (mode)
1633 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
1634 != GET_MODE_SIZE (mode)))
1635 abort ();
1636
1637 if ((VECTOR_MODE_P (op0_mode)
1638 && (GET_MODE_INNER (mode)
1639 != GET_MODE_INNER (op0_mode)))
1640 || (!VECTOR_MODE_P (op0_mode)
1641 && GET_MODE_INNER (mode) != op0_mode))
1642 abort ();
1643
1644 if ((VECTOR_MODE_P (op1_mode)
1645 && (GET_MODE_INNER (mode)
1646 != GET_MODE_INNER (op1_mode)))
1647 || (!VECTOR_MODE_P (op1_mode)
1648 && GET_MODE_INNER (mode) != op1_mode))
1649 abort ();
1650
1651 if ((GET_CODE (trueop0) == CONST_VECTOR
1652 || GET_CODE (trueop0) == CONST_INT
1653 || GET_CODE (trueop0) == CONST_DOUBLE)
1654 && (GET_CODE (trueop1) == CONST_VECTOR
1655 || GET_CODE (trueop1) == CONST_INT
1656 || GET_CODE (trueop1) == CONST_DOUBLE))
1657 {
1658 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1659 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1660 rtvec v = rtvec_alloc (n_elts);
1661 unsigned int i;
1662 unsigned in_n_elts = 1;
1663
1664 if (VECTOR_MODE_P (op0_mode))
1665 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
1666 for (i = 0; i < n_elts; i++)
1667 {
1668 if (i < in_n_elts)
1669 {
1670 if (!VECTOR_MODE_P (op0_mode))
1671 RTVEC_ELT (v, i) = trueop0;
1672 else
1673 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
1674 }
1675 else
1676 {
1677 if (!VECTOR_MODE_P (op1_mode))
1678 RTVEC_ELT (v, i) = trueop1;
1679 else
1680 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
1681 i - in_n_elts);
1682 }
1683 }
1684
1685 return gen_rtx_CONST_VECTOR (mode, v);
1686 }
1687 }
1688 return 0;
1689
1690 default:
1691 abort ();
1692 }
1693
1694 return 0;
1695 }
1696
1697 /* Get the integer argument values in two forms:
1698 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1699
1700 arg0 = INTVAL (trueop0);
1701 arg1 = INTVAL (trueop1);
1702
1703 if (width < HOST_BITS_PER_WIDE_INT)
1704 {
1705 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1706 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1707
1708 arg0s = arg0;
1709 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1710 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1711
1712 arg1s = arg1;
1713 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1714 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1715 }
1716 else
1717 {
1718 arg0s = arg0;
1719 arg1s = arg1;
1720 }
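/* E.g. for QImode (width == 8), arg0 == 0xff gives arg0s == -1: the
   operand is first reduced modulo 2**width and then sign-extended
   from bit width - 1, so the signed view matches the mode's
   interpretation of the bits.  */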
1721
1722 /* Compute the value of the arithmetic. */
1723
1724 switch (code)
1725 {
1726 case PLUS:
1727 val = arg0s + arg1s;
1728 break;
1729
1730 case MINUS:
1731 val = arg0s - arg1s;
1732 break;
1733
1734 case MULT:
1735 val = arg0s * arg1s;
1736 break;
1737
1738 case DIV:
1739 if (arg1s == 0
1740 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1741 && arg1s == -1))
1742 return 0;
1743 val = arg0s / arg1s;
1744 break;
1745
1746 case MOD:
1747 if (arg1s == 0
1748 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1749 && arg1s == -1))
1750 return 0;
1751 val = arg0s % arg1s;
1752 break;
1753
1754 case UDIV:
1755 if (arg1 == 0
1756 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1757 && arg1s == -1))
1758 return 0;
1759 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1760 break;
1761
1762 case UMOD:
1763 if (arg1 == 0
1764 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1765 && arg1s == -1))
1766 return 0;
1767 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1768 break;
1769
1770 case AND:
1771 val = arg0 & arg1;
1772 break;
1773
1774 case IOR:
1775 val = arg0 | arg1;
1776 break;
1777
1778 case XOR:
1779 val = arg0 ^ arg1;
1780 break;
1781
1782 case LSHIFTRT:
1783 /* If shift count is undefined, don't fold it; let the machine do
1784 what it wants. But truncate it if the machine will do that. */
1785 if (arg1 < 0)
1786 return 0;
1787
1788 #ifdef SHIFT_COUNT_TRUNCATED
1789 if (SHIFT_COUNT_TRUNCATED)
1790 arg1 %= width;
1791 #endif
1792
1793 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1794 break;
1795
1796 case ASHIFT:
1797 if (arg1 < 0)
1798 return 0;
1799
1800 #ifdef SHIFT_COUNT_TRUNCATED
1801 if (SHIFT_COUNT_TRUNCATED)
1802 arg1 %= width;
1803 #endif
1804
1805 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1806 break;
1807
1808 case ASHIFTRT:
1809 if (arg1 < 0)
1810 return 0;
1811
1812 #ifdef SHIFT_COUNT_TRUNCATED
1813 if (SHIFT_COUNT_TRUNCATED)
1814 arg1 %= width;
1815 #endif
1816
1817 val = arg0s >> arg1;
1818
1819 /* Bootstrap compiler may not have sign extended the right shift.
1820 Manually extend the sign to ensure bootstrap cc matches gcc. */
1821 if (arg0s < 0 && arg1 > 0)
1822 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1823
1824 break;
1825
1826 case ROTATERT:
1827 if (arg1 < 0)
1828 return 0;
1829
1830 arg1 %= width;
1831 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1832 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1833 break;
1834
1835 case ROTATE:
1836 if (arg1 < 0)
1837 return 0;
1838
1839 arg1 %= width;
1840 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1841 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1842 break;
1843
1844 case COMPARE:
1845 /* Do nothing here. */
1846 return 0;
1847
1848 case SMIN:
1849 val = arg0s <= arg1s ? arg0s : arg1s;
1850 break;
1851
1852 case UMIN:
1853 val = ((unsigned HOST_WIDE_INT) arg0
1854 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1855 break;
1856
1857 case SMAX:
1858 val = arg0s > arg1s ? arg0s : arg1s;
1859 break;
1860
1861 case UMAX:
1862 val = ((unsigned HOST_WIDE_INT) arg0
1863 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1864 break;
1865
1866 case SS_PLUS:
1867 case US_PLUS:
1868 case SS_MINUS:
1869 case US_MINUS:
1870 /* ??? There are simplifications that can be done. */
1871 return 0;
1872
1873 default:
1874 abort ();
1875 }
1876
1877 val = trunc_int_for_mode (val, mode);
1878
1879 return GEN_INT (val);
1880 }
1881 \f
1882 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1883 PLUS or MINUS.
1884
1885 Rather than test for specific cases, we do this by a brute-force method
1886 and do all possible simplifications until no more changes occur. Then
1887 we rebuild the operation.
1888
1889 If FORCE is true, then always generate the rtx. This is used to
1890 canonicalize stuff emitted from simplify_gen_binary. Note that this
1891 can still fail if the rtx is too complex. It won't fail just because
1892 the result is not 'simpler' than the input, however. */
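/* For example, simplifying (minus (plus A B) C) first expands the
   operands into the list { A, + }, { B, + }, { C, - }; pairs from
   that list are then simplified against each other before the
   expression is rebuilt below.  */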
1893
1894 struct simplify_plus_minus_op_data
1895 {
1896 rtx op;
1897 int neg;
1898 };
1899
1900 static int
1901 simplify_plus_minus_op_data_cmp (p1, p2)
1902 const void *p1;
1903 const void *p2;
1904 {
1905 const struct simplify_plus_minus_op_data *d1 = p1;
1906 const struct simplify_plus_minus_op_data *d2 = p2;
1907
1908 return (commutative_operand_precedence (d2->op)
1909 - commutative_operand_precedence (d1->op));
1910 }
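/* With qsort below, returning the precedence difference sorts
   operands of higher commutative_operand_precedence first, which in
   particular pushes constants toward the end of the ops array.  */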
1911
1912 static rtx
1913 simplify_plus_minus (code, mode, op0, op1, force)
1914 enum rtx_code code;
1915 enum machine_mode mode;
1916 rtx op0, op1;
1917 int force;
1918 {
1919 struct simplify_plus_minus_op_data ops[8];
1920 rtx result, tem;
1921 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
1922 int first, negate, changed;
1923 int i, j;
1924
1925 memset ((char *) ops, 0, sizeof ops);
1926
1927 /* Set up the two operands and then expand them until nothing has been
1928 changed. If we run out of room in our array, give up; this should
1929 almost never happen. */
1930
1931 ops[0].op = op0;
1932 ops[0].neg = 0;
1933 ops[1].op = op1;
1934 ops[1].neg = (code == MINUS);
1935
1936 do
1937 {
1938 changed = 0;
1939
1940 for (i = 0; i < n_ops; i++)
1941 {
1942 rtx this_op = ops[i].op;
1943 int this_neg = ops[i].neg;
1944 enum rtx_code this_code = GET_CODE (this_op);
1945
1946 switch (this_code)
1947 {
1948 case PLUS:
1949 case MINUS:
1950 if (n_ops == 7)
1951 return NULL_RTX;
1952
1953 ops[n_ops].op = XEXP (this_op, 1);
1954 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
1955 n_ops++;
1956
1957 ops[i].op = XEXP (this_op, 0);
1958 input_ops++;
1959 changed = 1;
1960 break;
1961
1962 case NEG:
1963 ops[i].op = XEXP (this_op, 0);
1964 ops[i].neg = ! this_neg;
1965 changed = 1;
1966 break;
1967
1968 case CONST:
1969 if (n_ops < 7
1970 && GET_CODE (XEXP (this_op, 0)) == PLUS
1971 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
1972 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
1973 {
1974 ops[i].op = XEXP (XEXP (this_op, 0), 0);
1975 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
1976 ops[n_ops].neg = this_neg;
1977 n_ops++;
1978 input_consts++;
1979 changed = 1;
1980 }
1981 break;
1982
1983 case NOT:
1984 /* ~a -> (-a - 1) */
1985 if (n_ops != 7)
1986 {
1987 ops[n_ops].op = constm1_rtx;
1988 ops[n_ops++].neg = this_neg;
1989 ops[i].op = XEXP (this_op, 0);
1990 ops[i].neg = !this_neg;
1991 changed = 1;
1992 }
1993 break;
1994
1995 case CONST_INT:
1996 if (this_neg)
1997 {
1998 ops[i].op = neg_const_int (mode, this_op);
1999 ops[i].neg = 0;
2000 changed = 1;
2001 }
2002 break;
2003
2004 default:
2005 break;
2006 }
2007 }
2008 }
2009 while (changed);
2010
2011 /* If we only have two operands, we can't do anything. */
2012 if (n_ops <= 2 && !force)
2013 return NULL_RTX;
2014
2015 /* Count the number of CONSTs we didn't split above. */
2016 for (i = 0; i < n_ops; i++)
2017 if (GET_CODE (ops[i].op) == CONST)
2018 input_consts++;
2019
2020 /* Now simplify each pair of operands until nothing changes. The first
2021 time through just simplify constants against each other. */
2022
2023 first = 1;
2024 do
2025 {
2026 changed = first;
2027
2028 for (i = 0; i < n_ops - 1; i++)
2029 for (j = i + 1; j < n_ops; j++)
2030 {
2031 rtx lhs = ops[i].op, rhs = ops[j].op;
2032 int lneg = ops[i].neg, rneg = ops[j].neg;
2033
2034 if (lhs != 0 && rhs != 0
2035 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2036 {
2037 enum rtx_code ncode = PLUS;
2038
2039 if (lneg != rneg)
2040 {
2041 ncode = MINUS;
2042 if (lneg)
2043 tem = lhs, lhs = rhs, rhs = tem;
2044 }
2045 else if (swap_commutative_operands_p (lhs, rhs))
2046 tem = lhs, lhs = rhs, rhs = tem;
2047
2048 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2049
2050 /* Reject "simplifications" that just wrap the two
2051 arguments in a CONST. Failure to do so can result
2052 in infinite recursion with simplify_binary_operation
2053 when it calls us to simplify CONST operations. */
2054 if (tem
2055 && ! (GET_CODE (tem) == CONST
2056 && GET_CODE (XEXP (tem, 0)) == ncode
2057 && XEXP (XEXP (tem, 0), 0) == lhs
2058 && XEXP (XEXP (tem, 0), 1) == rhs)
2059 /* Don't allow -x + -1 -> ~x simplifications in the
2060 first pass. This allows us the chance to combine
2061 the -1 with other constants. */
2062 && ! (first
2063 && GET_CODE (tem) == NOT
2064 && XEXP (tem, 0) == rhs))
2065 {
2066 lneg &= rneg;
2067 if (GET_CODE (tem) == NEG)
2068 tem = XEXP (tem, 0), lneg = !lneg;
2069 if (GET_CODE (tem) == CONST_INT && lneg)
2070 tem = neg_const_int (mode, tem), lneg = 0;
2071
2072 ops[i].op = tem;
2073 ops[i].neg = lneg;
2074 ops[j].op = NULL_RTX;
2075 changed = 1;
2076 }
2077 }
2078 }
2079
2080 first = 0;
2081 }
2082 while (changed);
2083
2084 /* Pack all the operands to the lower-numbered entries. */
2085 for (i = 0, j = 0; j < n_ops; j++)
2086 if (ops[j].op)
2087 ops[i++] = ops[j];
2088 n_ops = i;
2089
2090 /* Sort the operations by decreasing commutative_operand_precedence.  */
2091 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2092
2093 /* We suppressed creation of trivial CONST expressions in the
2094 combination loop to avoid recursion. Create one manually now.
2095 The combination loop should have ensured that there is exactly
2096 one CONST_INT, and the sort will have ensured that it is last
2097 in the array and that any other constant will be next-to-last. */
2098
2099 if (n_ops > 1
2100 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2101 && CONSTANT_P (ops[n_ops - 2].op))
2102 {
2103 rtx value = ops[n_ops - 1].op;
2104 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2105 value = neg_const_int (mode, value);
2106 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2107 n_ops--;
2108 }
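
/* For example, if the two trailing entries are {(symbol_ref:SI s), +}
and {(const_int 4), +}, plus_constant combines them into
(const:SI (plus:SI (symbol_ref:SI s) (const_int 4))) and drops n_ops
by one; had the two entries carried opposite signs, the CONST_INT
would have been negated first.  */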
2109
2110 /* Count the number of CONSTs that we generated. */
2111 n_consts = 0;
2112 for (i = 0; i < n_ops; i++)
2113 if (GET_CODE (ops[i].op) == CONST)
2114 n_consts++;
2115
2116 /* Give up if we didn't reduce the number of operands we had. Make
2117 sure we count a CONST as two operands. If we have the same
2118 number of operands, but have made more CONSTs than before, this
2119 is also an improvement, so accept it. */
2120 if (!force
2121 && (n_ops + n_consts > input_ops
2122 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2123 return NULL_RTX;
2124
2125 /* Put a non-negated operand first. If there aren't any, make all
2126 operands positive and negate the whole thing later. */
2127
2128 negate = 0;
2129 for (i = 0; i < n_ops && ops[i].neg; i++)
2130 continue;
2131 if (i == n_ops)
2132 {
2133 for (i = 0; i < n_ops; i++)
2134 ops[i].neg = 0;
2135 negate = 1;
2136 }
2137 else if (i != 0)
2138 {
2139 tem = ops[0].op;
2140 ops[0] = ops[i];
2141 ops[i].op = tem;
2142 ops[i].neg = 1;
2143 }
2144
2145 /* Now make the result by performing the requested operations. */
2146 result = ops[0].op;
2147 for (i = 1; i < n_ops; i++)
2148 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2149 mode, result, ops[i].op);
2150
2151 return negate ? gen_rtx_NEG (mode, result) : result;
2152 }
2153
2154 /* Like simplify_binary_operation except used for relational operators.
2155 MODE is the mode of the operands, not that of the result. If MODE
2156 is VOIDmode, both operands must also be VOIDmode and we compare the
2157 operands in "infinite precision".
2158
2159 If no simplification is possible, this function returns zero. Otherwise,
2160 it returns either const_true_rtx or const0_rtx. */
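
/* A minimal usage sketch: both CONST_INTs carry VOIDmode, so the
comparison is done in "infinite precision":

simplify_relational_operation (LT, VOIDmode, GEN_INT (1), GEN_INT (2));

returns const_true_rtx, while swapping the two constants yields
const0_rtx.  */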
2161
2162 rtx
2163 simplify_relational_operation (code, mode, op0, op1)
2164 enum rtx_code code;
2165 enum machine_mode mode;
2166 rtx op0, op1;
2167 {
2168 int equal, op0lt, op0ltu, op1lt, op1ltu;
2169 rtx tem;
2170 rtx trueop0;
2171 rtx trueop1;
2172
2173 if (mode == VOIDmode
2174 && (GET_MODE (op0) != VOIDmode
2175 || GET_MODE (op1) != VOIDmode))
2176 abort ();
2177
2178 /* If op0 is a compare, extract the comparison arguments from it. */
2179 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2180 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2181
2182 trueop0 = avoid_constant_pool_reference (op0);
2183 trueop1 = avoid_constant_pool_reference (op1);
2184
2185 /* We can't simplify MODE_CC values since we don't know what the
2186 actual comparison is. */
2187 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2188 return 0;
2189
2190 /* Make sure the constant is second. */
2191 if (swap_commutative_operands_p (trueop0, trueop1))
2192 {
2193 tem = op0, op0 = op1, op1 = tem;
2194 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2195 code = swap_condition (code);
2196 }
2197
2198 /* For integer comparisons of A and B maybe we can simplify A - B and can
2199 then simplify a comparison of that with zero. If A and B are both either
2200 a register or a CONST_INT, this can't help; testing for these cases will
2201 prevent infinite recursion here and speed things up.
2202
2203 If CODE is an unsigned comparison, then we can never do this optimization,
2204 because it gives an incorrect result if the subtraction wraps around zero.
2205 ANSI C defines unsigned operations such that they never overflow, and
2206 thus such cases cannot be ignored.  */
2207
2208 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2209 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2210 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2211 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2212 && code != GTU && code != GEU && code != LTU && code != LEU)
2213 return simplify_relational_operation (signed_condition (code),
2214 mode, tem, const0_rtx);
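
/* Concretely, in a 32-bit mode 1 LTU 0xffffffff is true, but the
subtraction 1 - 0xffffffff wraps to 2, and 2 < 0 is false; hence the
unsigned codes are excluded above.  */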
2215
2216 if (flag_unsafe_math_optimizations && code == ORDERED)
2217 return const_true_rtx;
2218
2219 if (flag_unsafe_math_optimizations && code == UNORDERED)
2220 return const0_rtx;
2221
2222 /* For modes without NaNs, if the two operands are equal, we know the
2223 result. */
2224 if (!HONOR_NANS (GET_MODE (trueop0)) && rtx_equal_p (trueop0, trueop1))
2225 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2226
2227 /* If the operands are floating-point constants, see if we can fold
2228 the result. */
2229 else if (GET_CODE (trueop0) == CONST_DOUBLE
2230 && GET_CODE (trueop1) == CONST_DOUBLE
2231 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2232 {
2233 REAL_VALUE_TYPE d0, d1;
2234
2235 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2236 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2237
2238 /* Comparisons are unordered iff at least one of the values is NaN. */
2239 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2240 switch (code)
2241 {
2242 case UNEQ:
2243 case UNLT:
2244 case UNGT:
2245 case UNLE:
2246 case UNGE:
2247 case NE:
2248 case UNORDERED:
2249 return const_true_rtx;
2250 case EQ:
2251 case LT:
2252 case GT:
2253 case LE:
2254 case GE:
2255 case LTGT:
2256 case ORDERED:
2257 return const0_rtx;
2258 default:
2259 return 0;
2260 }
2261
2262 equal = REAL_VALUES_EQUAL (d0, d1);
2263 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2264 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2265 }
2266
2267 /* Otherwise, see if the operands are both integers. */
2268 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2269 && (GET_CODE (trueop0) == CONST_DOUBLE
2270 || GET_CODE (trueop0) == CONST_INT)
2271 && (GET_CODE (trueop1) == CONST_DOUBLE
2272 || GET_CODE (trueop1) == CONST_INT))
2273 {
2274 int width = GET_MODE_BITSIZE (mode);
2275 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2276 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2277
2278 /* Get the two words comprising each integer constant. */
2279 if (GET_CODE (trueop0) == CONST_DOUBLE)
2280 {
2281 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2282 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2283 }
2284 else
2285 {
2286 l0u = l0s = INTVAL (trueop0);
2287 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2288 }
2289
2290 if (GET_CODE (trueop1) == CONST_DOUBLE)
2291 {
2292 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2293 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2294 }
2295 else
2296 {
2297 l1u = l1s = INTVAL (trueop1);
2298 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2299 }
2300
2301 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2302 we have to sign or zero-extend the values. */
2303 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2304 {
2305 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2306 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2307
2308 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2309 l0s |= ((HOST_WIDE_INT) (-1) << width);
2310
2311 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2312 l1s |= ((HOST_WIDE_INT) (-1) << width);
2313 }
2314 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2315 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2316
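/* At this point each value is a (high, low) pair.  For example, on a
host with 32-bit HOST_WIDE_INT, comparing DImode -1 against 1 gives
(h0s, l0u) = (-1, 0xffffffff) and (h1s, l1u) = (0, 1), so op0lt is set
by the signed test below while op0ltu is not by the unsigned one.  */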
2317 equal = (h0u == h1u && l0u == l1u);
2318 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2319 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2320 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2321 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2322 }
2323
2324 /* Otherwise, there are some code-specific tests we can make. */
2325 else
2326 {
2327 switch (code)
2328 {
2329 case EQ:
2330 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2331 return const0_rtx;
2332 break;
2333
2334 case NE:
2335 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2336 return const_true_rtx;
2337 break;
2338
2339 case GEU:
2340 /* Unsigned values are never negative. */
2341 if (trueop1 == const0_rtx)
2342 return const_true_rtx;
2343 break;
2344
2345 case LTU:
2346 if (trueop1 == const0_rtx)
2347 return const0_rtx;
2348 break;
2349
2350 case LEU:
2351 /* Unsigned values are never greater than the largest
2352 unsigned value. */
2353 if (GET_CODE (trueop1) == CONST_INT
2354 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2355 && INTEGRAL_MODE_P (mode))
2356 return const_true_rtx;
2357 break;
2358
2359 case GTU:
2360 if (GET_CODE (trueop1) == CONST_INT
2361 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2362 && INTEGRAL_MODE_P (mode))
2363 return const0_rtx;
2364 break;
2365
2366 case LT:
2367 /* Optimize abs(x) < 0.0. */
2368 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2369 {
2370 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2371 : trueop0;
2372 if (GET_CODE (tem) == ABS)
2373 return const0_rtx;
2374 }
2375 break;
2376
2377 case GE:
2378 /* Optimize abs(x) >= 0.0. */
2379 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2380 {
2381 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2382 : trueop0;
2383 if (GET_CODE (tem) == ABS)
2384 return const_true_rtx;
2385 }
2386 break;
2387
2388 case UNGE:
2389 /* Optimize ! (abs(x) < 0.0). */
2390 if (trueop1 == CONST0_RTX (mode))
2391 {
2392 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2393 : trueop0;
2394 if (GET_CODE (tem) == ABS)
2395 return const_true_rtx;
2396 }
2397 break;
2398
2399 default:
2400 break;
2401 }
2402
2403 return 0;
2404 }
2405
2406 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2407 as appropriate. */
2408 switch (code)
2409 {
2410 case EQ:
2411 case UNEQ:
2412 return equal ? const_true_rtx : const0_rtx;
2413 case NE:
2414 case LTGT:
2415 return ! equal ? const_true_rtx : const0_rtx;
2416 case LT:
2417 case UNLT:
2418 return op0lt ? const_true_rtx : const0_rtx;
2419 case GT:
2420 case UNGT:
2421 return op1lt ? const_true_rtx : const0_rtx;
2422 case LTU:
2423 return op0ltu ? const_true_rtx : const0_rtx;
2424 case GTU:
2425 return op1ltu ? const_true_rtx : const0_rtx;
2426 case LE:
2427 case UNLE:
2428 return equal || op0lt ? const_true_rtx : const0_rtx;
2429 case GE:
2430 case UNGE:
2431 return equal || op1lt ? const_true_rtx : const0_rtx;
2432 case LEU:
2433 return equal || op0ltu ? const_true_rtx : const0_rtx;
2434 case GEU:
2435 return equal || op1ltu ? const_true_rtx : const0_rtx;
2436 case ORDERED:
2437 return const_true_rtx;
2438 case UNORDERED:
2439 return const0_rtx;
2440 default:
2441 abort ();
2442 }
2443 }
2444 \f
2445 /* Simplify CODE, an operation with result mode MODE and three operands,
2446 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2447 a constant. Return 0 if no simplification is possible.  */
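
/* A minimal usage sketch, with A and B standing for arbitrary SImode
rtxes: a constant nonzero condition selects the first arm, so

simplify_ternary_operation (IF_THEN_ELSE, SImode, SImode,
const1_rtx, A, B);

returns A, and passing const0_rtx instead of const1_rtx returns B.  */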
2448
2449 rtx
2450 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
2451 enum rtx_code code;
2452 enum machine_mode mode, op0_mode;
2453 rtx op0, op1, op2;
2454 {
2455 unsigned int width = GET_MODE_BITSIZE (mode);
2456
2457 /* VOIDmode means "infinite" precision. */
2458 if (width == 0)
2459 width = HOST_BITS_PER_WIDE_INT;
2460
2461 switch (code)
2462 {
2463 case SIGN_EXTRACT:
2464 case ZERO_EXTRACT:
2465 if (GET_CODE (op0) == CONST_INT
2466 && GET_CODE (op1) == CONST_INT
2467 && GET_CODE (op2) == CONST_INT
2468 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2469 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2470 {
2471 /* Extracting a bit-field from a constant.  */
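/* E.g., assuming !BITS_BIG_ENDIAN, extracting 4 bits at bit position
4 from (const_int 0xab) yields (const_int 0xa) for ZERO_EXTRACT and
(const_int -6) for SIGN_EXTRACT.  */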
2472 HOST_WIDE_INT val = INTVAL (op0);
2473
2474 if (BITS_BIG_ENDIAN)
2475 val >>= (GET_MODE_BITSIZE (op0_mode)
2476 - INTVAL (op2) - INTVAL (op1));
2477 else
2478 val >>= INTVAL (op2);
2479
2480 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2481 {
2482 /* First zero-extend. */
2483 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2484 /* If desired, propagate sign bit. */
2485 if (code == SIGN_EXTRACT
2486 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2487 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2488 }
2489
2490 /* Clear the bits that don't belong in our mode,
2491 unless they and our sign bit are all one.
2492 So we get either a reasonable negative value or a reasonable
2493 unsigned value for this mode. */
2494 if (width < HOST_BITS_PER_WIDE_INT
2495 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2496 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2497 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2498
2499 return GEN_INT (val);
2500 }
2501 break;
2502
2503 case IF_THEN_ELSE:
2504 if (GET_CODE (op0) == CONST_INT)
2505 return op0 != const0_rtx ? op1 : op2;
2506
2507 /* Convert "a != b ? a : b" and "a == b ? b : a" to "a".  */
2508 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2509 && !HONOR_NANS (mode)
2510 && rtx_equal_p (XEXP (op0, 0), op1)
2511 && rtx_equal_p (XEXP (op0, 1), op2))
2512 return op1;
2513 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2514 && !HONOR_NANS (mode)
2515 && rtx_equal_p (XEXP (op0, 1), op1)
2516 && rtx_equal_p (XEXP (op0, 0), op2))
2517 return op2;
2518 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2519 {
2520 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2521 ? GET_MODE (XEXP (op0, 1))
2522 : GET_MODE (XEXP (op0, 0)));
2523 rtx temp;
2524 if (cmp_mode == VOIDmode)
2525 cmp_mode = op0_mode;
2526 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2527 XEXP (op0, 0), XEXP (op0, 1));
2528
2529 /* See if any simplifications were possible. */
2530 if (temp == const0_rtx)
2531 return op2;
2532 else if (temp == const1_rtx)
2533 return op1;
2534 else if (temp)
2535 op0 = temp;
2536
2537 /* Look for happy constants in op1 and op2. */
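/* E.g., if OP0 is (lt x y), OP1 is (const_int 1) and OP2 is
(const_int 0) on a target where STORE_FLAG_VALUE is 1, the whole
IF_THEN_ELSE collapses to (lt x y); with OP1 and OP2 exchanged it
becomes the reversed comparison (ge x y), provided the reversal is
known to be valid.  */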
2538 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2539 {
2540 HOST_WIDE_INT t = INTVAL (op1);
2541 HOST_WIDE_INT f = INTVAL (op2);
2542
2543 if (t == STORE_FLAG_VALUE && f == 0)
2544 code = GET_CODE (op0);
2545 else if (t == 0 && f == STORE_FLAG_VALUE)
2546 {
2547 enum rtx_code tmp;
2548 tmp = reversed_comparison_code (op0, NULL_RTX);
2549 if (tmp == UNKNOWN)
2550 break;
2551 code = tmp;
2552 }
2553 else
2554 break;
2555
2556 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2557 }
2558 }
2559 break;
2560 case VEC_MERGE:
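/* (vec_merge A B MASK) takes element I from A when bit I of the mask
is set and from B otherwise; e.g. in V4SImode with MASK (const_int 5),
i.e. binary 0101, elements 0 and 2 come from A and elements 1 and 3
come from B.  */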
2561 if (GET_MODE (op0) != mode
2562 || GET_MODE (op1) != mode
2563 || !VECTOR_MODE_P (mode))
2564 abort ();
2565 op2 = avoid_constant_pool_reference (op2);
2566 if (GET_CODE (op2) == CONST_INT)
2567 {
2568 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2569 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2570 int mask = (1 << n_elts) - 1;
2571
2572 if (!(INTVAL (op2) & mask))
2573 return op1;
2574 if ((INTVAL (op2) & mask) == mask)
2575 return op0;
2576
2577 op0 = avoid_constant_pool_reference (op0);
2578 op1 = avoid_constant_pool_reference (op1);
2579 if (GET_CODE (op0) == CONST_VECTOR
2580 && GET_CODE (op1) == CONST_VECTOR)
2581 {
2582 rtvec v = rtvec_alloc (n_elts);
2583 unsigned int i;
2584
2585 for (i = 0; i < n_elts; i++)
2586 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2587 ? CONST_VECTOR_ELT (op0, i)
2588 : CONST_VECTOR_ELT (op1, i));
2589 return gen_rtx_CONST_VECTOR (mode, v);
2590 }
2591 }
2592 break;
2593
2594 default:
2595 abort ();
2596 }
2597
2598 return 0;
2599 }
2600
2601 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2602 Return 0 if no simplification is possible.  */
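
/* A minimal usage sketch, assuming little-endian byte numbering:

simplify_subreg (QImode, GEN_INT (0x1234), HImode, 0);

extracts the low byte and returns (const_int 0x34); BYTE 1 would
instead return (const_int 0x12).  */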
2603 rtx
2604 simplify_subreg (outermode, op, innermode, byte)
2605 rtx op;
2606 unsigned int byte;
2607 enum machine_mode outermode, innermode;
2608 {
2609 /* Little bit of sanity checking. */
2610 if (innermode == VOIDmode || outermode == VOIDmode
2611 || innermode == BLKmode || outermode == BLKmode)
2612 abort ();
2613
2614 if (GET_MODE (op) != innermode
2615 && GET_MODE (op) != VOIDmode)
2616 abort ();
2617
2618 if (byte % GET_MODE_SIZE (outermode)
2619 || byte >= GET_MODE_SIZE (innermode))
2620 abort ();
2621
2622 if (outermode == innermode && !byte)
2623 return op;
2624
2625 /* Simplify subregs of vector constants. */
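/* E.g. (subreg:SI (const_vector:V4SI [a b c d]) 8) selects the element
at byte offset 8, i.e. c; wider and narrower outer modes are handled
by the branches below.  */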
2626 if (GET_CODE (op) == CONST_VECTOR)
2627 {
2628 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
2629 const unsigned int offset = byte / elt_size;
2630 rtx elt;
2631
2632 if (GET_MODE_INNER (innermode) == outermode)
2633 {
2634 elt = CONST_VECTOR_ELT (op, offset);
2635
2636 /* ??? We probably don't need this copy_rtx because constants
2637 can be shared.  */
2638
2639 return copy_rtx (elt);
2640 }
2641 else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
2642 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
2643 {
2644 return (gen_rtx_CONST_VECTOR
2645 (outermode,
2646 gen_rtvec_v (GET_MODE_NUNITS (outermode),
2647 &CONST_VECTOR_ELT (op, offset))));
2648 }
2649 else if (GET_MODE_CLASS (outermode) == MODE_INT
2650 && (GET_MODE_SIZE (outermode) % elt_size == 0))
2651 {
2652 /* This happens when the target register size is smaller than
2653 the vector mode, and we synthesize operations with vectors
2654 of elements that are smaller than the register size. */
2655 HOST_WIDE_INT sum = 0, high = 0;
2656 unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
2657 unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
2658 unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
2659 int shift = BITS_PER_UNIT * elt_size;
2660
2661 for (; n_elts--; i += step)
2662 {
2663 elt = CONST_VECTOR_ELT (op, i);
2664 if (GET_CODE (elt) == CONST_DOUBLE
2665 && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
2666 {
2667 elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
2668 elt);
2669 if (! elt)
2670 return NULL_RTX;
2671 }
2672 if (GET_CODE (elt) != CONST_INT)
2673 return NULL_RTX;
2674 /* Avoid overflow. */
2675 if (high >> (HOST_BITS_PER_WIDE_INT - shift))
2676 return NULL_RTX;
2677 high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
2678 sum = (sum << shift) + INTVAL (elt);
2679 }
2680 if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
2681 return GEN_INT (trunc_int_for_mode (sum, outermode));
2682 else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
2683 return immed_double_const (sum, high, outermode);
2684 else
2685 return NULL_RTX;
2686 }
2687 else if (GET_MODE_CLASS (outermode) == MODE_INT
2688 && (elt_size % GET_MODE_SIZE (outermode) == 0))
2689 {
2690 enum machine_mode new_mode
2691 = int_mode_for_mode (GET_MODE_INNER (innermode));
2692 int subbyte = byte % elt_size;
2693
2694 op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
2695 if (! op)
2696 return NULL_RTX;
2697 return simplify_subreg (outermode, op, new_mode, subbyte);
2698 }
2699 else if (GET_MODE_CLASS (outermode) == MODE_INT)
2700 /* This shouldn't happen, but let's not do anything stupid. */
2701 return NULL_RTX;
2702 }
2703
2704 /* Attempt to simplify constant to non-SUBREG expression. */
2705 if (CONSTANT_P (op))
2706 {
2707 int offset, part;
2708 unsigned HOST_WIDE_INT val = 0;
2709
2710 if (VECTOR_MODE_P (outermode))
2711 {
2712 /* Construct a CONST_VECTOR from individual subregs. */
2713 enum machine_mode submode = GET_MODE_INNER (outermode);
2714 int subsize = GET_MODE_UNIT_SIZE (outermode);
2715 int i, elts = GET_MODE_NUNITS (outermode);
2716 rtvec v = rtvec_alloc (elts);
2717 rtx elt;
2718
2719 for (i = 0; i < elts; i++, byte += subsize)
2720 {
2721 /* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
2722 /* ??? It would be nice if we could actually make such subregs
2723 on targets that allow such relocations. */
2724 if (byte >= GET_MODE_SIZE (innermode))
2725 elt = CONST0_RTX (submode);
2726 else
2727 elt = simplify_subreg (submode, op, innermode, byte);
2728 if (! elt)
2729 return NULL_RTX;
2730 RTVEC_ELT (v, i) = elt;
2731 }
2732 return gen_rtx_CONST_VECTOR (outermode, v);
2733 }
2734
2735 /* ??? This code is partly redundant with code below, but can handle
2736 the subregs of floats and similar corner cases.
2737 Later we should move all simplification code here and rewrite
2738 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2739 using SIMPLIFY_SUBREG. */
2740 if (subreg_lowpart_offset (outermode, innermode) == byte
2741 && GET_CODE (op) != CONST_VECTOR)
2742 {
2743 rtx new = gen_lowpart_if_possible (outermode, op);
2744 if (new)
2745 return new;
2746 }
2747
2748 /* A similar comment to the one above applies here.  */
2749 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2750 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2751 && GET_MODE_CLASS (outermode) == MODE_INT)
2752 {
2753 rtx new = constant_subword (op,
2754 (byte / UNITS_PER_WORD),
2755 innermode);
2756 if (new)
2757 return new;
2758 }
2759
2760 if (GET_MODE_CLASS (outermode) != MODE_INT
2761 && GET_MODE_CLASS (outermode) != MODE_CC)
2762 {
2763 enum machine_mode new_mode = int_mode_for_mode (outermode);
2764
2765 if (new_mode != innermode || byte != 0)
2766 {
2767 op = simplify_subreg (new_mode, op, innermode, byte);
2768 if (! op)
2769 return NULL_RTX;
2770 return simplify_subreg (outermode, op, new_mode, 0);
2771 }
2772 }
2773
2774 offset = byte * BITS_PER_UNIT;
2775 switch (GET_CODE (op))
2776 {
2777 case CONST_DOUBLE:
2778 if (GET_MODE (op) != VOIDmode)
2779 break;
2780
2781 /* We can't handle this case yet. */
2782 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2783 return NULL_RTX;
2784
2785 part = offset >= HOST_BITS_PER_WIDE_INT;
2786 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2787 && BYTES_BIG_ENDIAN)
2788 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2789 && WORDS_BIG_ENDIAN))
2790 part = !part;
2791 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2792 offset %= HOST_BITS_PER_WIDE_INT;
2793
2794 /* We've already picked the word we want from a double, so
2795 pretend this is actually an integer. */
2796 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2797
2798 /* FALLTHROUGH */
2799 case CONST_INT:
2800 if (GET_CODE (op) == CONST_INT)
2801 val = INTVAL (op);
2802
2803 /* We don't handle synthesizing of non-integral constants yet. */
2804 if (GET_MODE_CLASS (outermode) != MODE_INT)
2805 return NULL_RTX;
2806
2807 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2808 {
2809 if (WORDS_BIG_ENDIAN)
2810 offset = (GET_MODE_BITSIZE (innermode)
2811 - GET_MODE_BITSIZE (outermode) - offset);
2812 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2813 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2814 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2815 - 2 * (offset % BITS_PER_WORD));
2816 }
2817
2818 if (offset >= HOST_BITS_PER_WIDE_INT)
2819 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2820 else
2821 {
2822 val >>= offset;
2823 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2824 val = trunc_int_for_mode (val, outermode);
2825 return GEN_INT (val);
2826 }
2827 default:
2828 break;
2829 }
2830 }
2831
2832 /* Changing mode twice with SUBREG => just change it once,
2833 or not at all if changing back to op's starting mode.  */
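/* E.g. (subreg:QI (subreg:HI (reg:SI R) 0) 0) folds to
(subreg:QI (reg:SI R) 0), composing the offsets of the two SUBREGs
into a single final offset.  */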
2834 if (GET_CODE (op) == SUBREG)
2835 {
2836 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2837 int final_offset = byte + SUBREG_BYTE (op);
2838 rtx new;
2839
2840 if (outermode == innermostmode
2841 && byte == 0 && SUBREG_BYTE (op) == 0)
2842 return SUBREG_REG (op);
2843
2844 /* The SUBREG_BYTE represents the offset, as if the value were stored
2845 in memory.  An irritating exception is the paradoxical subreg, where
2846 we define SUBREG_BYTE to be 0; on big endian machines, this value
2847 would otherwise be negative.  For a moment, undo this exception.  */
2848 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2849 {
2850 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2851 if (WORDS_BIG_ENDIAN)
2852 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2853 if (BYTES_BIG_ENDIAN)
2854 final_offset += difference % UNITS_PER_WORD;
2855 }
2856 if (SUBREG_BYTE (op) == 0
2857 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2858 {
2859 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2860 if (WORDS_BIG_ENDIAN)
2861 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2862 if (BYTES_BIG_ENDIAN)
2863 final_offset += difference % UNITS_PER_WORD;
2864 }
2865
2866 /* See whether the resulting subreg will be paradoxical.  */
2867 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2868 {
2869 /* In nonparadoxical subregs we can't handle negative offsets. */
2870 if (final_offset < 0)
2871 return NULL_RTX;
2872 /* Bail out in case the resulting subreg would be incorrect.  */
2873 if (final_offset % GET_MODE_SIZE (outermode)
2874 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2875 return NULL_RTX;
2876 }
2877 else
2878 {
2879 int offset = 0;
2880 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2881
2882 /* For a paradoxical subreg, see if we are still looking at the lower
2883 part; if so, our SUBREG_BYTE will be 0.  */
2884 if (WORDS_BIG_ENDIAN)
2885 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2886 if (BYTES_BIG_ENDIAN)
2887 offset += difference % UNITS_PER_WORD;
2888 if (offset == final_offset)
2889 final_offset = 0;
2890 else
2891 return NULL_RTX;
2892 }
2893
2894 /* Recurse for further possible simplifications.  */
2895 new = simplify_subreg (outermode, SUBREG_REG (op),
2896 GET_MODE (SUBREG_REG (op)),
2897 final_offset);
2898 if (new)
2899 return new;
2900 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2901 }
2902
2903 /* SUBREG of a hard register => just change the register number
2904 and/or mode. If the hard register is not valid in that mode,
2905 suppress this simplification. If the hard register is the stack,
2906 frame, or argument pointer, leave this as a SUBREG. */
2907
2908 if (REG_P (op)
2909 && (! REG_FUNCTION_VALUE_P (op)
2910 || ! rtx_equal_function_value_matters)
2911 && REGNO (op) < FIRST_PSEUDO_REGISTER
2912 #ifdef CANNOT_CHANGE_MODE_CLASS
2913 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
2914 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2915 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
2916 #endif
2917 && ((reload_completed && !frame_pointer_needed)
2918 || (REGNO (op) != FRAME_POINTER_REGNUM
2919 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2920 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2921 #endif
2922 ))
2923 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2924 && REGNO (op) != ARG_POINTER_REGNUM
2925 #endif
2926 && REGNO (op) != STACK_POINTER_REGNUM)
2927 {
2928 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2929 0);
2930
2931 /* ??? We do allow it if the current REG is not valid for
2932 its mode. This is a kludge to work around how float/complex
2933 arguments are passed on 32-bit SPARC and should be fixed. */
2934 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2935 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2936 {
2937 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
2938
2939 /* Propagate the original regno.  We don't have any way to specify
2940 the offset inside the original regno, so do so only for the lowpart.
2941 The information is used only by alias analysis, which cannot
2942 grok partial registers anyway.  */
2943
2944 if (subreg_lowpart_offset (outermode, innermode) == byte)
2945 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
2946 return x;
2947 }
2948 }
2949
2950 /* If we have a SUBREG of a register that we are replacing and we are
2951 replacing it with a MEM, make a new MEM and try replacing the
2952 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2953 or if we would be widening it. */
2954
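/* E.g. (subreg:QI (mem:SI ADDR) 3) becomes, conceptually, a QImode
MEM at ADDR plus 3; adjust_address_nv performs the address arithmetic
and updates the memory attributes.  */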
2955 if (GET_CODE (op) == MEM
2956 && ! mode_dependent_address_p (XEXP (op, 0))
2957 /* Allow splitting of volatile memory references in case we don't
2958 have an instruction to move the whole thing.  */
2959 && (! MEM_VOLATILE_P (op)
2960 || ! have_insn_for (SET, innermode))
2961 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2962 return adjust_address_nv (op, outermode, byte);
2963
2964 /* Handle complex values represented as CONCAT
2965 of real and imaginary part. */
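/* E.g., assuming 8-byte DFmode parts, (subreg:DF (concat:DC RE IM) 0)
yields RE and (subreg:DF (concat:DC RE IM) 8) yields IM, since
GET_MODE_UNIT_SIZE gives the size of one part.  */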
2966 if (GET_CODE (op) == CONCAT)
2967 {
2968 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2969 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2970 unsigned int final_offset;
2971 rtx res;
2972
2973 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2974 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2975 if (res)
2976 return res;
2977 /* We can at least simplify it by referring directly to the relevant part. */
2978 return gen_rtx_SUBREG (outermode, part, final_offset);
2979 }
2980
2981 return NULL_RTX;
2982 }
2983 /* Make a SUBREG operation or equivalent if it folds. */
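
/* A minimal usage sketch, with X standing for an arbitrary SImode rtx:

simplify_gen_subreg (QImode, X, SImode, 0);

returns the folded rtx when simplify_subreg succeeds, a fresh
(subreg:QI X 0) otherwise, and NULL_RTX when no valid SUBREG can be
generated (e.g. for a QUEUED rtx).  */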
2984
2985 rtx
2986 simplify_gen_subreg (outermode, op, innermode, byte)
2987 rtx op;
2988 unsigned int byte;
2989 enum machine_mode outermode, innermode;
2990 {
2991 rtx new;
2992 /* Little bit of sanity checking. */
2993 if (innermode == VOIDmode || outermode == VOIDmode
2994 || innermode == BLKmode || outermode == BLKmode)
2995 abort ();
2996
2997 if (GET_MODE (op) != innermode
2998 && GET_MODE (op) != VOIDmode)
2999 abort ();
3000
3001 if (byte % GET_MODE_SIZE (outermode)
3002 || byte >= GET_MODE_SIZE (innermode))
3003 abort ();
3004
3005 if (GET_CODE (op) == QUEUED)
3006 return NULL_RTX;
3007
3008 new = simplify_subreg (outermode, op, innermode, byte);
3009 if (new)
3010 return new;
3011
3012 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3013 return NULL_RTX;
3014
3015 return gen_rtx_SUBREG (outermode, op, byte);
3016 }
3017 /* Simplify X, an rtx expression.
3018
3019 Return the simplified expression or NULL if no simplifications
3020 were possible.
3021
3022 This is the preferred entry point into the simplification routines;
3023 however, we still allow passes to call the more specific routines.
3024
3025 Right now GCC has three (yes, three) major bodies of RTL simplification
3026 code that need to be unified.
3027
3028 1. fold_rtx in cse.c. This code uses various CSE specific
3029 information to aid in RTL simplification.
3030
3031 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3032 it uses combine specific information to aid in RTL
3033 simplification.
3034
3035 3. The routines in this file.
3036
3037
3038 Long term we want to only have one body of simplification code; to
3039 get to that state I recommend the following steps:
3040
3041 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3042 which do not depend on pass-specific state into these routines.
3043
3044 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3045 use this routine whenever possible.
3046
3047 3. Allow for pass dependent state to be provided to these
3048 routines and add simplifications based on the pass dependent
3049 state. Remove code from cse.c & combine.c that becomes
3050 redundant/dead.
3051
3052 It will take time, but ultimately the compiler will be easier to
3053 maintain and improve. It's totally silly that when we add a
3054 simplification, it needs to be added to 4 places (3 for RTL
3055 simplification and 1 for tree simplification).  */
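
/* A minimal usage sketch: given x = (plus:SI (const_int 2)
(const_int 3)), simplify_rtx (x) dispatches on rtx class 'c' and
returns (const_int 5) via simplify_binary_operation.  */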
3056
3057 rtx
3058 simplify_rtx (x)
3059 rtx x;
3060 {
3061 enum rtx_code code = GET_CODE (x);
3062 enum machine_mode mode = GET_MODE (x);
3063
3064 switch (GET_RTX_CLASS (code))
3065 {
3066 case '1':
3067 return simplify_unary_operation (code, mode,
3068 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3069 case 'c':
3070 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3071 {
3072 rtx tem;
3073
3074 tem = XEXP (x, 0);
3075 XEXP (x, 0) = XEXP (x, 1);
3076 XEXP (x, 1) = tem;
3077 return simplify_binary_operation (code, mode,
3078 XEXP (x, 0), XEXP (x, 1));
3079 }
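/* Fall through.  */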
3080
3081 case '2':
3082 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3083
3084 case '3':
3085 case 'b':
3086 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3087 XEXP (x, 0), XEXP (x, 1),
3088 XEXP (x, 2));
3089
3090 case '<':
3091 return simplify_relational_operation (code,
3092 ((GET_MODE (XEXP (x, 0))
3093 != VOIDmode)
3094 ? GET_MODE (XEXP (x, 0))
3095 : GET_MODE (XEXP (x, 1))),
3096 XEXP (x, 0), XEXP (x, 1));
3097 case 'x':
3098 if (code == SUBREG)
3099 return simplify_gen_subreg (mode, SUBREG_REG (x),
3100 GET_MODE (SUBREG_REG (x)),
3101 SUBREG_BYTE (x));
3102 if (code == CONSTANT_P_RTX)
3103 {
3104 if (CONSTANT_P (XEXP (x, 0)))
3105 return const1_rtx;
3106 }
3107 return NULL;
3108 default:
3109 return NULL;
3110 }
3111 }