1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
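/* For example, on a host where HOST_WIDE_INT is 32 bits wide:

       HWI_SIGN_EXTEND (0x7fffffff) == 0
       HWI_SIGN_EXTEND (0x80000000) == -1

   so the sign-extended pair for a low word LOW is simply
   (LOW, HWI_SIGN_EXTEND (LOW)).  */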
51
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
55 rtx, int);
56 \f
57 /* Negate a CONST_INT rtx, truncating (because a conversion from a
58 maximally negative number can overflow). */
59 static rtx
60 neg_const_int (enum machine_mode mode, rtx i)
61 {
62 return gen_int_mode (- INTVAL (i), mode);
63 }
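/* E.g. negating (const_int -128) in QImode yields +128, which does
   not fit in QImode; gen_int_mode truncates it back to -128 rather
   than producing a CONST_INT that is out of range for the mode.  */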
64
65 \f
66 /* Make a binary operation by properly ordering the operands and
67 seeing if the expression folds. */
68
69 rtx
70 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
71 rtx op1)
72 {
73 rtx tem;
74
75 /* Put complex operands first and constants second if commutative. */
76 if (GET_RTX_CLASS (code) == 'c'
77 && swap_commutative_operands_p (op0, op1))
78 tem = op0, op0 = op1, op1 = tem;
79
80 /* If this simplifies, do it. */
81 tem = simplify_binary_operation (code, mode, op0, op1);
82 if (tem)
83 return tem;
84
85 /* Handle addition and subtraction specially. Otherwise, just form
86 the operation. */
87
88 if (code == PLUS || code == MINUS)
89 {
90 tem = simplify_plus_minus (code, mode, op0, op1, 1);
91 if (tem)
92 return tem;
93 }
94
95 return gen_rtx_fmt_ee (code, mode, op0, op1);
96 }
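/* Usage sketch (X and Y standing for arbitrary non-constant rtx):
   simplify_gen_binary (PLUS, SImode, X, const0_rtx) folds to X via
   simplify_binary_operation, whereas an irreducible sum such as
   X + Y comes back as a fresh (plus:SI X Y) from gen_rtx_fmt_ee.  */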
97 \f
98 /* If X is a MEM referencing the constant pool, return the real value.
99 Otherwise return X. */
100 rtx
101 avoid_constant_pool_reference (rtx x)
102 {
103 rtx c, tmp, addr;
104 enum machine_mode cmode;
105
106 switch (GET_CODE (x))
107 {
108 case MEM:
109 break;
110
111 case FLOAT_EXTEND:
112 /* Handle float extensions of constant pool references. */
113 tmp = XEXP (x, 0);
114 c = avoid_constant_pool_reference (tmp);
115 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
116 {
117 REAL_VALUE_TYPE d;
118
119 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
120 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
121 }
122 return x;
123
124 default:
125 return x;
126 }
127
128 addr = XEXP (x, 0);
129
130 /* Call target hook to avoid the effects of -fpic etc... */
131 addr = (*targetm.delegitimize_address) (addr);
132
133 if (GET_CODE (addr) == LO_SUM)
134 addr = XEXP (addr, 1);
135
136 if (GET_CODE (addr) != SYMBOL_REF
137 || ! CONSTANT_POOL_ADDRESS_P (addr))
138 return x;
139
140 c = get_pool_constant (addr);
141 cmode = get_pool_mode (addr);
142
143 /* If we're accessing the constant in a different mode than it was
144 originally stored, attempt to fix that up via subreg simplifications.
145 If that fails we have no choice but to return the original memory. */
146 if (cmode != GET_MODE (x))
147 {
148 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
149 return c ? c : x;
150 }
151
152 return c;
153 }
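/* E.g. a (mem:SF (symbol_ref ...)) whose SYMBOL_REF satisfies
   CONSTANT_POOL_ADDRESS_P and whose pool entry is an SFmode
   CONST_DOUBLE comes back as that CONST_DOUBLE; had the entry been
   stored in another mode, simplify_subreg would first get a chance
   to reinterpret it.  */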
154 \f
155 /* Make a unary operation by first seeing if it folds and otherwise making
156 the specified operation. */
157
158 rtx
159 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
160 enum machine_mode op_mode)
161 {
162 rtx tem;
163
164 /* If this simplifies, use it. */
165 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
166 return tem;
167
168 return gen_rtx_fmt_e (code, mode, op);
169 }
170
171 /* Likewise for ternary operations. */
172
173 rtx
174 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
175 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
176 {
177 rtx tem;
178
179 /* If this simplifies, use it. */
180 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
181 op0, op1, op2)))
182 return tem;
183
184 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
185 }
186 \f
187 /* Likewise, for relational operations.
188 CMP_MODE specifies the mode in which the comparison
189 is done. */
190
191 rtx
192 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
193 enum machine_mode cmp_mode, rtx op0, rtx op1)
194 {
195 rtx tem;
196
197 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
198 return tem;
199
200 /* For the following tests, ensure const0_rtx is op1. */
201 if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
202 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
203
204 /* If op0 is a compare, extract the comparison arguments from it. */
205 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
206 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
207
208 /* If op0 is a comparison, extract the comparison arguments from it. */
209 if (code == NE && op1 == const0_rtx
210 && GET_RTX_CLASS (GET_CODE (op0)) == '<')
211 return op0;
212 else if (code == EQ && op1 == const0_rtx)
213 {
214 /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'. */
215 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
216 if (new != UNKNOWN)
217 {
218 code = new;
219 mode = cmp_mode;
220 op1 = XEXP (op0, 1);
221 op0 = XEXP (op0, 0);
222 }
223 }
224
225 /* Put complex operands first and constants second. */
226 if (swap_commutative_operands_p (op0, op1))
227 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
228
229 return gen_rtx_fmt_ee (code, mode, op0, op1);
230 }
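/* For instance, asking for (eq (ne X (const_int 0)) (const_int 0))
   collapses to an EQ test of X against zero: the EQ-against-zero
   branch above reverses the inner comparison with
   reversed_comparison_code and reuses its operands.  */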
231 \f
232 /* Replace all occurrences of OLD in X with NEW and try to simplify the
233 resulting RTX. Return a new RTX which is as simplified as possible. */
234
235 rtx
236 simplify_replace_rtx (rtx x, rtx old, rtx new)
237 {
238 enum rtx_code code = GET_CODE (x);
239 enum machine_mode mode = GET_MODE (x);
240
241 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
242 to build a new expression substituting recursively. If we can't do
243 anything, return our input. */
244
245 if (x == old)
246 return new;
247
248 switch (GET_RTX_CLASS (code))
249 {
250 case '1':
251 {
252 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
253 rtx op = (XEXP (x, 0) == old
254 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
255
256 return simplify_gen_unary (code, mode, op, op_mode);
257 }
258
259 case '2':
260 case 'c':
261 return
262 simplify_gen_binary (code, mode,
263 simplify_replace_rtx (XEXP (x, 0), old, new),
264 simplify_replace_rtx (XEXP (x, 1), old, new));
265 case '<':
266 {
267 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
268 ? GET_MODE (XEXP (x, 0))
269 : GET_MODE (XEXP (x, 1)));
270 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
271 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
272 rtx temp = simplify_gen_relational (code, mode,
273 (op_mode != VOIDmode
274 ? op_mode
275 : GET_MODE (op0) != VOIDmode
276 ? GET_MODE (op0)
277 : GET_MODE (op1)),
278 op0, op1);
279 #ifdef FLOAT_STORE_FLAG_VALUE
280 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
281 {
282 if (temp == const0_rtx)
283 temp = CONST0_RTX (mode);
284 else if (temp == const_true_rtx)
285 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
286 mode);
287 }
288 #endif
289 return temp;
290 }
291
292 case '3':
293 case 'b':
294 {
295 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
296 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
297
298 return
299 simplify_gen_ternary (code, mode,
300 (op_mode != VOIDmode
301 ? op_mode
302 : GET_MODE (op0)),
303 op0,
304 simplify_replace_rtx (XEXP (x, 1), old, new),
305 simplify_replace_rtx (XEXP (x, 2), old, new));
306 }
307
308 case 'x':
309 /* The only case we try to handle is a SUBREG. */
310 if (code == SUBREG)
311 {
312 rtx exp;
313 exp = simplify_gen_subreg (GET_MODE (x),
314 simplify_replace_rtx (SUBREG_REG (x),
315 old, new),
316 GET_MODE (SUBREG_REG (x)),
317 SUBREG_BYTE (x));
318 if (exp)
319 x = exp;
320 }
321 return x;
322
323 case 'o':
324 if (code == MEM)
325 return replace_equiv_address_nv (x,
326 simplify_replace_rtx (XEXP (x, 0),
327 old, new));
328 else if (code == LO_SUM)
329 {
330 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
331 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
332
333 /* (lo_sum (high x) x) -> x */
334 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
335 return op1;
336
337 return gen_rtx_LO_SUM (mode, op0, op1);
338 }
339 else if (code == REG)
340 {
341 if (REG_P (old) && REGNO (x) == REGNO (old))
342 return new;
343 }
344
345 return x;
346
347 default:
348 return x;
349 }
350 return x;
351 }
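/* Usage sketch (R1 an illustrative pseudo register): replacing R1
   with (const_int 4) in (plus:SI R1 (const_int -4)) does not merely
   substitute; simplify_gen_binary folds the result to
   (const_int 0).  */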
352 \f
353 /* Try to simplify a unary operation CODE whose output mode is to be
354 MODE with input operand OP whose mode was originally OP_MODE.
355 Return zero if no simplification can be made. */
356 rtx
357 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
358 rtx op, enum machine_mode op_mode)
359 {
360 unsigned int width = GET_MODE_BITSIZE (mode);
361 rtx trueop = avoid_constant_pool_reference (op);
362
363 if (code == VEC_DUPLICATE)
364 {
365 if (!VECTOR_MODE_P (mode))
366 abort ();
367 if (GET_MODE (trueop) != VOIDmode
368 && !VECTOR_MODE_P (GET_MODE (trueop))
369 && GET_MODE_INNER (mode) != GET_MODE (trueop))
370 abort ();
371 if (GET_MODE (trueop) != VOIDmode
372 && VECTOR_MODE_P (GET_MODE (trueop))
373 && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
374 abort ();
375 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
376 || GET_CODE (trueop) == CONST_VECTOR)
377 {
378 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
379 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
380 rtvec v = rtvec_alloc (n_elts);
381 unsigned int i;
382
383 if (GET_CODE (trueop) != CONST_VECTOR)
384 for (i = 0; i < n_elts; i++)
385 RTVEC_ELT (v, i) = trueop;
386 else
387 {
388 enum machine_mode inmode = GET_MODE (trueop);
389 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
390 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
391
392 if (in_n_elts >= n_elts || n_elts % in_n_elts)
393 abort ();
394 for (i = 0; i < n_elts; i++)
395 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
396 }
397 return gen_rtx_CONST_VECTOR (mode, v);
398 }
399 }
400
401 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
402 {
403 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
404 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
405 enum machine_mode opmode = GET_MODE (trueop);
406 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
407 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
408 rtvec v = rtvec_alloc (n_elts);
409 unsigned int i;
410
411 if (op_n_elts != n_elts)
412 abort ();
413
414 for (i = 0; i < n_elts; i++)
415 {
416 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
417 CONST_VECTOR_ELT (trueop, i),
418 GET_MODE_INNER (opmode));
419 if (!x)
420 return 0;
421 RTVEC_ELT (v, i) = x;
422 }
423 return gen_rtx_CONST_VECTOR (mode, v);
424 }
425
426 /* The order of these tests is critical so that, for example, we don't
427 check the wrong mode (input vs. output) for a conversion operation,
428 such as FIX. At some point, this should be simplified. */
429
430 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
431 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
432 {
433 HOST_WIDE_INT hv, lv;
434 REAL_VALUE_TYPE d;
435
436 if (GET_CODE (trueop) == CONST_INT)
437 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
438 else
439 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
440
441 REAL_VALUE_FROM_INT (d, lv, hv, mode);
442 d = real_value_truncate (mode, d);
443 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
444 }
445 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
446 && (GET_CODE (trueop) == CONST_DOUBLE
447 || GET_CODE (trueop) == CONST_INT))
448 {
449 HOST_WIDE_INT hv, lv;
450 REAL_VALUE_TYPE d;
451
452 if (GET_CODE (trueop) == CONST_INT)
453 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
454 else
455 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
456
457 if (op_mode == VOIDmode)
458 {
459 /* We don't know how to interpret negative-looking numbers in
460 this case, so don't try to fold those. */
461 if (hv < 0)
462 return 0;
463 }
464 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
465 ;
466 else
467 hv = 0, lv &= GET_MODE_MASK (op_mode);
468
469 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
470 d = real_value_truncate (mode, d);
471 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
472 }
473
474 if (GET_CODE (trueop) == CONST_INT
475 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
476 {
477 HOST_WIDE_INT arg0 = INTVAL (trueop);
478 HOST_WIDE_INT val;
479
480 switch (code)
481 {
482 case NOT:
483 val = ~ arg0;
484 break;
485
486 case NEG:
487 val = - arg0;
488 break;
489
490 case ABS:
491 val = (arg0 >= 0 ? arg0 : - arg0);
492 break;
493
494 case FFS:
495 /* Don't use ffs here. Instead, get low order bit and then its
496 number. If arg0 is zero, this will return 0, as desired. */
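/* Worked example: arg0 == 12 (binary 1100) gives arg0 & -arg0 == 4
   (binary 0100), exact_log2 returns 2, and the result is 3, the
   1-based position of the lowest set bit of 12.  */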
497 arg0 &= GET_MODE_MASK (mode);
498 val = exact_log2 (arg0 & (- arg0)) + 1;
499 break;
500
501 case CLZ:
502 arg0 &= GET_MODE_MASK (mode);
503 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
504 ;
505 else
506 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
507 break;
508
509 case CTZ:
510 arg0 &= GET_MODE_MASK (mode);
511 if (arg0 == 0)
512 {
513 /* Even if the value at zero is undefined, we have to come
514 up with some replacement. Seems good enough. */
515 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
516 val = GET_MODE_BITSIZE (mode);
517 }
518 else
519 val = exact_log2 (arg0 & -arg0);
520 break;
521
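/* POPCOUNT and PARITY below both use the trick that
   arg0 & (arg0 - 1) clears the lowest set bit; e.g. for arg0 == 13
   (binary 1101) the loop sees 1101 -> 1100 -> 1000 -> 0 and counts
   val == 3 iterations, one per set bit.  */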
522 case POPCOUNT:
523 arg0 &= GET_MODE_MASK (mode);
524 val = 0;
525 while (arg0)
526 val++, arg0 &= arg0 - 1;
527 break;
528
529 case PARITY:
530 arg0 &= GET_MODE_MASK (mode);
531 val = 0;
532 while (arg0)
533 val++, arg0 &= arg0 - 1;
534 val &= 1;
535 break;
536
537 case TRUNCATE:
538 val = arg0;
539 break;
540
541 case ZERO_EXTEND:
542 /* When zero-extending a CONST_INT, we need to know its
543 original mode. */
544 if (op_mode == VOIDmode)
545 abort ();
546 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
547 {
548 /* If we were really extending the mode,
549 we would have to distinguish between zero-extension
550 and sign-extension. */
551 if (width != GET_MODE_BITSIZE (op_mode))
552 abort ();
553 val = arg0;
554 }
555 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
556 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
557 else
558 return 0;
559 break;
560
561 case SIGN_EXTEND:
562 if (op_mode == VOIDmode)
563 op_mode = mode;
564 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
565 {
566 /* If we were really extending the mode,
567 we would have to distinguish between zero-extension
568 and sign-extension. */
569 if (width != GET_MODE_BITSIZE (op_mode))
570 abort ();
571 val = arg0;
572 }
573 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
574 {
575 val
576 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
577 if (val
578 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
579 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
580 }
581 else
582 return 0;
583 break;
584
585 case SQRT:
586 case FLOAT_EXTEND:
587 case FLOAT_TRUNCATE:
588 case SS_TRUNCATE:
589 case US_TRUNCATE:
590 return 0;
591
592 default:
593 abort ();
594 }
595
596 val = trunc_int_for_mode (val, mode);
597
598 return GEN_INT (val);
599 }
600
601 /* We can do some operations on integer CONST_DOUBLEs. Also allow
602 for a DImode operation on a CONST_INT. */
603 else if (GET_MODE (trueop) == VOIDmode
604 && width <= HOST_BITS_PER_WIDE_INT * 2
605 && (GET_CODE (trueop) == CONST_DOUBLE
606 || GET_CODE (trueop) == CONST_INT))
607 {
608 unsigned HOST_WIDE_INT l1, lv;
609 HOST_WIDE_INT h1, hv;
610
611 if (GET_CODE (trueop) == CONST_DOUBLE)
612 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
613 else
614 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
615
616 switch (code)
617 {
618 case NOT:
619 lv = ~ l1;
620 hv = ~ h1;
621 break;
622
623 case NEG:
624 neg_double (l1, h1, &lv, &hv);
625 break;
626
627 case ABS:
628 if (h1 < 0)
629 neg_double (l1, h1, &lv, &hv);
630 else
631 lv = l1, hv = h1;
632 break;
633
634 case FFS:
635 hv = 0;
636 if (l1 == 0)
637 {
638 if (h1 == 0)
639 lv = 0;
640 else
641 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
642 }
643 else
644 lv = exact_log2 (l1 & -l1) + 1;
645 break;
646
647 case CLZ:
648 hv = 0;
649 if (h1 == 0)
650 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
651 else
652 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
653 - HOST_BITS_PER_WIDE_INT;
654 break;
655
656 case CTZ:
657 hv = 0;
658 if (l1 == 0)
659 {
660 if (h1 == 0)
661 lv = GET_MODE_BITSIZE (mode);
662 else
663 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
664 }
665 else
666 lv = exact_log2 (l1 & -l1);
667 break;
668
669 case POPCOUNT:
670 hv = 0;
671 lv = 0;
672 while (l1)
673 lv++, l1 &= l1 - 1;
674 while (h1)
675 lv++, h1 &= h1 - 1;
676 break;
677
678 case PARITY:
679 hv = 0;
680 lv = 0;
681 while (l1)
682 lv++, l1 &= l1 - 1;
683 while (h1)
684 lv++, h1 &= h1 - 1;
685 lv &= 1;
686 break;
687
688 case TRUNCATE:
689 /* This is just a change-of-mode, so do nothing. */
690 lv = l1, hv = h1;
691 break;
692
693 case ZERO_EXTEND:
694 if (op_mode == VOIDmode)
695 abort ();
696
697 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
698 return 0;
699
700 hv = 0;
701 lv = l1 & GET_MODE_MASK (op_mode);
702 break;
703
704 case SIGN_EXTEND:
705 if (op_mode == VOIDmode
706 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
707 return 0;
708 else
709 {
710 lv = l1 & GET_MODE_MASK (op_mode);
711 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
712 && (lv & ((HOST_WIDE_INT) 1
713 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
714 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
715
716 hv = HWI_SIGN_EXTEND (lv);
717 }
718 break;
719
720 case SQRT:
721 return 0;
722
723 default:
724 return 0;
725 }
726
727 return immed_double_const (lv, hv, mode);
728 }
729
730 else if (GET_CODE (trueop) == CONST_DOUBLE
731 && GET_MODE_CLASS (mode) == MODE_FLOAT)
732 {
733 REAL_VALUE_TYPE d, t;
734 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
735
736 switch (code)
737 {
738 case SQRT:
739 if (HONOR_SNANS (mode) && real_isnan (&d))
740 return 0;
741 real_sqrt (&t, mode, &d);
742 d = t;
743 break;
744 case ABS:
745 d = REAL_VALUE_ABS (d);
746 break;
747 case NEG:
748 d = REAL_VALUE_NEGATE (d);
749 break;
750 case FLOAT_TRUNCATE:
751 d = real_value_truncate (mode, d);
752 break;
753 case FLOAT_EXTEND:
754 /* All this does is change the mode. */
755 break;
756 case FIX:
757 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
758 break;
759
760 default:
761 abort ();
762 }
763 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
764 }
765
766 else if (GET_CODE (trueop) == CONST_DOUBLE
767 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
768 && GET_MODE_CLASS (mode) == MODE_INT
769 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
770 {
771 HOST_WIDE_INT i;
772 REAL_VALUE_TYPE d;
773 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
774 switch (code)
775 {
776 case FIX: i = REAL_VALUE_FIX (d); break;
777 case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
778 default:
779 abort ();
780 }
781 return gen_int_mode (i, mode);
782 }
783
784 /* This was formerly used only for non-IEEE float.
785 eggert@twinsun.com says it is safe for IEEE also. */
786 else
787 {
788 enum rtx_code reversed;
789 /* There are some simplifications we can do even if the operands
790 aren't constant. */
791 switch (code)
792 {
793 case NOT:
794 /* (not (not X)) == X. */
795 if (GET_CODE (op) == NOT)
796 return XEXP (op, 0);
797
798 /* (not (eq X Y)) == (ne X Y), etc. */
799 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
800 && ((reversed = reversed_comparison_code (op, NULL_RTX))
801 != UNKNOWN))
802 return gen_rtx_fmt_ee (reversed,
803 op_mode, XEXP (op, 0), XEXP (op, 1));
804 break;
805
806 case NEG:
807 /* (neg (neg X)) == X. */
808 if (GET_CODE (op) == NEG)
809 return XEXP (op, 0);
810 break;
811
812 case SIGN_EXTEND:
813 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
814 becomes just the MINUS if its mode is MODE. This allows
815 folding switch statements on machines using casesi (such as
816 the VAX). */
817 if (GET_CODE (op) == TRUNCATE
818 && GET_MODE (XEXP (op, 0)) == mode
819 && GET_CODE (XEXP (op, 0)) == MINUS
820 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
821 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
822 return XEXP (op, 0);
823
824 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
825 if (! POINTERS_EXTEND_UNSIGNED
826 && mode == Pmode && GET_MODE (op) == ptr_mode
827 && (CONSTANT_P (op)
828 || (GET_CODE (op) == SUBREG
829 && GET_CODE (SUBREG_REG (op)) == REG
830 && REG_POINTER (SUBREG_REG (op))
831 && GET_MODE (SUBREG_REG (op)) == Pmode)))
832 return convert_memory_address (Pmode, op);
833 #endif
834 break;
835
836 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
837 case ZERO_EXTEND:
838 if (POINTERS_EXTEND_UNSIGNED > 0
839 && mode == Pmode && GET_MODE (op) == ptr_mode
840 && (CONSTANT_P (op)
841 || (GET_CODE (op) == SUBREG
842 && GET_CODE (SUBREG_REG (op)) == REG
843 && REG_POINTER (SUBREG_REG (op))
844 && GET_MODE (SUBREG_REG (op)) == Pmode)))
845 return convert_memory_address (Pmode, op);
846 break;
847 #endif
848
849 default:
850 break;
851 }
852
853 return 0;
854 }
855 }
856 \f
857 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
858 and OP1. Return 0 if no simplification is possible.
859
860 Don't use this for relational operations such as EQ or LT.
861 Use simplify_relational_operation instead. */
862 rtx
863 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
864 rtx op0, rtx op1)
865 {
866 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
867 HOST_WIDE_INT val;
868 unsigned int width = GET_MODE_BITSIZE (mode);
869 rtx tem;
870 rtx trueop0 = avoid_constant_pool_reference (op0);
871 rtx trueop1 = avoid_constant_pool_reference (op1);
872
873 /* Relational operations don't work here. We must know the mode
874 of the operands in order to do the comparison correctly.
875 Assuming a full word can give incorrect results.
876 Consider comparing 128 with -128 in QImode. */
877
878 if (GET_RTX_CLASS (code) == '<')
879 abort ();
880
881 /* Make sure the constant is second. */
882 if (GET_RTX_CLASS (code) == 'c'
883 && swap_commutative_operands_p (trueop0, trueop1))
884 {
885 tem = op0, op0 = op1, op1 = tem;
886 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
887 }
888
889 if (VECTOR_MODE_P (mode)
890 && GET_CODE (trueop0) == CONST_VECTOR
891 && GET_CODE (trueop1) == CONST_VECTOR)
892 {
893 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
894 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
895 enum machine_mode op0mode = GET_MODE (trueop0);
896 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
897 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
898 enum machine_mode op1mode = GET_MODE (trueop1);
899 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
900 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
901 rtvec v = rtvec_alloc (n_elts);
902 unsigned int i;
903
904 if (op0_n_elts != n_elts || op1_n_elts != n_elts)
905 abort ();
906
907 for (i = 0; i < n_elts; i++)
908 {
909 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
910 CONST_VECTOR_ELT (trueop0, i),
911 CONST_VECTOR_ELT (trueop1, i));
912 if (!x)
913 return 0;
914 RTVEC_ELT (v, i) = x;
915 }
916
917 return gen_rtx_CONST_VECTOR (mode, v);
918 }
919
920 if (GET_MODE_CLASS (mode) == MODE_FLOAT
921 && GET_CODE (trueop0) == CONST_DOUBLE
922 && GET_CODE (trueop1) == CONST_DOUBLE
923 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
924 {
925 REAL_VALUE_TYPE f0, f1, value;
926
927 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
928 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
929 f0 = real_value_truncate (mode, f0);
930 f1 = real_value_truncate (mode, f1);
931
932 if (code == DIV
933 && !MODE_HAS_INFINITIES (mode)
934 && REAL_VALUES_EQUAL (f1, dconst0))
935 return 0;
936
937 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
938
939 value = real_value_truncate (mode, value);
940 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
941 }
942
943 /* We can fold some multi-word operations. */
944 if (GET_MODE_CLASS (mode) == MODE_INT
945 && width == HOST_BITS_PER_WIDE_INT * 2
946 && (GET_CODE (trueop0) == CONST_DOUBLE
947 || GET_CODE (trueop0) == CONST_INT)
948 && (GET_CODE (trueop1) == CONST_DOUBLE
949 || GET_CODE (trueop1) == CONST_INT))
950 {
951 unsigned HOST_WIDE_INT l1, l2, lv;
952 HOST_WIDE_INT h1, h2, hv;
953
954 if (GET_CODE (trueop0) == CONST_DOUBLE)
955 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
956 else
957 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
958
959 if (GET_CODE (trueop1) == CONST_DOUBLE)
960 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
961 else
962 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
963
964 switch (code)
965 {
966 case MINUS:
967 /* A - B == A + (-B). */
968 neg_double (l2, h2, &lv, &hv);
969 l2 = lv, h2 = hv;
970
971 /* ... fall through ... */
972
973 case PLUS:
974 add_double (l1, h1, l2, h2, &lv, &hv);
975 break;
976
977 case MULT:
978 mul_double (l1, h1, l2, h2, &lv, &hv);
979 break;
980
981 case DIV: case MOD: case UDIV: case UMOD:
982 /* We'd need to include tree.h to do this and it doesn't seem worth
983 it. */
984 return 0;
985
986 case AND:
987 lv = l1 & l2, hv = h1 & h2;
988 break;
989
990 case IOR:
991 lv = l1 | l2, hv = h1 | h2;
992 break;
993
994 case XOR:
995 lv = l1 ^ l2, hv = h1 ^ h2;
996 break;
997
998 case SMIN:
999 if (h1 < h2
1000 || (h1 == h2
1001 && ((unsigned HOST_WIDE_INT) l1
1002 < (unsigned HOST_WIDE_INT) l2)))
1003 lv = l1, hv = h1;
1004 else
1005 lv = l2, hv = h2;
1006 break;
1007
1008 case SMAX:
1009 if (h1 > h2
1010 || (h1 == h2
1011 && ((unsigned HOST_WIDE_INT) l1
1012 > (unsigned HOST_WIDE_INT) l2)))
1013 lv = l1, hv = h1;
1014 else
1015 lv = l2, hv = h2;
1016 break;
1017
1018 case UMIN:
1019 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1020 || (h1 == h2
1021 && ((unsigned HOST_WIDE_INT) l1
1022 < (unsigned HOST_WIDE_INT) l2)))
1023 lv = l1, hv = h1;
1024 else
1025 lv = l2, hv = h2;
1026 break;
1027
1028 case UMAX:
1029 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1030 || (h1 == h2
1031 && ((unsigned HOST_WIDE_INT) l1
1032 > (unsigned HOST_WIDE_INT) l2)))
1033 lv = l1, hv = h1;
1034 else
1035 lv = l2, hv = h2;
1036 break;
1037
1038 case LSHIFTRT: case ASHIFTRT:
1039 case ASHIFT:
1040 case ROTATE: case ROTATERT:
1041 #ifdef SHIFT_COUNT_TRUNCATED
1042 if (SHIFT_COUNT_TRUNCATED)
1043 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1044 #endif
1045
1046 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1047 return 0;
1048
1049 if (code == LSHIFTRT || code == ASHIFTRT)
1050 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1051 code == ASHIFTRT);
1052 else if (code == ASHIFT)
1053 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1054 else if (code == ROTATE)
1055 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1056 else /* code == ROTATERT */
1057 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1058 break;
1059
1060 default:
1061 return 0;
1062 }
1063
1064 return immed_double_const (lv, hv, mode);
1065 }
1066
1067 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1068 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1069 {
1070 /* Even if we can't compute a constant result,
1071 there are some cases worth simplifying. */
1072
1073 switch (code)
1074 {
1075 case PLUS:
1076 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1077 when x is NaN, infinite, or finite and nonzero. They aren't
1078 when x is -0 and the rounding mode is not towards -infinity,
1079 since (-0) + 0 is then 0. */
1080 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1081 return op0;
1082
1083 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1084 transformations are safe even for IEEE. */
1085 if (GET_CODE (op0) == NEG)
1086 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1087 else if (GET_CODE (op1) == NEG)
1088 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1089
1090 /* (~a) + 1 -> -a */
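/* This is the two's complement identity -a == ~a + 1: e.g. for
   a == 5 (binary 0101), ~a + 1 is ...11111011, which is -5.  */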
1091 if (INTEGRAL_MODE_P (mode)
1092 && GET_CODE (op0) == NOT
1093 && trueop1 == const1_rtx)
1094 return gen_rtx_NEG (mode, XEXP (op0, 0));
1095
1096 /* Handle both-operands-constant cases. We can only add
1097 CONST_INTs to constants since the sum of relocatable symbols
1098 can't be handled by most assemblers. Don't add CONST_INT
1099 to CONST_INT since overflow won't be computed properly if wider
1100 than HOST_BITS_PER_WIDE_INT. */
1101
1102 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1103 && GET_CODE (op1) == CONST_INT)
1104 return plus_constant (op0, INTVAL (op1));
1105 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1106 && GET_CODE (op0) == CONST_INT)
1107 return plus_constant (op1, INTVAL (op0));
1108
1109 /* See if this is something like X * C + X or vice versa or
1110 if the multiplication is written as a shift. If so, we can
1111 distribute and make a new multiply, shift, or maybe just
1112 have X (e.g. (-X) + X * 2 is just X). But don't make a
1113 real multiply if we didn't have one before. */
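/* E.g. (plus (mult X (const_int 3)) X) folds to X * 4 here (or the
   equivalent shift), while (plus (ashift X (const_int 2)) X) is
   refused: it would fold to (mult X (const_int 5)), a real multiply
   the input did not contain, which the had_mult test below
   rejects.  */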
1114
1115 if (! FLOAT_MODE_P (mode))
1116 {
1117 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1118 rtx lhs = op0, rhs = op1;
1119 int had_mult = 0;
1120
1121 if (GET_CODE (lhs) == NEG)
1122 coeff0 = -1, lhs = XEXP (lhs, 0);
1123 else if (GET_CODE (lhs) == MULT
1124 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1125 {
1126 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1127 had_mult = 1;
1128 }
1129 else if (GET_CODE (lhs) == ASHIFT
1130 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1131 && INTVAL (XEXP (lhs, 1)) >= 0
1132 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1133 {
1134 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1135 lhs = XEXP (lhs, 0);
1136 }
1137
1138 if (GET_CODE (rhs) == NEG)
1139 coeff1 = -1, rhs = XEXP (rhs, 0);
1140 else if (GET_CODE (rhs) == MULT
1141 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1142 {
1143 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1144 had_mult = 1;
1145 }
1146 else if (GET_CODE (rhs) == ASHIFT
1147 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1148 && INTVAL (XEXP (rhs, 1)) >= 0
1149 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1150 {
1151 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1152 rhs = XEXP (rhs, 0);
1153 }
1154
1155 if (rtx_equal_p (lhs, rhs))
1156 {
1157 tem = simplify_gen_binary (MULT, mode, lhs,
1158 GEN_INT (coeff0 + coeff1));
1159 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1160 }
1161 }
1162
1163 /* If one of the operands is a PLUS or a MINUS, see if we can
1164 simplify this by the associative law.
1165 Don't use the associative law for floating point.
1166 The inaccuracy makes it nonassociative,
1167 and subtle programs can break if operations are associated. */
1168
1169 if (INTEGRAL_MODE_P (mode)
1170 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1171 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1172 || (GET_CODE (op0) == CONST
1173 && GET_CODE (XEXP (op0, 0)) == PLUS)
1174 || (GET_CODE (op1) == CONST
1175 && GET_CODE (XEXP (op1, 0)) == PLUS))
1176 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1177 return tem;
1178 break;
1179
1180 case COMPARE:
1181 #ifdef HAVE_cc0
1182 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1183 using cc0, in which case we want to leave it as a COMPARE
1184 so we can distinguish it from a register-register-copy.
1185
1186 In IEEE floating point, x-0 is not the same as x. */
1187
1188 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1189 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1190 && trueop1 == CONST0_RTX (mode))
1191 return op0;
1192 #endif
1193
1194 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1195 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1196 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1197 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1198 {
1199 rtx xop00 = XEXP (op0, 0);
1200 rtx xop10 = XEXP (op1, 0);
1201
1202 #ifdef HAVE_cc0
1203 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1204 #else
1205 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1206 && GET_MODE (xop00) == GET_MODE (xop10)
1207 && REGNO (xop00) == REGNO (xop10)
1208 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1209 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1210 #endif
1211 return xop00;
1212 }
1213 break;
1214
1215 case MINUS:
1216 /* We can't assume x-x is 0 even with non-IEEE floating point,
1217 but since it is zero except in very strange circumstances, we
1218 will treat it as zero with -funsafe-math-optimizations. */
1219 if (rtx_equal_p (trueop0, trueop1)
1220 && ! side_effects_p (op0)
1221 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1222 return CONST0_RTX (mode);
1223
1224 /* Change subtraction from zero into negation. (0 - x) is the
1225 same as -x when x is NaN, infinite, or finite and nonzero.
1226 But if the mode has signed zeros, and does not round towards
1227 -infinity, then 0 - 0 is 0, not -0. */
1228 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1229 return gen_rtx_NEG (mode, op1);
1230
1231 /* (-1 - a) is ~a. */
1232 if (trueop0 == constm1_rtx)
1233 return gen_rtx_NOT (mode, op1);
1234
1235 /* Subtracting 0 has no effect unless the mode has signed zeros
1236 and supports rounding towards -infinity. In such a case,
1237 0 - 0 is -0. */
1238 if (!(HONOR_SIGNED_ZEROS (mode)
1239 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1240 && trueop1 == CONST0_RTX (mode))
1241 return op0;
1242
1243 /* See if this is something like X * C - X or vice versa or
1244 if the multiplication is written as a shift. If so, we can
1245 distribute and make a new multiply, shift, or maybe just
1246 have X (if C is 2 in the example above). But don't make a
1247 real multiply if we didn't have one before. */
1248
1249 if (! FLOAT_MODE_P (mode))
1250 {
1251 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1252 rtx lhs = op0, rhs = op1;
1253 int had_mult = 0;
1254
1255 if (GET_CODE (lhs) == NEG)
1256 coeff0 = -1, lhs = XEXP (lhs, 0);
1257 else if (GET_CODE (lhs) == MULT
1258 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1259 {
1260 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1261 had_mult = 1;
1262 }
1263 else if (GET_CODE (lhs) == ASHIFT
1264 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1265 && INTVAL (XEXP (lhs, 1)) >= 0
1266 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1267 {
1268 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1269 lhs = XEXP (lhs, 0);
1270 }
1271
1272 if (GET_CODE (rhs) == NEG)
1273 coeff1 = - 1, rhs = XEXP (rhs, 0);
1274 else if (GET_CODE (rhs) == MULT
1275 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1276 {
1277 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1278 had_mult = 1;
1279 }
1280 else if (GET_CODE (rhs) == ASHIFT
1281 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1282 && INTVAL (XEXP (rhs, 1)) >= 0
1283 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1284 {
1285 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1286 rhs = XEXP (rhs, 0);
1287 }
1288
1289 if (rtx_equal_p (lhs, rhs))
1290 {
1291 tem = simplify_gen_binary (MULT, mode, lhs,
1292 GEN_INT (coeff0 - coeff1));
1293 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1294 }
1295 }
1296
1297 /* (a - (-b)) -> (a + b). True even for IEEE. */
1298 if (GET_CODE (op1) == NEG)
1299 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1300
1301 /* If one of the operands is a PLUS or a MINUS, see if we can
1302 simplify this by the associative law.
1303 Don't use the associative law for floating point.
1304 The inaccuracy makes it nonassociative,
1305 and subtle programs can break if operations are associated. */
1306
1307 if (INTEGRAL_MODE_P (mode)
1308 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1309 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1310 || (GET_CODE (op0) == CONST
1311 && GET_CODE (XEXP (op0, 0)) == PLUS)
1312 || (GET_CODE (op1) == CONST
1313 && GET_CODE (XEXP (op1, 0)) == PLUS))
1314 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1315 return tem;
1316
1317 /* Don't let a relocatable value get a negative coeff. */
1318 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1319 return simplify_gen_binary (PLUS, mode,
1320 op0,
1321 neg_const_int (mode, op1));
1322
1323 /* (x - (x & y)) -> (x & ~y) */
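/* To see this, take x == 1100 and y == 1010 in binary: x & y == 1000,
   and x - 1000 == 0100 == x & ~y.  The subtraction cannot borrow,
   since every bit set in (x & y) is also set in x.  */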
1324 if (GET_CODE (op1) == AND)
1325 {
1326 if (rtx_equal_p (op0, XEXP (op1, 0)))
1327 {
1328 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1329 GET_MODE (XEXP (op1, 1)));
1330 return simplify_gen_binary (AND, mode, op0, tem);
1331 }
1332 if (rtx_equal_p (op0, XEXP (op1, 1)))
1333 {
1334 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1335 GET_MODE (XEXP (op1, 0)));
1336 return simplify_gen_binary (AND, mode, op0, tem);
1337 }
1338 }
1339 break;
1340
1341 case MULT:
1342 if (trueop1 == constm1_rtx)
1343 {
1344 tem = simplify_unary_operation (NEG, mode, op0, mode);
1345
1346 return tem ? tem : gen_rtx_NEG (mode, op0);
1347 }
1348
1349 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1350 x is NaN, since x * 0 is then also NaN. Nor is it valid
1351 when the mode has signed zeros, since multiplying a negative
1352 number by 0 will give -0, not 0. */
1353 if (!HONOR_NANS (mode)
1354 && !HONOR_SIGNED_ZEROS (mode)
1355 && trueop1 == CONST0_RTX (mode)
1356 && ! side_effects_p (op0))
1357 return op1;
1358
1359 /* In IEEE floating point, x*1 is not equivalent to x for
1360 signalling NaNs. */
1361 if (!HONOR_SNANS (mode)
1362 && trueop1 == CONST1_RTX (mode))
1363 return op0;
1364
1365 /* Convert multiply by constant power of two into shift unless
1366 we are still generating RTL. This test is a kludge. */
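/* E.g. (mult X (const_int 8)) becomes (ashift X (const_int 3)) once
   initial RTL generation is over; exact_log2 returns -1 for a
   non-power-of-two such as 6, so X * 6 is left untouched.  */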
1367 if (GET_CODE (trueop1) == CONST_INT
1368 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1369 /* If the mode is larger than the host word size, and the
1370 uppermost bit is set, then this isn't a power of two due
1371 to implicit sign extension. */
1372 && (width <= HOST_BITS_PER_WIDE_INT
1373 || val != HOST_BITS_PER_WIDE_INT - 1)
1374 && ! rtx_equal_function_value_matters)
1375 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1376
1377 /* x*2 is x+x and x*(-1) is -x */
1378 if (GET_CODE (trueop1) == CONST_DOUBLE
1379 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1380 && GET_MODE (op0) == mode)
1381 {
1382 REAL_VALUE_TYPE d;
1383 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1384
1385 if (REAL_VALUES_EQUAL (d, dconst2))
1386 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1387
1388 if (REAL_VALUES_EQUAL (d, dconstm1))
1389 return gen_rtx_NEG (mode, op0);
1390 }
1391 break;
1392
1393 case IOR:
1394 if (trueop1 == const0_rtx)
1395 return op0;
1396 if (GET_CODE (trueop1) == CONST_INT
1397 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1398 == GET_MODE_MASK (mode)))
1399 return op1;
1400 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1401 return op0;
1402 /* A | (~A) -> -1 */
1403 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1404 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1405 && ! side_effects_p (op0)
1406 && GET_MODE_CLASS (mode) != MODE_CC)
1407 return constm1_rtx;
1408 break;
1409
1410 case XOR:
1411 if (trueop1 == const0_rtx)
1412 return op0;
1413 if (GET_CODE (trueop1) == CONST_INT
1414 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1415 == GET_MODE_MASK (mode)))
1416 return gen_rtx_NOT (mode, op0);
1417 if (trueop0 == trueop1 && ! side_effects_p (op0)
1418 && GET_MODE_CLASS (mode) != MODE_CC)
1419 return const0_rtx;
1420 break;
1421
1422 case AND:
1423 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1424 return const0_rtx;
1425 if (GET_CODE (trueop1) == CONST_INT
1426 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1427 == GET_MODE_MASK (mode)))
1428 return op0;
1429 if (trueop0 == trueop1 && ! side_effects_p (op0)
1430 && GET_MODE_CLASS (mode) != MODE_CC)
1431 return op0;
1432 /* A & (~A) -> 0 */
1433 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1434 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1435 && ! side_effects_p (op0)
1436 && GET_MODE_CLASS (mode) != MODE_CC)
1437 return const0_rtx;
1438 break;
1439
1440 case UDIV:
1441 /* Convert divide by power of two into shift (divide by 1 handled
1442 below). */
1443 if (GET_CODE (trueop1) == CONST_INT
1444 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1445 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1446
1447 /* ... fall through ... */
1448
1449 case DIV:
1450 if (trueop1 == CONST1_RTX (mode))
1451 {
1452 /* On some platforms DIV uses narrower mode than its
1453 operands. */
1454 rtx x = gen_lowpart_common (mode, op0);
1455 if (x)
1456 return x;
1457 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1458 return gen_lowpart_SUBREG (mode, op0);
1459 else
1460 return op0;
1461 }
1462
1463 /* Maybe change 0 / x to 0. This transformation isn't safe for
1464 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1465 Nor is it safe for modes with signed zeros, since dividing
1466 0 by a negative number gives -0, not 0. */
1467 if (!HONOR_NANS (mode)
1468 && !HONOR_SIGNED_ZEROS (mode)
1469 && trueop0 == CONST0_RTX (mode)
1470 && ! side_effects_p (op1))
1471 return op0;
1472
1473 /* Change division by a constant into multiplication. Only do
1474 this with -funsafe-math-optimizations. */
1475 else if (GET_CODE (trueop1) == CONST_DOUBLE
1476 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1477 && trueop1 != CONST0_RTX (mode)
1478 && flag_unsafe_math_optimizations)
1479 {
1480 REAL_VALUE_TYPE d;
1481 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1482
1483 if (! REAL_VALUES_EQUAL (d, dconst0))
1484 {
1485 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1486 return gen_rtx_MULT (mode, op0,
1487 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1488 }
1489 }
1490 break;
1491
1492 case UMOD:
1493 /* Handle modulus by power of two (mod with 1 handled below). */
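/* E.g. (umod X (const_int 8)) becomes (and X (const_int 7)).  This
   holds only for unsigned modulus; signed MOD takes no such
   shortcut below.  */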
1494 if (GET_CODE (trueop1) == CONST_INT
1495 && exact_log2 (INTVAL (trueop1)) > 0)
1496 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1497
1498 /* ... fall through ... */
1499
1500 case MOD:
1501 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1502 && ! side_effects_p (op0) && ! side_effects_p (op1))
1503 return const0_rtx;
1504 break;
1505
1506 case ROTATERT:
1507 case ROTATE:
1508 case ASHIFTRT:
1509 /* Rotating ~0 always results in ~0. */
1510 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1511 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1512 && ! side_effects_p (op1))
1513 return op0;
1514
1515 /* ... fall through ... */
1516
1517 case ASHIFT:
1518 case LSHIFTRT:
1519 if (trueop1 == const0_rtx)
1520 return op0;
1521 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1522 return op0;
1523 break;
1524
1525 case SMIN:
1526 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1527 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
1528 && ! side_effects_p (op0))
1529 return op1;
1530 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1531 return op0;
1532 break;
1533
1534 case SMAX:
1535 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1536 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1537 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1538 && ! side_effects_p (op0))
1539 return op1;
1540 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1541 return op0;
1542 break;
1543
1544 case UMIN:
1545 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1546 return op1;
1547 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1548 return op0;
1549 break;
1550
1551 case UMAX:
1552 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1553 return op1;
1554 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1555 return op0;
1556 break;
1557
1558 case SS_PLUS:
1559 case US_PLUS:
1560 case SS_MINUS:
1561 case US_MINUS:
1562 /* ??? There are simplifications that can be done. */
1563 return 0;
1564
1565 case VEC_SELECT:
1566 if (!VECTOR_MODE_P (mode))
1567 {
1568 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1569 || (mode
1570 != GET_MODE_INNER (GET_MODE (trueop0)))
1571 || GET_CODE (trueop1) != PARALLEL
1572 || XVECLEN (trueop1, 0) != 1
1573 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
1574 abort ();
1575
1576 if (GET_CODE (trueop0) == CONST_VECTOR)
1577 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
1578 }
1579 else
1580 {
1581 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1582 || (GET_MODE_INNER (mode)
1583 != GET_MODE_INNER (GET_MODE (trueop0)))
1584 || GET_CODE (trueop1) != PARALLEL)
1585 abort ();
1586
1587 if (GET_CODE (trueop0) == CONST_VECTOR)
1588 {
1589 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1590 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1591 rtvec v = rtvec_alloc (n_elts);
1592 unsigned int i;
1593
1594 if (XVECLEN (trueop1, 0) != (int) n_elts)
1595 abort ();
1596 for (i = 0; i < n_elts; i++)
1597 {
1598 rtx x = XVECEXP (trueop1, 0, i);
1599
1600 if (GET_CODE (x) != CONST_INT)
1601 abort ();
1602 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
1603 }
1604
1605 return gen_rtx_CONST_VECTOR (mode, v);
1606 }
1607 }
1608 return 0;
1609 case VEC_CONCAT:
1610 {
1611 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1612 ? GET_MODE (trueop0)
1613 : GET_MODE_INNER (mode));
1614 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
1615 ? GET_MODE (trueop1)
1616 : GET_MODE_INNER (mode));
1617
1618 if (!VECTOR_MODE_P (mode)
1619 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
1620 != GET_MODE_SIZE (mode)))
1621 abort ();
1622
1623 if ((VECTOR_MODE_P (op0_mode)
1624 && (GET_MODE_INNER (mode)
1625 != GET_MODE_INNER (op0_mode)))
1626 || (!VECTOR_MODE_P (op0_mode)
1627 && GET_MODE_INNER (mode) != op0_mode))
1628 abort ();
1629
1630 if ((VECTOR_MODE_P (op1_mode)
1631 && (GET_MODE_INNER (mode)
1632 != GET_MODE_INNER (op1_mode)))
1633 || (!VECTOR_MODE_P (op1_mode)
1634 && GET_MODE_INNER (mode) != op1_mode))
1635 abort ();
1636
1637 if ((GET_CODE (trueop0) == CONST_VECTOR
1638 || GET_CODE (trueop0) == CONST_INT
1639 || GET_CODE (trueop0) == CONST_DOUBLE)
1640 && (GET_CODE (trueop1) == CONST_VECTOR
1641 || GET_CODE (trueop1) == CONST_INT
1642 || GET_CODE (trueop1) == CONST_DOUBLE))
1643 {
1644 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1645 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1646 rtvec v = rtvec_alloc (n_elts);
1647 unsigned int i;
1648 unsigned in_n_elts = 1;
1649
1650 if (VECTOR_MODE_P (op0_mode))
1651 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
1652 for (i = 0; i < n_elts; i++)
1653 {
1654 if (i < in_n_elts)
1655 {
1656 if (!VECTOR_MODE_P (op0_mode))
1657 RTVEC_ELT (v, i) = trueop0;
1658 else
1659 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
1660 }
1661 else
1662 {
1663 if (!VECTOR_MODE_P (op1_mode))
1664 RTVEC_ELT (v, i) = trueop1;
1665 else
1666 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
1667 i - in_n_elts);
1668 }
1669 }
1670
1671 return gen_rtx_CONST_VECTOR (mode, v);
1672 }
1673 }
1674 return 0;
1675
1676 default:
1677 abort ();
1678 }
1679
1680 return 0;
1681 }
1682
1683 /* Get the integer argument values in two forms:
1684 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1685
1686 arg0 = INTVAL (trueop0);
1687 arg1 = INTVAL (trueop1);
1688
1689 if (width < HOST_BITS_PER_WIDE_INT)
1690 {
1691 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1692 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1693
1694 arg0s = arg0;
1695 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1696 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1697
1698 arg1s = arg1;
1699 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1700 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1701 }
1702 else
1703 {
1704 arg0s = arg0;
1705 arg1s = arg1;
1706 }
1707
1708 /* Compute the value of the arithmetic. */
1709
1710 switch (code)
1711 {
1712 case PLUS:
1713 val = arg0s + arg1s;
1714 break;
1715
1716 case MINUS:
1717 val = arg0s - arg1s;
1718 break;
1719
1720 case MULT:
1721 val = arg0s * arg1s;
1722 break;
1723
1724 case DIV:
1725 if (arg1s == 0
1726 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1727 && arg1s == -1))
1728 return 0;
1729 val = arg0s / arg1s;
1730 break;
1731
1732 case MOD:
1733 if (arg1s == 0
1734 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1735 && arg1s == -1))
1736 return 0;
1737 val = arg0s % arg1s;
1738 break;
1739
1740 case UDIV:
1741 if (arg1 == 0
1742 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1743 && arg1s == -1))
1744 return 0;
1745 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1746 break;
1747
1748 case UMOD:
1749 if (arg1 == 0
1750 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1751 && arg1s == -1))
1752 return 0;
1753 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1754 break;
1755
1756 case AND:
1757 val = arg0 & arg1;
1758 break;
1759
1760 case IOR:
1761 val = arg0 | arg1;
1762 break;
1763
1764 case XOR:
1765 val = arg0 ^ arg1;
1766 break;
1767
1768 case LSHIFTRT:
1769 /* If shift count is undefined, don't fold it; let the machine do
1770 what it wants. But truncate it if the machine will do that. */
1771 if (arg1 < 0)
1772 return 0;
1773
1774 #ifdef SHIFT_COUNT_TRUNCATED
1775 if (SHIFT_COUNT_TRUNCATED)
1776 arg1 %= width;
1777 #endif
1778
1779 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1780 break;
1781
1782 case ASHIFT:
1783 if (arg1 < 0)
1784 return 0;
1785
1786 #ifdef SHIFT_COUNT_TRUNCATED
1787 if (SHIFT_COUNT_TRUNCATED)
1788 arg1 %= width;
1789 #endif
1790
1791 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1792 break;
1793
1794 case ASHIFTRT:
1795 if (arg1 < 0)
1796 return 0;
1797
1798 #ifdef SHIFT_COUNT_TRUNCATED
1799 if (SHIFT_COUNT_TRUNCATED)
1800 arg1 %= width;
1801 #endif
1802
1803 val = arg0s >> arg1;
1804
1805 /* The bootstrap compiler may not have sign-extended the right shift.
1806 Manually extend the sign to ensure bootstrap cc matches gcc. */
1807 if (arg0s < 0 && arg1 > 0)
1808 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1809
1810 break;
1811
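/* Rotation example in a 4-bit mode: ROTATE (left) of 0001 by 1
   gives 0010, with the top bit wrapping around to bit 0, while
   ROTATERT of 0001 by 1 gives 1000.  Each case below builds the
   result by OR'ing the two shifted halves together.  */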
1812 case ROTATERT:
1813 if (arg1 < 0)
1814 return 0;
1815
1816 arg1 %= width;
1817 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1818 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1819 break;
1820
1821 case ROTATE:
1822 if (arg1 < 0)
1823 return 0;
1824
1825 arg1 %= width;
1826 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1827 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1828 break;
1829
1830 case COMPARE:
1831 /* Do nothing here. */
1832 return 0;
1833
1834 case SMIN:
1835 val = arg0s <= arg1s ? arg0s : arg1s;
1836 break;
1837
1838 case UMIN:
1839 val = ((unsigned HOST_WIDE_INT) arg0
1840 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1841 break;
1842
1843 case SMAX:
1844 val = arg0s > arg1s ? arg0s : arg1s;
1845 break;
1846
1847 case UMAX:
1848 val = ((unsigned HOST_WIDE_INT) arg0
1849 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1850 break;
1851
1852 case SS_PLUS:
1853 case US_PLUS:
1854 case SS_MINUS:
1855 case US_MINUS:
1856 /* ??? There are simplifications that can be done. */
1857 return 0;
1858
1859 default:
1860 abort ();
1861 }
1862
1863 val = trunc_int_for_mode (val, mode);
1864
1865 return GEN_INT (val);
1866 }
1867 \f
1868 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1869 PLUS or MINUS.
1870
1871 Rather than test for specific cases, we do this by a brute-force method
1872 and do all possible simplifications until no more changes occur. Then
1873 we rebuild the operation.
1874
1875 If FORCE is true, then always generate the rtx. This is used to
1876 canonicalize stuff emitted from simplify_gen_binary. Note that this
1877 can still fail if the rtx is too complex. It won't fail just because
1878 the result is not 'simpler' than the input, however. */
1879
1880 struct simplify_plus_minus_op_data
1881 {
1882 rtx op;
1883 int neg;
1884 };
1885
1886 static int
1887 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
1888 {
1889 const struct simplify_plus_minus_op_data *d1 = p1;
1890 const struct simplify_plus_minus_op_data *d2 = p2;
1891
1892 return (commutative_operand_precedence (d2->op)
1893 - commutative_operand_precedence (d1->op));
1894 }
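/* qsort'ing with this comparison puts operands of higher
   commutative_operand_precedence first and therefore constants,
   CONST_INT in particular, at the end of the array; the CONST
   rebuilding code after the combination loop depends on that
   ordering.  */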
1895
1896 static rtx
1897 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
1898 rtx op1, int force)
1899 {
1900 struct simplify_plus_minus_op_data ops[8];
1901 rtx result, tem;
1902 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
1903 int first, negate, changed;
1904 int i, j;
1905
1906 memset ((char *) ops, 0, sizeof ops);
1907
1908 /* Set up the two operands and then expand them until nothing has been
1909 changed. If we run out of room in our array, give up; this should
1910 almost never happen. */
1911
1912 ops[0].op = op0;
1913 ops[0].neg = 0;
1914 ops[1].op = op1;
1915 ops[1].neg = (code == MINUS);
1916
1917 do
1918 {
1919 changed = 0;
1920
1921 for (i = 0; i < n_ops; i++)
1922 {
1923 rtx this_op = ops[i].op;
1924 int this_neg = ops[i].neg;
1925 enum rtx_code this_code = GET_CODE (this_op);
1926
1927 switch (this_code)
1928 {
1929 case PLUS:
1930 case MINUS:
1931 if (n_ops == 7)
1932 return NULL_RTX;
1933
1934 ops[n_ops].op = XEXP (this_op, 1);
1935 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
1936 n_ops++;
1937
1938 ops[i].op = XEXP (this_op, 0);
1939 input_ops++;
1940 changed = 1;
1941 break;
1942
1943 case NEG:
1944 ops[i].op = XEXP (this_op, 0);
1945 ops[i].neg = ! this_neg;
1946 changed = 1;
1947 break;
1948
1949 case CONST:
1950 if (n_ops < 7
1951 && GET_CODE (XEXP (this_op, 0)) == PLUS
1952 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
1953 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
1954 {
1955 ops[i].op = XEXP (XEXP (this_op, 0), 0);
1956 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
1957 ops[n_ops].neg = this_neg;
1958 n_ops++;
1959 input_consts++;
1960 changed = 1;
1961 }
1962 break;
1963
1964 case NOT:
1965 /* ~a -> (-a - 1) */
1966 if (n_ops != 7)
1967 {
1968 ops[n_ops].op = constm1_rtx;
1969 ops[n_ops++].neg = this_neg;
1970 ops[i].op = XEXP (this_op, 0);
1971 ops[i].neg = !this_neg;
1972 changed = 1;
1973 }
1974 break;
1975
1976 case CONST_INT:
1977 if (this_neg)
1978 {
1979 ops[i].op = neg_const_int (mode, this_op);
1980 ops[i].neg = 0;
1981 changed = 1;
1982 }
1983 break;
1984
1985 default:
1986 break;
1987 }
1988 }
1989 }
1990 while (changed);
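/* At this point the expression has been flattened into a sum of
   signed terms.  For instance, with code == PLUS, op0 == (minus A B)
   and op1 == C start as { +(minus A B), +C } and expand to
   { +A, +C, -B }: B is appended negated and A replaces the MINUS
   in place.  */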
1991
1992 /* If we only have two operands, we can't do anything. */
1993 if (n_ops <= 2 && !force)
1994 return NULL_RTX;
1995
1996 /* Count the number of CONSTs we didn't split above. */
1997 for (i = 0; i < n_ops; i++)
1998 if (GET_CODE (ops[i].op) == CONST)
1999 input_consts++;
2000
2001 /* Now simplify each pair of operands until nothing changes. The first
2002 time through just simplify constants against each other. */
2003
2004 first = 1;
2005 do
2006 {
2007 changed = first;
2008
2009 for (i = 0; i < n_ops - 1; i++)
2010 for (j = i + 1; j < n_ops; j++)
2011 {
2012 rtx lhs = ops[i].op, rhs = ops[j].op;
2013 int lneg = ops[i].neg, rneg = ops[j].neg;
2014
2015 if (lhs != 0 && rhs != 0
2016 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2017 {
2018 enum rtx_code ncode = PLUS;
2019
2020 if (lneg != rneg)
2021 {
2022 ncode = MINUS;
2023 if (lneg)
2024 tem = lhs, lhs = rhs, rhs = tem;
2025 }
2026 else if (swap_commutative_operands_p (lhs, rhs))
2027 tem = lhs, lhs = rhs, rhs = tem;
2028
2029 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2030
2031 /* Reject "simplifications" that just wrap the two
2032 arguments in a CONST. Failure to do so can result
2033 in infinite recursion with simplify_binary_operation
2034 when it calls us to simplify CONST operations. */
2035 if (tem
2036 && ! (GET_CODE (tem) == CONST
2037 && GET_CODE (XEXP (tem, 0)) == ncode
2038 && XEXP (XEXP (tem, 0), 0) == lhs
2039 && XEXP (XEXP (tem, 0), 1) == rhs)
 2040                    /* Don't allow -x + -1 -> ~x simplifications in the
 2041                       first pass.  This gives us the chance to combine
 2042                       the -1 with other constants.  */
2043 && ! (first
2044 && GET_CODE (tem) == NOT
2045 && XEXP (tem, 0) == rhs))
2046 {
2047 lneg &= rneg;
2048 if (GET_CODE (tem) == NEG)
2049 tem = XEXP (tem, 0), lneg = !lneg;
2050 if (GET_CODE (tem) == CONST_INT && lneg)
2051 tem = neg_const_int (mode, tem), lneg = 0;
2052
2053 ops[i].op = tem;
2054 ops[i].neg = lneg;
2055 ops[j].op = NULL_RTX;
2056 changed = 1;
2057 }
2058 }
2059 }
2060
2061 first = 0;
2062 }
2063 while (changed);
2064
2065 /* Pack all the operands to the lower-numbered entries. */
2066 for (i = 0, j = 0; j < n_ops; j++)
2067 if (ops[j].op)
2068 ops[i++] = ops[j];
2069 n_ops = i;
2070
2071 /* Sort the operations based on swap_commutative_operands_p. */
2072 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2073
2074 /* We suppressed creation of trivial CONST expressions in the
2075 combination loop to avoid recursion. Create one manually now.
2076 The combination loop should have ensured that there is exactly
2077 one CONST_INT, and the sort will have ensured that it is last
2078 in the array and that any other constant will be next-to-last. */
2079
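  /* For example, if the two trailing entries are (symbol_ref S) and
     (const_int 4), plus_constant combines them into the single operand
     (const (plus (symbol_ref S) (const_int 4))).  */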
2080 if (n_ops > 1
2081 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2082 && CONSTANT_P (ops[n_ops - 2].op))
2083 {
2084 rtx value = ops[n_ops - 1].op;
2085 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2086 value = neg_const_int (mode, value);
2087 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2088 n_ops--;
2089 }
2090
2091 /* Count the number of CONSTs that we generated. */
2092 n_consts = 0;
2093 for (i = 0; i < n_ops; i++)
2094 if (GET_CODE (ops[i].op) == CONST)
2095 n_consts++;
2096
2097 /* Give up if we didn't reduce the number of operands we had. Make
2098 sure we count a CONST as two operands. If we have the same
2099 number of operands, but have made more CONSTs than before, this
2100 is also an improvement, so accept it. */
2101 if (!force
2102 && (n_ops + n_consts > input_ops
2103 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2104 return NULL_RTX;
2105
2106 /* Put a non-negated operand first. If there aren't any, make all
2107 operands positive and negate the whole thing later. */
2108
2109 negate = 0;
2110 for (i = 0; i < n_ops && ops[i].neg; i++)
2111 continue;
2112 if (i == n_ops)
2113 {
2114 for (i = 0; i < n_ops; i++)
2115 ops[i].neg = 0;
2116 negate = 1;
2117 }
2118 else if (i != 0)
2119 {
2120 tem = ops[0].op;
2121 ops[0] = ops[i];
2122 ops[i].op = tem;
2123 ops[i].neg = 1;
2124 }
2125
2126 /* Now make the result by performing the requested operations. */
2127 result = ops[0].op;
2128 for (i = 1; i < n_ops; i++)
2129 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2130 mode, result, ops[i].op);
2131
2132 return negate ? gen_rtx_NEG (mode, result) : result;
2133 }
2134
2135 /* Like simplify_binary_operation except used for relational operators.
2136 MODE is the mode of the operands, not that of the result. If MODE
2137 is VOIDmode, both operands must also be VOIDmode and we compare the
2138 operands in "infinite precision".
2139
2140 If no simplification is possible, this function returns zero. Otherwise,
2141 it returns either const_true_rtx or const0_rtx. */
2142
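/* For example, GTU on (const_int 1) and (const_int 0) in SImode folds
   to const_true_rtx below, while any comparison of MODE_CC values is
   left alone and returns zero.  */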
2143 rtx
2144 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2145 rtx op0, rtx op1)
2146 {
2147 int equal, op0lt, op0ltu, op1lt, op1ltu;
2148 rtx tem;
2149 rtx trueop0;
2150 rtx trueop1;
2151
2152 if (mode == VOIDmode
2153 && (GET_MODE (op0) != VOIDmode
2154 || GET_MODE (op1) != VOIDmode))
2155 abort ();
2156
2157 /* If op0 is a compare, extract the comparison arguments from it. */
2158 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2159 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2160
2161 trueop0 = avoid_constant_pool_reference (op0);
2162 trueop1 = avoid_constant_pool_reference (op1);
2163
2164 /* We can't simplify MODE_CC values since we don't know what the
2165 actual comparison is. */
2166 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2167 return 0;
2168
2169 /* Make sure the constant is second. */
2170 if (swap_commutative_operands_p (trueop0, trueop1))
2171 {
2172 tem = op0, op0 = op1, op1 = tem;
2173 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2174 code = swap_condition (code);
2175 }
2176
2177 /* For integer comparisons of A and B maybe we can simplify A - B and can
2178 then simplify a comparison of that with zero. If A and B are both either
2179 a register or a CONST_INT, this can't help; testing for these cases will
2180 prevent infinite recursion here and speed things up.
2181
2182 If CODE is an unsigned comparison, then we can never do this optimization,
2183 because it gives an incorrect result if the subtraction wraps around zero.
2184 ANSI C defines unsigned operations such that they never overflow, and
 2185    thus such cases cannot be ignored.  */
2186
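  /* For instance, comparing (plus (reg X) (const_int 4)) with
     (plus (reg X) (const_int 2)) using GT reduces to comparing their
     difference (const_int 2) with zero, which the recursive call
     folds to const_true_rtx.  */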
2187 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2188 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2189 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2190 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2191 && code != GTU && code != GEU && code != LTU && code != LEU)
2192 return simplify_relational_operation (signed_condition (code),
2193 mode, tem, const0_rtx);
2194
2195 if (flag_unsafe_math_optimizations && code == ORDERED)
2196 return const_true_rtx;
2197
2198 if (flag_unsafe_math_optimizations && code == UNORDERED)
2199 return const0_rtx;
2200
2201 /* For modes without NaNs, if the two operands are equal, we know the
2202 result except if they have side-effects. */
2203 if (! HONOR_NANS (GET_MODE (trueop0))
2204 && rtx_equal_p (trueop0, trueop1)
2205 && ! side_effects_p (trueop0))
2206 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2207
2208 /* If the operands are floating-point constants, see if we can fold
2209 the result. */
2210 else if (GET_CODE (trueop0) == CONST_DOUBLE
2211 && GET_CODE (trueop1) == CONST_DOUBLE
2212 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2213 {
2214 REAL_VALUE_TYPE d0, d1;
2215
2216 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2217 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2218
2219 /* Comparisons are unordered iff at least one of the values is NaN. */
2220 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2221 switch (code)
2222 {
2223 case UNEQ:
2224 case UNLT:
2225 case UNGT:
2226 case UNLE:
2227 case UNGE:
2228 case NE:
2229 case UNORDERED:
2230 return const_true_rtx;
2231 case EQ:
2232 case LT:
2233 case GT:
2234 case LE:
2235 case GE:
2236 case LTGT:
2237 case ORDERED:
2238 return const0_rtx;
2239 default:
2240 return 0;
2241 }
2242
2243 equal = REAL_VALUES_EQUAL (d0, d1);
2244 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2245 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2246 }
2247
2248 /* Otherwise, see if the operands are both integers. */
2249 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2250 && (GET_CODE (trueop0) == CONST_DOUBLE
2251 || GET_CODE (trueop0) == CONST_INT)
2252 && (GET_CODE (trueop1) == CONST_DOUBLE
2253 || GET_CODE (trueop1) == CONST_INT))
2254 {
2255 int width = GET_MODE_BITSIZE (mode);
2256 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2257 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2258
2259 /* Get the two words comprising each integer constant. */
2260 if (GET_CODE (trueop0) == CONST_DOUBLE)
2261 {
2262 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2263 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2264 }
2265 else
2266 {
2267 l0u = l0s = INTVAL (trueop0);
2268 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2269 }
2270
2271 if (GET_CODE (trueop1) == CONST_DOUBLE)
2272 {
2273 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2274 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2275 }
2276 else
2277 {
2278 l1u = l1s = INTVAL (trueop1);
2279 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2280 }
2281
2282 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2283 we have to sign or zero-extend the values. */
2284 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2285 {
2286 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2287 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2288
2289 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2290 l0s |= ((HOST_WIDE_INT) (-1) << width);
2291
2292 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2293 l1s |= ((HOST_WIDE_INT) (-1) << width);
2294 }
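       /* E.g. in QImode (width 8), (const_int -1) and (const_int 255)
          reduce to the same low word here, so an EQ comparison of the
          two folds to const_true_rtx.  */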
2295 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2296 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2297
2298 equal = (h0u == h1u && l0u == l1u);
2299 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2300 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2301 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2302 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2303 }
2304
2305 /* Otherwise, there are some code-specific tests we can make. */
2306 else
2307 {
2308 switch (code)
2309 {
2310 case EQ:
2311 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2312 return const0_rtx;
2313 break;
2314
2315 case NE:
2316 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2317 return const_true_rtx;
2318 break;
2319
2320 case GEU:
2321 /* Unsigned values are never negative. */
2322 if (trueop1 == const0_rtx)
2323 return const_true_rtx;
2324 break;
2325
2326 case LTU:
2327 if (trueop1 == const0_rtx)
2328 return const0_rtx;
2329 break;
2330
2331 case LEU:
2332 /* Unsigned values are never greater than the largest
2333 unsigned value. */
2334 if (GET_CODE (trueop1) == CONST_INT
2335 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2336 && INTEGRAL_MODE_P (mode))
2337 return const_true_rtx;
2338 break;
2339
2340 case GTU:
2341 if (GET_CODE (trueop1) == CONST_INT
2342 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2343 && INTEGRAL_MODE_P (mode))
2344 return const0_rtx;
2345 break;
2346
2347 case LT:
2348 /* Optimize abs(x) < 0.0. */
2349 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2350 {
2351 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2352 : trueop0;
2353 if (GET_CODE (tem) == ABS)
2354 return const0_rtx;
2355 }
2356 break;
2357
2358 case GE:
2359 /* Optimize abs(x) >= 0.0. */
2360 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2361 {
2362 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2363 : trueop0;
2364 if (GET_CODE (tem) == ABS)
2365 return const_true_rtx;
2366 }
2367 break;
2368
2369 case UNGE:
2370 /* Optimize ! (abs(x) < 0.0). */
2371 if (trueop1 == CONST0_RTX (mode))
2372 {
2373 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2374 : trueop0;
2375 if (GET_CODE (tem) == ABS)
2376 return const_true_rtx;
2377 }
2378 break;
2379
2380 default:
2381 break;
2382 }
2383
2384 return 0;
2385 }
2386
2387 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2388 as appropriate. */
2389 switch (code)
2390 {
2391 case EQ:
2392 case UNEQ:
2393 return equal ? const_true_rtx : const0_rtx;
2394 case NE:
2395 case LTGT:
2396 return ! equal ? const_true_rtx : const0_rtx;
2397 case LT:
2398 case UNLT:
2399 return op0lt ? const_true_rtx : const0_rtx;
2400 case GT:
2401 case UNGT:
2402 return op1lt ? const_true_rtx : const0_rtx;
2403 case LTU:
2404 return op0ltu ? const_true_rtx : const0_rtx;
2405 case GTU:
2406 return op1ltu ? const_true_rtx : const0_rtx;
2407 case LE:
2408 case UNLE:
2409 return equal || op0lt ? const_true_rtx : const0_rtx;
2410 case GE:
2411 case UNGE:
2412 return equal || op1lt ? const_true_rtx : const0_rtx;
2413 case LEU:
2414 return equal || op0ltu ? const_true_rtx : const0_rtx;
2415 case GEU:
2416 return equal || op1ltu ? const_true_rtx : const0_rtx;
2417 case ORDERED:
2418 return const_true_rtx;
2419 case UNORDERED:
2420 return const0_rtx;
2421 default:
2422 abort ();
2423 }
2424 }
2425 \f
2426 /* Simplify CODE, an operation with result mode MODE and three operands,
2427 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
 2428    a constant.  Return 0 if no simplification is possible.  */
2429
2430 rtx
2431 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2432 enum machine_mode op0_mode, rtx op0, rtx op1,
2433 rtx op2)
2434 {
2435 unsigned int width = GET_MODE_BITSIZE (mode);
2436
2437 /* VOIDmode means "infinite" precision. */
2438 if (width == 0)
2439 width = HOST_BITS_PER_WIDE_INT;
2440
2441 switch (code)
2442 {
2443 case SIGN_EXTRACT:
2444 case ZERO_EXTRACT:
2445 if (GET_CODE (op0) == CONST_INT
2446 && GET_CODE (op1) == CONST_INT
2447 && GET_CODE (op2) == CONST_INT
2448 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2449 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2450 {
 2451           /* Extracting a bit-field from a constant.  */
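          /* With !BITS_BIG_ENDIAN, (zero_extract (const_int 0xab)
             (const_int 4) (const_int 0)) yields (const_int 0xb), and
             the sign_extract form yields (const_int -5), since bit 3
             of the extracted field is set.  */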
2452 HOST_WIDE_INT val = INTVAL (op0);
2453
2454 if (BITS_BIG_ENDIAN)
2455 val >>= (GET_MODE_BITSIZE (op0_mode)
2456 - INTVAL (op2) - INTVAL (op1));
2457 else
2458 val >>= INTVAL (op2);
2459
2460 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2461 {
2462 /* First zero-extend. */
2463 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2464 /* If desired, propagate sign bit. */
2465 if (code == SIGN_EXTRACT
2466 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2467 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2468 }
2469
2470 /* Clear the bits that don't belong in our mode,
2471 unless they and our sign bit are all one.
2472 So we get either a reasonable negative value or a reasonable
2473 unsigned value for this mode. */
2474 if (width < HOST_BITS_PER_WIDE_INT
2475 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2476 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2477 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2478
2479 return GEN_INT (val);
2480 }
2481 break;
2482
2483 case IF_THEN_ELSE:
2484 if (GET_CODE (op0) == CONST_INT)
2485 return op0 != const0_rtx ? op1 : op2;
2486
 2487       /* Convert a != b ? a : b and a == b ? b : a to "a".  */
2488 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2489 && !HONOR_NANS (mode)
2490 && rtx_equal_p (XEXP (op0, 0), op1)
2491 && rtx_equal_p (XEXP (op0, 1), op2))
2492 return op1;
2493 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2494 && !HONOR_NANS (mode)
2495 && rtx_equal_p (XEXP (op0, 1), op1)
2496 && rtx_equal_p (XEXP (op0, 0), op2))
2497 return op2;
2498 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2499 {
2500 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2501 ? GET_MODE (XEXP (op0, 1))
2502 : GET_MODE (XEXP (op0, 0)));
2503 rtx temp;
2504 if (cmp_mode == VOIDmode)
2505 cmp_mode = op0_mode;
2506 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2507 XEXP (op0, 0), XEXP (op0, 1));
2508
2509 /* See if any simplifications were possible. */
2510 if (temp == const0_rtx)
2511 return op2;
2512 else if (temp == const1_rtx)
2513 return op1;
2514 else if (temp)
2515 op0 = temp;
2516
 2517           /* Look for constants in op1 and op2 that collapse the
 2518              IF_THEN_ELSE into the comparison itself.  */
2518 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2519 {
2520 HOST_WIDE_INT t = INTVAL (op1);
2521 HOST_WIDE_INT f = INTVAL (op2);
2522
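              /* E.g. with STORE_FLAG_VALUE == 1, (if_then_else (lt x y)
                 (const_int 1) (const_int 0)) becomes just (lt x y), and
                 the form with the arms swapped becomes the reversed
                 comparison (ge x y) when that reversal is known to be
                 safe.  */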
2523 if (t == STORE_FLAG_VALUE && f == 0)
2524 code = GET_CODE (op0);
2525 else if (t == 0 && f == STORE_FLAG_VALUE)
2526 {
2527 enum rtx_code tmp;
2528 tmp = reversed_comparison_code (op0, NULL_RTX);
2529 if (tmp == UNKNOWN)
2530 break;
2531 code = tmp;
2532 }
2533 else
2534 break;
2535
2536 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2537 }
2538 }
2539 break;
2540 case VEC_MERGE:
2541 if (GET_MODE (op0) != mode
2542 || GET_MODE (op1) != mode
2543 || !VECTOR_MODE_P (mode))
2544 abort ();
2545 op2 = avoid_constant_pool_reference (op2);
2546 if (GET_CODE (op2) == CONST_INT)
2547 {
2548 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2549 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2550 int mask = (1 << n_elts) - 1;
2551
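          /* Each low-order bit of op2 selects one element: bit I set
             takes element I from op0, clear takes it from op1.  Thus a
             zero mask yields op1 outright and an all-ones mask op0.  */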
2552 if (!(INTVAL (op2) & mask))
2553 return op1;
2554 if ((INTVAL (op2) & mask) == mask)
2555 return op0;
2556
2557 op0 = avoid_constant_pool_reference (op0);
2558 op1 = avoid_constant_pool_reference (op1);
2559 if (GET_CODE (op0) == CONST_VECTOR
2560 && GET_CODE (op1) == CONST_VECTOR)
2561 {
2562 rtvec v = rtvec_alloc (n_elts);
2563 unsigned int i;
2564
2565 for (i = 0; i < n_elts; i++)
2566 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2567 ? CONST_VECTOR_ELT (op0, i)
2568 : CONST_VECTOR_ELT (op1, i));
2569 return gen_rtx_CONST_VECTOR (mode, v);
2570 }
2571 }
2572 break;
2573
2574 default:
2575 abort ();
2576 }
2577
2578 return 0;
2579 }
2580
2581 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
 2582    Return 0 if no simplification is possible.  */
2583 rtx
2584 simplify_subreg (enum machine_mode outermode, rtx op,
2585 enum machine_mode innermode, unsigned int byte)
2586 {
2587 /* Little bit of sanity checking. */
2588 if (innermode == VOIDmode || outermode == VOIDmode
2589 || innermode == BLKmode || outermode == BLKmode)
2590 abort ();
2591
2592 if (GET_MODE (op) != innermode
2593 && GET_MODE (op) != VOIDmode)
2594 abort ();
2595
2596 if (byte % GET_MODE_SIZE (outermode)
2597 || byte >= GET_MODE_SIZE (innermode))
2598 abort ();
2599
2600 if (outermode == innermode && !byte)
2601 return op;
2602
2603 /* Simplify subregs of vector constants. */
2604 if (GET_CODE (op) == CONST_VECTOR)
2605 {
2606 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
2607 const unsigned int offset = byte / elt_size;
2608 rtx elt;
2609
2610 if (GET_MODE_INNER (innermode) == outermode)
2611 {
2612 elt = CONST_VECTOR_ELT (op, offset);
2613
2614 /* ?? We probably don't need this copy_rtx because constants
2615 can be shared. ?? */
2616
2617 return copy_rtx (elt);
2618 }
2619 else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
2620 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
2621 {
2622 return (gen_rtx_CONST_VECTOR
2623 (outermode,
2624 gen_rtvec_v (GET_MODE_NUNITS (outermode),
2625 &CONST_VECTOR_ELT (op, offset))));
2626 }
2627 else if (GET_MODE_CLASS (outermode) == MODE_INT
2628 && (GET_MODE_SIZE (outermode) % elt_size == 0))
2629 {
 2630           /* This happens when the target register size is smaller than
2631 the vector mode, and we synthesize operations with vectors
2632 of elements that are smaller than the register size. */
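          /* For instance, on a little-endian target a SImode subreg at
             byte 0 of (const_vector:V4QI [1 2 3 4]) packs to
             (const_int 0x04030201).  */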
2633 HOST_WIDE_INT sum = 0, high = 0;
2634 unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
2635 unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
2636 unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
2637 int shift = BITS_PER_UNIT * elt_size;
2638 unsigned HOST_WIDE_INT unit_mask;
2639
2640 unit_mask = (unsigned HOST_WIDE_INT) -1
2641 >> (sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - shift);
2642
2643 for (; n_elts--; i += step)
2644 {
2645 elt = CONST_VECTOR_ELT (op, i);
2646 if (GET_CODE (elt) == CONST_DOUBLE
2647 && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
2648 {
2649 elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
2650 elt);
2651 if (! elt)
2652 return NULL_RTX;
2653 }
2654 if (GET_CODE (elt) != CONST_INT)
2655 return NULL_RTX;
2656 /* Avoid overflow. */
2657 if (high >> (HOST_BITS_PER_WIDE_INT - shift))
2658 return NULL_RTX;
2659 high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
2660 sum = (sum << shift) + (INTVAL (elt) & unit_mask);
2661 }
2662 if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
2663 return GEN_INT (trunc_int_for_mode (sum, outermode));
 2664           else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
2665 return immed_double_const (sum, high, outermode);
2666 else
2667 return NULL_RTX;
2668 }
2669 else if (GET_MODE_CLASS (outermode) == MODE_INT
2670 && (elt_size % GET_MODE_SIZE (outermode) == 0))
2671 {
2672 enum machine_mode new_mode
2673 = int_mode_for_mode (GET_MODE_INNER (innermode));
2674 int subbyte = byte % elt_size;
2675
2676 op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
2677 if (! op)
2678 return NULL_RTX;
2679 return simplify_subreg (outermode, op, new_mode, subbyte);
2680 }
2681 else if (GET_MODE_CLASS (outermode) == MODE_INT)
2682 /* This shouldn't happen, but let's not do anything stupid. */
2683 return NULL_RTX;
2684 }
2685
2686 /* Attempt to simplify constant to non-SUBREG expression. */
2687 if (CONSTANT_P (op))
2688 {
2689 int offset, part;
2690 unsigned HOST_WIDE_INT val = 0;
2691
2692 if (VECTOR_MODE_P (outermode))
2693 {
2694 /* Construct a CONST_VECTOR from individual subregs. */
2695 enum machine_mode submode = GET_MODE_INNER (outermode);
2696 int subsize = GET_MODE_UNIT_SIZE (outermode);
2697 int i, elts = GET_MODE_NUNITS (outermode);
2698 rtvec v = rtvec_alloc (elts);
2699 rtx elt;
2700
2701 for (i = 0; i < elts; i++, byte += subsize)
2702 {
2703 /* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
2704 /* ??? It would be nice if we could actually make such subregs
2705 on targets that allow such relocations. */
2706 if (byte >= GET_MODE_SIZE (innermode))
2707 elt = CONST0_RTX (submode);
2708 else
2709 elt = simplify_subreg (submode, op, innermode, byte);
2710 if (! elt)
2711 return NULL_RTX;
2712 RTVEC_ELT (v, i) = elt;
2713 }
2714 return gen_rtx_CONST_VECTOR (outermode, v);
2715 }
2716
2717 /* ??? This code is partly redundant with code below, but can handle
2718 the subregs of floats and similar corner cases.
 2719          Later we should move all simplification code here and rewrite
2720 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2721 using SIMPLIFY_SUBREG. */
2722 if (subreg_lowpart_offset (outermode, innermode) == byte
2723 && GET_CODE (op) != CONST_VECTOR)
2724 {
2725 rtx new = gen_lowpart_if_possible (outermode, op);
2726 if (new)
2727 return new;
2728 }
2729
 2730       /* Similar comments to the above apply here.  */
2731 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2732 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2733 && GET_MODE_CLASS (outermode) == MODE_INT)
2734 {
2735 rtx new = constant_subword (op,
2736 (byte / UNITS_PER_WORD),
2737 innermode);
2738 if (new)
2739 return new;
2740 }
2741
2742 if (GET_MODE_CLASS (outermode) != MODE_INT
2743 && GET_MODE_CLASS (outermode) != MODE_CC)
2744 {
2745 enum machine_mode new_mode = int_mode_for_mode (outermode);
2746
2747 if (new_mode != innermode || byte != 0)
2748 {
2749 op = simplify_subreg (new_mode, op, innermode, byte);
2750 if (! op)
2751 return NULL_RTX;
2752 return simplify_subreg (outermode, op, new_mode, 0);
2753 }
2754 }
2755
2756 offset = byte * BITS_PER_UNIT;
2757 switch (GET_CODE (op))
2758 {
2759 case CONST_DOUBLE:
2760 if (GET_MODE (op) != VOIDmode)
2761 break;
2762
2763 /* We can't handle this case yet. */
2764 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2765 return NULL_RTX;
2766
2767 part = offset >= HOST_BITS_PER_WIDE_INT;
2768 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2769 && BYTES_BIG_ENDIAN)
2770 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2771 && WORDS_BIG_ENDIAN))
2772 part = !part;
2773 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2774 offset %= HOST_BITS_PER_WIDE_INT;
2775
2776 /* We've already picked the word we want from a double, so
2777 pretend this is actually an integer. */
2778 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2779
2780 /* FALLTHROUGH */
2781 case CONST_INT:
2782 if (GET_CODE (op) == CONST_INT)
2783 val = INTVAL (op);
2784
2785 /* We don't handle synthesizing of non-integral constants yet. */
2786 if (GET_MODE_CLASS (outermode) != MODE_INT)
2787 return NULL_RTX;
2788
2789 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2790 {
2791 if (WORDS_BIG_ENDIAN)
2792 offset = (GET_MODE_BITSIZE (innermode)
2793 - GET_MODE_BITSIZE (outermode) - offset);
2794 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2795 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2796 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2797 - 2 * (offset % BITS_PER_WORD));
2798 }
2799
2800 if (offset >= HOST_BITS_PER_WIDE_INT)
2801 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2802 else
2803 {
2804 val >>= offset;
2805 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2806 val = trunc_int_for_mode (val, outermode);
2807 return GEN_INT (val);
2808 }
2809 default:
2810 break;
2811 }
2812 }
2813
2814 /* Changing mode twice with SUBREG => just change it once,
 2815    or not at all if changing back to op's starting mode.  */
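/* E.g. (subreg:QI (subreg:HI (reg:SI R) 0) 0) collapses to
   (subreg:QI (reg:SI R) 0); the two byte offsets simply add.  */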
2816 if (GET_CODE (op) == SUBREG)
2817 {
2818 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2819 int final_offset = byte + SUBREG_BYTE (op);
2820 rtx new;
2821
2822 if (outermode == innermostmode
2823 && byte == 0 && SUBREG_BYTE (op) == 0)
2824 return SUBREG_REG (op);
2825
 2826       /* The SUBREG_BYTE represents the offset, as if the value were stored
 2827          in memory.  The irritating exception is the paradoxical subreg,
 2828          where we define SUBREG_BYTE to be 0; on big endian machines this
 2829          value should really be negative.  For a moment, undo this exception.  */
2830 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2831 {
2832 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2833 if (WORDS_BIG_ENDIAN)
2834 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2835 if (BYTES_BIG_ENDIAN)
2836 final_offset += difference % UNITS_PER_WORD;
2837 }
2838 if (SUBREG_BYTE (op) == 0
2839 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2840 {
2841 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2842 if (WORDS_BIG_ENDIAN)
2843 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2844 if (BYTES_BIG_ENDIAN)
2845 final_offset += difference % UNITS_PER_WORD;
2846 }
2847
2848 /* See whether resulting subreg will be paradoxical. */
2849 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2850 {
2851 /* In nonparadoxical subregs we can't handle negative offsets. */
2852 if (final_offset < 0)
2853 return NULL_RTX;
2854 /* Bail out in case resulting subreg would be incorrect. */
2855 if (final_offset % GET_MODE_SIZE (outermode)
2856 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2857 return NULL_RTX;
2858 }
2859 else
2860 {
2861 int offset = 0;
2862 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2863
 2864           /* In a paradoxical subreg, see if we are still looking at the
 2865              lower part.  If so, our SUBREG_BYTE will be 0.  */
2866 if (WORDS_BIG_ENDIAN)
2867 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2868 if (BYTES_BIG_ENDIAN)
2869 offset += difference % UNITS_PER_WORD;
2870 if (offset == final_offset)
2871 final_offset = 0;
2872 else
2873 return NULL_RTX;
2874 }
2875
2876 /* Recurse for further possible simplifications. */
2877 new = simplify_subreg (outermode, SUBREG_REG (op),
2878 GET_MODE (SUBREG_REG (op)),
2879 final_offset);
2880 if (new)
2881 return new;
2882 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2883 }
2884
2885 /* SUBREG of a hard register => just change the register number
2886 and/or mode. If the hard register is not valid in that mode,
2887 suppress this simplification. If the hard register is the stack,
2888 frame, or argument pointer, leave this as a SUBREG. */
2889
2890 if (REG_P (op)
2891 && (! REG_FUNCTION_VALUE_P (op)
2892 || ! rtx_equal_function_value_matters)
2893 && REGNO (op) < FIRST_PSEUDO_REGISTER
2894 #ifdef CANNOT_CHANGE_MODE_CLASS
2895 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
2896 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2897 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
2898 #endif
2899 && ((reload_completed && !frame_pointer_needed)
2900 || (REGNO (op) != FRAME_POINTER_REGNUM
2901 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2902 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2903 #endif
2904 ))
2905 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2906 && REGNO (op) != ARG_POINTER_REGNUM
2907 #endif
2908 && REGNO (op) != STACK_POINTER_REGNUM)
2909 {
2910 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2911 0);
2912
2913 /* ??? We do allow it if the current REG is not valid for
2914 its mode. This is a kludge to work around how float/complex
2915 arguments are passed on 32-bit SPARC and should be fixed. */
2916 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2917 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2918 {
2919 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
2920
 2921           /* Propagate the original regno.  We don't have any way to specify
 2922              an offset inside the original regno, so do so only for the
 2923              lowpart.  The information is used only by alias analysis, which
 2924              cannot grok a partial register anyway.  */
2925
2926 if (subreg_lowpart_offset (outermode, innermode) == byte)
2927 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
2928 return x;
2929 }
2930 }
2931
2932 /* If we have a SUBREG of a register that we are replacing and we are
2933 replacing it with a MEM, make a new MEM and try replacing the
2934 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2935 or if we would be widening it. */
2936
2937 if (GET_CODE (op) == MEM
2938 && ! mode_dependent_address_p (XEXP (op, 0))
2939 /* Allow splitting of volatile memory references in case we don't
2940 have instruction to move the whole thing. */
2941 && (! MEM_VOLATILE_P (op)
2942 || ! have_insn_for (SET, innermode))
2943 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2944 return adjust_address_nv (op, outermode, byte);
2945
2946 /* Handle complex values represented as CONCAT
2947 of real and imaginary part. */
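  /* E.g. (subreg:SF (concat:SC R I) 4) refers entirely to the
     imaginary part I when SFmode is four bytes wide.  */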
2948 if (GET_CODE (op) == CONCAT)
2949 {
2950 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2951 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2952 unsigned int final_offset;
2953 rtx res;
2954
2955 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2956 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2957 if (res)
2958 return res;
2959 /* We can at least simplify it by referring directly to the relevant part. */
2960 return gen_rtx_SUBREG (outermode, part, final_offset);
2961 }
2962
2963 return NULL_RTX;
2964 }
2965 /* Make a SUBREG operation or equivalent if it folds. */
2966
2967 rtx
2968 simplify_gen_subreg (enum machine_mode outermode, rtx op,
2969 enum machine_mode innermode, unsigned int byte)
2970 {
2971 rtx new;
2972 /* Little bit of sanity checking. */
2973 if (innermode == VOIDmode || outermode == VOIDmode
2974 || innermode == BLKmode || outermode == BLKmode)
2975 abort ();
2976
2977 if (GET_MODE (op) != innermode
2978 && GET_MODE (op) != VOIDmode)
2979 abort ();
2980
2981 if (byte % GET_MODE_SIZE (outermode)
2982 || byte >= GET_MODE_SIZE (innermode))
2983 abort ();
2984
2985 if (GET_CODE (op) == QUEUED)
2986 return NULL_RTX;
2987
2988 new = simplify_subreg (outermode, op, innermode, byte);
2989 if (new)
2990 return new;
2991
2992 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2993 return NULL_RTX;
2994
2995 return gen_rtx_SUBREG (outermode, op, byte);
2996 }
2997 /* Simplify X, an rtx expression.
2998
2999 Return the simplified expression or NULL if no simplifications
3000 were possible.
3001
3002 This is the preferred entry point into the simplification routines;
3003 however, we still allow passes to call the more specific routines.
3004
3005 Right now GCC has three (yes, three) major bodies of RTL simplification
3006 code that need to be unified.
3007
3008 1. fold_rtx in cse.c. This code uses various CSE specific
3009 information to aid in RTL simplification.
3010
3011 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3012 it uses combine specific information to aid in RTL
3013 simplification.
3014
3015 3. The routines in this file.
3016
3017
3018 Long term we want to only have one body of simplification code; to
3019 get to that state I recommend the following steps:
3020
 3021    1. Pore over fold_rtx & simplify_rtx and move any simplifications
 3022       which do not depend on pass-specific state into these routines.
3023
3024 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3025 use this routine whenever possible.
3026
3027 3. Allow for pass dependent state to be provided to these
3028 routines and add simplifications based on the pass dependent
3029 state. Remove code from cse.c & combine.c that becomes
3030 redundant/dead.
3031
3032 It will take time, but ultimately the compiler will be easier to
3033 maintain and improve. It's totally silly that when we add a
 3034    simplification it needs to be added to 4 places (3 for RTL
 3035    simplification and 1 for tree simplification).  */
3036
3037 rtx
3038 simplify_rtx (rtx x)
3039 {
3040 enum rtx_code code = GET_CODE (x);
3041 enum machine_mode mode = GET_MODE (x);
3042 rtx temp;
3043
3044 switch (GET_RTX_CLASS (code))
3045 {
3046 case '1':
3047 return simplify_unary_operation (code, mode,
3048 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3049 case 'c':
3050 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3051 {
3052 rtx tem;
3053
3054 tem = XEXP (x, 0);
3055 XEXP (x, 0) = XEXP (x, 1);
3056 XEXP (x, 1) = tem;
3057 return simplify_binary_operation (code, mode,
3058 XEXP (x, 0), XEXP (x, 1));
3059 }
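      /* Otherwise fall through and handle it as an ordinary binary
         operation.  */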
3060
3061 case '2':
3062 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3063
3064 case '3':
3065 case 'b':
3066 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3067 XEXP (x, 0), XEXP (x, 1),
3068 XEXP (x, 2));
3069
3070 case '<':
3071 temp = simplify_relational_operation (code,
3072 ((GET_MODE (XEXP (x, 0))
3073 != VOIDmode)
3074 ? GET_MODE (XEXP (x, 0))
3075 : GET_MODE (XEXP (x, 1))),
3076 XEXP (x, 0), XEXP (x, 1));
3077 #ifdef FLOAT_STORE_FLAG_VALUE
3078 if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3079 {
3080 if (temp == const0_rtx)
3081 temp = CONST0_RTX (mode);
3082 else
3083 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3084 mode);
3085 }
3086 #endif
3087 return temp;
3088
3089 case 'x':
3090 if (code == SUBREG)
3091 return simplify_gen_subreg (mode, SUBREG_REG (x),
3092 GET_MODE (SUBREG_REG (x)),
3093 SUBREG_BYTE (x));
3094 if (code == CONSTANT_P_RTX)
3095 {
3096 if (CONSTANT_P (XEXP (x, 0)))
3097 return const1_rtx;
3098 }
3099 break;
3100
3101 case 'o':
3102 if (code == LO_SUM)
3103 {
3104 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3105 if (GET_CODE (XEXP (x, 0)) == HIGH
3106 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3107 return XEXP (x, 1);
3108 }
3109 break;
3110
3111 default:
3112 break;
3113 }
3114 return NULL;
3115 }