1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
51
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
55 rtx, int);
56 \f
 57 /* Negate a CONST_INT rtx, truncating the result (because negating the
 58    maximally negative number overflows).  */
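/* For example, negating (const_int -128) in QImode gives 128, which
   gen_int_mode truncates back to (const_int -128).  */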
59 static rtx
60 neg_const_int (enum machine_mode mode, rtx i)
61 {
62 return gen_int_mode (- INTVAL (i), mode);
63 }
64
65 \f
66 /* Make a binary operation by properly ordering the operands and
67 seeing if the expression folds. */
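/* For instance, (plus (const_int 4) (reg)) is canonicalized here to
   (plus (reg) (const_int 4)), and (plus (reg) (const_int 0)) in an
   integer mode folds to just (reg).  */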
68
69 rtx
70 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
71 rtx op1)
72 {
73 rtx tem;
74
75 /* Put complex operands first and constants second if commutative. */
76 if (GET_RTX_CLASS (code) == 'c'
77 && swap_commutative_operands_p (op0, op1))
78 tem = op0, op0 = op1, op1 = tem;
79
80 /* If this simplifies, do it. */
81 tem = simplify_binary_operation (code, mode, op0, op1);
82 if (tem)
83 return tem;
84
85 /* Handle addition and subtraction specially. Otherwise, just form
86 the operation. */
87
88 if (code == PLUS || code == MINUS)
89 {
90 tem = simplify_plus_minus (code, mode, op0, op1, 1);
91 if (tem)
92 return tem;
93 }
94
95 return gen_rtx_fmt_ee (code, mode, op0, op1);
96 }
97 \f
98 /* If X is a MEM referencing the constant pool, return the real value.
99 Otherwise return X. */
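/* For instance, a (mem (symbol_ref)) whose address points at a pooled
   CONST_DOUBLE returns that CONST_DOUBLE instead of the MEM.  */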
100 rtx
101 avoid_constant_pool_reference (rtx x)
102 {
103 rtx c, tmp, addr;
104 enum machine_mode cmode;
105
106 switch (GET_CODE (x))
107 {
108 case MEM:
109 break;
110
111 case FLOAT_EXTEND:
112 /* Handle float extensions of constant pool references. */
113 tmp = XEXP (x, 0);
114 c = avoid_constant_pool_reference (tmp);
115 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
116 {
117 REAL_VALUE_TYPE d;
118
119 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
120 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
121 }
122 return x;
123
124 default:
125 return x;
126 }
127
128 addr = XEXP (x, 0);
129
130 /* Call target hook to avoid the effects of -fpic etc... */
131 addr = (*targetm.delegitimize_address) (addr);
132
133 if (GET_CODE (addr) == LO_SUM)
134 addr = XEXP (addr, 1);
135
136 if (GET_CODE (addr) != SYMBOL_REF
137 || ! CONSTANT_POOL_ADDRESS_P (addr))
138 return x;
139
140 c = get_pool_constant (addr);
141 cmode = get_pool_mode (addr);
142
 143 /* If we're accessing the constant in a different mode than the one it was
 144    originally stored in, attempt to fix that up via subreg simplifications.
 145    If that fails we have no choice but to return the original memory.  */
146 if (cmode != GET_MODE (x))
147 {
148 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
149 return c ? c : x;
150 }
151
152 return c;
153 }
154 \f
155 /* Make a unary operation by first seeing if it folds and otherwise making
156 the specified operation. */
157
158 rtx
159 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
160 enum machine_mode op_mode)
161 {
162 rtx tem;
163
164 /* If this simplifies, use it. */
165 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
166 return tem;
167
168 return gen_rtx_fmt_e (code, mode, op);
169 }
170
171 /* Likewise for ternary operations. */
172
173 rtx
174 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
175 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
176 {
177 rtx tem;
178
179 /* If this simplifies, use it. */
180 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
181 op0, op1, op2)))
182 return tem;
183
184 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
185 }
186 \f
187 /* Likewise, for relational operations.
 188    CMP_MODE specifies the mode in which the comparison is done.
189 */
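/* For example, (eq (gt x y) (const_int 0)) becomes (le x y) when the
   comparison is reversible.  */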
190
191 rtx
192 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
193 enum machine_mode cmp_mode, rtx op0, rtx op1)
194 {
195 rtx tem;
196
197 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
198 return tem;
199
200 /* For the following tests, ensure const0_rtx is op1. */
201 if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
202 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
203
204 /* If op0 is a compare, extract the comparison arguments from it. */
205 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
206 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
207
 208   /* If op0 is itself a comparison against zero, use it directly for NE and its reverse (when known) for EQ.  */
209 if (code == NE && op1 == const0_rtx
210 && GET_RTX_CLASS (GET_CODE (op0)) == '<')
211 return op0;
212 else if (code == EQ && op1 == const0_rtx)
213 {
 214       /* reversed_comparison_code implicitly checks GET_RTX_CLASS (GET_CODE (op0)) == '<'.  */
215 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
216 if (new != UNKNOWN)
217 {
218 code = new;
219 mode = cmp_mode;
220 op1 = XEXP (op0, 1);
221 op0 = XEXP (op0, 0);
222 }
223 }
224
225 /* Put complex operands first and constants second. */
226 if (swap_commutative_operands_p (op0, op1))
227 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
228
229 return gen_rtx_fmt_ee (code, mode, op0, op1);
230 }
231 \f
232 /* Replace all occurrences of OLD in X with NEW and try to simplify the
233 resulting RTX. Return a new RTX which is as simplified as possible. */
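/* For example, replacing (reg A) with (const_int 0) in
   (plus (reg A) (reg B)) in an integer mode yields just (reg B).  */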
234
235 rtx
236 simplify_replace_rtx (rtx x, rtx old, rtx new)
237 {
238 enum rtx_code code = GET_CODE (x);
239 enum machine_mode mode = GET_MODE (x);
240
241 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
242 to build a new expression substituting recursively. If we can't do
243 anything, return our input. */
244
245 if (x == old)
246 return new;
247
248 switch (GET_RTX_CLASS (code))
249 {
250 case '1':
251 {
252 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
253 rtx op = (XEXP (x, 0) == old
254 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
255
256 return simplify_gen_unary (code, mode, op, op_mode);
257 }
258
259 case '2':
260 case 'c':
261 return
262 simplify_gen_binary (code, mode,
263 simplify_replace_rtx (XEXP (x, 0), old, new),
264 simplify_replace_rtx (XEXP (x, 1), old, new));
265 case '<':
266 {
267 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
268 ? GET_MODE (XEXP (x, 0))
269 : GET_MODE (XEXP (x, 1)));
270 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
271 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
272 rtx temp = simplify_gen_relational (code, mode,
273 (op_mode != VOIDmode
274 ? op_mode
275 : GET_MODE (op0) != VOIDmode
276 ? GET_MODE (op0)
277 : GET_MODE (op1)),
278 op0, op1);
279 #ifdef FLOAT_STORE_FLAG_VALUE
280 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
281 {
282 if (temp == const0_rtx)
283 temp = CONST0_RTX (mode);
284 else if (temp == const_true_rtx)
285 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
286 mode);
287 }
288 #endif
289 return temp;
290 }
291
292 case '3':
293 case 'b':
294 {
295 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
296 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
297
298 return
299 simplify_gen_ternary (code, mode,
300 (op_mode != VOIDmode
301 ? op_mode
302 : GET_MODE (op0)),
303 op0,
304 simplify_replace_rtx (XEXP (x, 1), old, new),
305 simplify_replace_rtx (XEXP (x, 2), old, new));
306 }
307
308 case 'x':
309 /* The only case we try to handle is a SUBREG. */
310 if (code == SUBREG)
311 {
312 rtx exp;
313 exp = simplify_gen_subreg (GET_MODE (x),
314 simplify_replace_rtx (SUBREG_REG (x),
315 old, new),
316 GET_MODE (SUBREG_REG (x)),
317 SUBREG_BYTE (x));
318 if (exp)
319 x = exp;
320 }
321 return x;
322
323 case 'o':
324 if (code == MEM)
325 return replace_equiv_address_nv (x,
326 simplify_replace_rtx (XEXP (x, 0),
327 old, new));
328 else if (code == LO_SUM)
329 {
330 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
331 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
332
333 /* (lo_sum (high x) x) -> x */
334 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
335 return op1;
336
337 return gen_rtx_LO_SUM (mode, op0, op1);
338 }
339 else if (code == REG)
340 {
341 if (REG_P (old) && REGNO (x) == REGNO (old))
342 return new;
343 }
344
345 return x;
346
347 default:
348 return x;
349 }
350 return x;
351 }
352 \f
353 /* Try to simplify a unary operation CODE whose output mode is to be
354 MODE with input operand OP whose mode was originally OP_MODE.
355 Return zero if no simplification can be made. */
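/* For example, (neg (const_int 5)) in SImode folds to (const_int -5),
   and (not (not x)) folds to x; zero is returned when nothing applies.  */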
356 rtx
357 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
358 rtx op, enum machine_mode op_mode)
359 {
360 unsigned int width = GET_MODE_BITSIZE (mode);
361 rtx trueop = avoid_constant_pool_reference (op);
362
363 if (code == VEC_DUPLICATE)
364 {
365 if (!VECTOR_MODE_P (mode))
366 abort ();
367 if (GET_MODE (trueop) != VOIDmode
368 && !VECTOR_MODE_P (GET_MODE (trueop))
369 && GET_MODE_INNER (mode) != GET_MODE (trueop))
370 abort ();
371 if (GET_MODE (trueop) != VOIDmode
372 && VECTOR_MODE_P (GET_MODE (trueop))
373 && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
374 abort ();
375 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
376 || GET_CODE (trueop) == CONST_VECTOR)
377 {
378 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
379 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
380 rtvec v = rtvec_alloc (n_elts);
381 unsigned int i;
382
383 if (GET_CODE (trueop) != CONST_VECTOR)
384 for (i = 0; i < n_elts; i++)
385 RTVEC_ELT (v, i) = trueop;
386 else
387 {
388 enum machine_mode inmode = GET_MODE (trueop);
389 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
390 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
391
392 if (in_n_elts >= n_elts || n_elts % in_n_elts)
393 abort ();
394 for (i = 0; i < n_elts; i++)
395 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
396 }
397 return gen_rtx_CONST_VECTOR (mode, v);
398 }
399 }
400
401 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
402 {
403 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
404 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
405 enum machine_mode opmode = GET_MODE (trueop);
406 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
407 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
408 rtvec v = rtvec_alloc (n_elts);
409 unsigned int i;
410
411 if (op_n_elts != n_elts)
412 abort ();
413
414 for (i = 0; i < n_elts; i++)
415 {
416 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
417 CONST_VECTOR_ELT (trueop, i),
418 GET_MODE_INNER (opmode));
419 if (!x)
420 return 0;
421 RTVEC_ELT (v, i) = x;
422 }
423 return gen_rtx_CONST_VECTOR (mode, v);
424 }
425
426 /* The order of these tests is critical so that, for example, we don't
427 check the wrong mode (input vs. output) for a conversion operation,
428 such as FIX. At some point, this should be simplified. */
429
430 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
431 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
432 {
433 HOST_WIDE_INT hv, lv;
434 REAL_VALUE_TYPE d;
435
436 if (GET_CODE (trueop) == CONST_INT)
437 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
438 else
439 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
440
441 REAL_VALUE_FROM_INT (d, lv, hv, mode);
442 d = real_value_truncate (mode, d);
443 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
444 }
445 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
446 && (GET_CODE (trueop) == CONST_DOUBLE
447 || GET_CODE (trueop) == CONST_INT))
448 {
449 HOST_WIDE_INT hv, lv;
450 REAL_VALUE_TYPE d;
451
452 if (GET_CODE (trueop) == CONST_INT)
453 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
454 else
455 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
456
457 if (op_mode == VOIDmode)
458 {
459 /* We don't know how to interpret negative-looking numbers in
460 this case, so don't try to fold those. */
461 if (hv < 0)
462 return 0;
463 }
464 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
465 ;
466 else
467 hv = 0, lv &= GET_MODE_MASK (op_mode);
468
469 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
470 d = real_value_truncate (mode, d);
471 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
472 }
473
474 if (GET_CODE (trueop) == CONST_INT
475 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
476 {
477 HOST_WIDE_INT arg0 = INTVAL (trueop);
478 HOST_WIDE_INT val;
479
480 switch (code)
481 {
482 case NOT:
483 val = ~ arg0;
484 break;
485
486 case NEG:
487 val = - arg0;
488 break;
489
490 case ABS:
491 val = (arg0 >= 0 ? arg0 : - arg0);
492 break;
493
494 case FFS:
495 /* Don't use ffs here. Instead, get low order bit and then its
496 number. If arg0 is zero, this will return 0, as desired. */
497 arg0 &= GET_MODE_MASK (mode);
498 val = exact_log2 (arg0 & (- arg0)) + 1;
499 break;
500
501 case CLZ:
502 arg0 &= GET_MODE_MASK (mode);
503 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
504 ;
505 else
506 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
507 break;
508
509 case CTZ:
510 arg0 &= GET_MODE_MASK (mode);
511 if (arg0 == 0)
512 {
 513	      /* Even if the value at zero is undefined, we have to come
 514		 up with some replacement; the mode's bit size seems good enough.  */
515 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
516 val = GET_MODE_BITSIZE (mode);
517 }
518 else
519 val = exact_log2 (arg0 & -arg0);
520 break;
521
522 case POPCOUNT:
523 arg0 &= GET_MODE_MASK (mode);
524 val = 0;
525 while (arg0)
526 val++, arg0 &= arg0 - 1;
527 break;
528
529 case PARITY:
530 arg0 &= GET_MODE_MASK (mode);
531 val = 0;
532 while (arg0)
533 val++, arg0 &= arg0 - 1;
534 val &= 1;
535 break;
536
537 case TRUNCATE:
538 val = arg0;
539 break;
540
541 case ZERO_EXTEND:
542 /* When zero-extending a CONST_INT, we need to know its
543 original mode. */
544 if (op_mode == VOIDmode)
545 abort ();
546 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
547 {
548 /* If we were really extending the mode,
549 we would have to distinguish between zero-extension
550 and sign-extension. */
551 if (width != GET_MODE_BITSIZE (op_mode))
552 abort ();
553 val = arg0;
554 }
555 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
556 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
557 else
558 return 0;
559 break;
560
561 case SIGN_EXTEND:
562 if (op_mode == VOIDmode)
563 op_mode = mode;
564 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
565 {
566 /* If we were really extending the mode,
567 we would have to distinguish between zero-extension
568 and sign-extension. */
569 if (width != GET_MODE_BITSIZE (op_mode))
570 abort ();
571 val = arg0;
572 }
573 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
574 {
575 val
576 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
577 if (val
578 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
579 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
580 }
581 else
582 return 0;
583 break;
584
585 case SQRT:
586 case FLOAT_EXTEND:
587 case FLOAT_TRUNCATE:
588 case SS_TRUNCATE:
589 case US_TRUNCATE:
590 return 0;
591
592 default:
593 abort ();
594 }
595
596 val = trunc_int_for_mode (val, mode);
597
598 return GEN_INT (val);
599 }
600
601 /* We can do some operations on integer CONST_DOUBLEs. Also allow
602 for a DImode operation on a CONST_INT. */
603 else if (GET_MODE (trueop) == VOIDmode
604 && width <= HOST_BITS_PER_WIDE_INT * 2
605 && (GET_CODE (trueop) == CONST_DOUBLE
606 || GET_CODE (trueop) == CONST_INT))
607 {
608 unsigned HOST_WIDE_INT l1, lv;
609 HOST_WIDE_INT h1, hv;
610
611 if (GET_CODE (trueop) == CONST_DOUBLE)
612 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
613 else
614 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
615
616 switch (code)
617 {
618 case NOT:
619 lv = ~ l1;
620 hv = ~ h1;
621 break;
622
623 case NEG:
624 neg_double (l1, h1, &lv, &hv);
625 break;
626
627 case ABS:
628 if (h1 < 0)
629 neg_double (l1, h1, &lv, &hv);
630 else
631 lv = l1, hv = h1;
632 break;
633
634 case FFS:
635 hv = 0;
636 if (l1 == 0)
637 {
638 if (h1 == 0)
639 lv = 0;
640 else
641 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
642 }
643 else
644 lv = exact_log2 (l1 & -l1) + 1;
645 break;
646
647 case CLZ:
648 hv = 0;
649 if (h1 == 0)
650 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
651 else
652 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
653 - HOST_BITS_PER_WIDE_INT;
654 break;
655
656 case CTZ:
657 hv = 0;
658 if (l1 == 0)
659 {
660 if (h1 == 0)
661 lv = GET_MODE_BITSIZE (mode);
662 else
663 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
664 }
665 else
666 lv = exact_log2 (l1 & -l1);
667 break;
668
669 case POPCOUNT:
670 hv = 0;
671 lv = 0;
672 while (l1)
673 lv++, l1 &= l1 - 1;
674 while (h1)
675 lv++, h1 &= h1 - 1;
676 break;
677
678 case PARITY:
679 hv = 0;
680 lv = 0;
681 while (l1)
682 lv++, l1 &= l1 - 1;
683 while (h1)
684 lv++, h1 &= h1 - 1;
685 lv &= 1;
686 break;
687
688 case TRUNCATE:
689 /* This is just a change-of-mode, so do nothing. */
690 lv = l1, hv = h1;
691 break;
692
693 case ZERO_EXTEND:
694 if (op_mode == VOIDmode)
695 abort ();
696
697 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
698 return 0;
699
700 hv = 0;
701 lv = l1 & GET_MODE_MASK (op_mode);
702 break;
703
704 case SIGN_EXTEND:
705 if (op_mode == VOIDmode
706 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
707 return 0;
708 else
709 {
710 lv = l1 & GET_MODE_MASK (op_mode);
711 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
712 && (lv & ((HOST_WIDE_INT) 1
713 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
714 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
715
716 hv = HWI_SIGN_EXTEND (lv);
717 }
718 break;
719
720 case SQRT:
721 return 0;
722
723 default:
724 return 0;
725 }
726
727 return immed_double_const (lv, hv, mode);
728 }
729
730 else if (GET_CODE (trueop) == CONST_DOUBLE
731 && GET_MODE_CLASS (mode) == MODE_FLOAT)
732 {
733 REAL_VALUE_TYPE d, t;
734 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
735
736 switch (code)
737 {
738 case SQRT:
739 if (HONOR_SNANS (mode) && real_isnan (&d))
740 return 0;
741 real_sqrt (&t, mode, &d);
742 d = t;
743 break;
744 case ABS:
745 d = REAL_VALUE_ABS (d);
746 break;
747 case NEG:
748 d = REAL_VALUE_NEGATE (d);
749 break;
750 case FLOAT_TRUNCATE:
751 d = real_value_truncate (mode, d);
752 break;
753 case FLOAT_EXTEND:
754 /* All this does is change the mode. */
755 break;
756 case FIX:
757 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
758 break;
759
760 default:
761 abort ();
762 }
763 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
764 }
765
766 else if (GET_CODE (trueop) == CONST_DOUBLE
767 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
768 && GET_MODE_CLASS (mode) == MODE_INT
769 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
770 {
771 HOST_WIDE_INT i;
772 REAL_VALUE_TYPE d;
773 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
774 switch (code)
775 {
776 case FIX: i = REAL_VALUE_FIX (d); break;
777 case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
778 default:
779 abort ();
780 }
781 return gen_int_mode (i, mode);
782 }
783
784 /* This was formerly used only for non-IEEE float.
785 eggert@twinsun.com says it is safe for IEEE also. */
786 else
787 {
788 enum rtx_code reversed;
789 /* There are some simplifications we can do even if the operands
790 aren't constant. */
791 switch (code)
792 {
793 case NOT:
794 /* (not (not X)) == X. */
795 if (GET_CODE (op) == NOT)
796 return XEXP (op, 0);
797
798 /* (not (eq X Y)) == (ne X Y), etc. */
799 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
800 && ((reversed = reversed_comparison_code (op, NULL_RTX))
801 != UNKNOWN))
802 return gen_rtx_fmt_ee (reversed,
803 op_mode, XEXP (op, 0), XEXP (op, 1));
804 break;
805
806 case NEG:
807 /* (neg (neg X)) == X. */
808 if (GET_CODE (op) == NEG)
809 return XEXP (op, 0);
810 break;
811
812 case SIGN_EXTEND:
813 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
814 becomes just the MINUS if its mode is MODE. This allows
815 folding switch statements on machines using casesi (such as
816 the VAX). */
817 if (GET_CODE (op) == TRUNCATE
818 && GET_MODE (XEXP (op, 0)) == mode
819 && GET_CODE (XEXP (op, 0)) == MINUS
820 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
821 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
822 return XEXP (op, 0);
823
824 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
825 if (! POINTERS_EXTEND_UNSIGNED
826 && mode == Pmode && GET_MODE (op) == ptr_mode
827 && (CONSTANT_P (op)
828 || (GET_CODE (op) == SUBREG
829 && GET_CODE (SUBREG_REG (op)) == REG
830 && REG_POINTER (SUBREG_REG (op))
831 && GET_MODE (SUBREG_REG (op)) == Pmode)))
832 return convert_memory_address (Pmode, op);
833 #endif
834 break;
835
836 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
837 case ZERO_EXTEND:
838 if (POINTERS_EXTEND_UNSIGNED > 0
839 && mode == Pmode && GET_MODE (op) == ptr_mode
840 && (CONSTANT_P (op)
841 || (GET_CODE (op) == SUBREG
842 && GET_CODE (SUBREG_REG (op)) == REG
843 && REG_POINTER (SUBREG_REG (op))
844 && GET_MODE (SUBREG_REG (op)) == Pmode)))
845 return convert_memory_address (Pmode, op);
846 break;
847 #endif
848
849 default:
850 break;
851 }
852
853 return 0;
854 }
855 }
856 \f
857 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
858 and OP1. Return 0 if no simplification is possible.
859
860 Don't use this for relational operations such as EQ or LT.
861 Use simplify_relational_operation instead. */
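/* For example, (plus (const_int 2) (const_int 3)) folds to (const_int 5),
   while (plus (reg X) (reg Y)) returns 0 because nothing simplifies.  */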
862 rtx
863 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
864 rtx op0, rtx op1)
865 {
866 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
867 HOST_WIDE_INT val;
868 unsigned int width = GET_MODE_BITSIZE (mode);
869 rtx tem;
870 rtx trueop0 = avoid_constant_pool_reference (op0);
871 rtx trueop1 = avoid_constant_pool_reference (op1);
872
873 /* Relational operations don't work here. We must know the mode
874 of the operands in order to do the comparison correctly.
875 Assuming a full word can give incorrect results.
876 Consider comparing 128 with -128 in QImode. */
877
878 if (GET_RTX_CLASS (code) == '<')
879 abort ();
880
881 /* Make sure the constant is second. */
882 if (GET_RTX_CLASS (code) == 'c'
883 && swap_commutative_operands_p (trueop0, trueop1))
884 {
885 tem = op0, op0 = op1, op1 = tem;
886 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
887 }
888
889 if (VECTOR_MODE_P (mode)
890 && GET_CODE (trueop0) == CONST_VECTOR
891 && GET_CODE (trueop1) == CONST_VECTOR)
892 {
893 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
894 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
895 enum machine_mode op0mode = GET_MODE (trueop0);
896 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
897 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
898 enum machine_mode op1mode = GET_MODE (trueop1);
899 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
900 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
901 rtvec v = rtvec_alloc (n_elts);
902 unsigned int i;
903
904 if (op0_n_elts != n_elts || op1_n_elts != n_elts)
905 abort ();
906
907 for (i = 0; i < n_elts; i++)
908 {
909 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
910 CONST_VECTOR_ELT (trueop0, i),
911 CONST_VECTOR_ELT (trueop1, i));
912 if (!x)
913 return 0;
914 RTVEC_ELT (v, i) = x;
915 }
916
917 return gen_rtx_CONST_VECTOR (mode, v);
918 }
919
920 if (GET_MODE_CLASS (mode) == MODE_FLOAT
921 && GET_CODE (trueop0) == CONST_DOUBLE
922 && GET_CODE (trueop1) == CONST_DOUBLE
923 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
924 {
925 REAL_VALUE_TYPE f0, f1, value;
926
927 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
928 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
929 f0 = real_value_truncate (mode, f0);
930 f1 = real_value_truncate (mode, f1);
931
932 if (HONOR_SNANS (mode)
933 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
934 return 0;
935
936 if (code == DIV
937 && REAL_VALUES_EQUAL (f1, dconst0)
938 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
939 return 0;
940
941 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
942
943 value = real_value_truncate (mode, value);
944 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
945 }
946
947 /* We can fold some multi-word operations. */
948 if (GET_MODE_CLASS (mode) == MODE_INT
949 && width == HOST_BITS_PER_WIDE_INT * 2
950 && (GET_CODE (trueop0) == CONST_DOUBLE
951 || GET_CODE (trueop0) == CONST_INT)
952 && (GET_CODE (trueop1) == CONST_DOUBLE
953 || GET_CODE (trueop1) == CONST_INT))
954 {
955 unsigned HOST_WIDE_INT l1, l2, lv;
956 HOST_WIDE_INT h1, h2, hv;
957
958 if (GET_CODE (trueop0) == CONST_DOUBLE)
959 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
960 else
961 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
962
963 if (GET_CODE (trueop1) == CONST_DOUBLE)
964 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
965 else
966 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
967
968 switch (code)
969 {
970 case MINUS:
971 /* A - B == A + (-B). */
972 neg_double (l2, h2, &lv, &hv);
973 l2 = lv, h2 = hv;
974
 975	  /* ... fall through ... */
976
977 case PLUS:
978 add_double (l1, h1, l2, h2, &lv, &hv);
979 break;
980
981 case MULT:
982 mul_double (l1, h1, l2, h2, &lv, &hv);
983 break;
984
985 case DIV: case MOD: case UDIV: case UMOD:
986 /* We'd need to include tree.h to do this and it doesn't seem worth
987 it. */
988 return 0;
989
990 case AND:
991 lv = l1 & l2, hv = h1 & h2;
992 break;
993
994 case IOR:
995 lv = l1 | l2, hv = h1 | h2;
996 break;
997
998 case XOR:
999 lv = l1 ^ l2, hv = h1 ^ h2;
1000 break;
1001
1002 case SMIN:
1003 if (h1 < h2
1004 || (h1 == h2
1005 && ((unsigned HOST_WIDE_INT) l1
1006 < (unsigned HOST_WIDE_INT) l2)))
1007 lv = l1, hv = h1;
1008 else
1009 lv = l2, hv = h2;
1010 break;
1011
1012 case SMAX:
1013 if (h1 > h2
1014 || (h1 == h2
1015 && ((unsigned HOST_WIDE_INT) l1
1016 > (unsigned HOST_WIDE_INT) l2)))
1017 lv = l1, hv = h1;
1018 else
1019 lv = l2, hv = h2;
1020 break;
1021
1022 case UMIN:
1023 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1024 || (h1 == h2
1025 && ((unsigned HOST_WIDE_INT) l1
1026 < (unsigned HOST_WIDE_INT) l2)))
1027 lv = l1, hv = h1;
1028 else
1029 lv = l2, hv = h2;
1030 break;
1031
1032 case UMAX:
1033 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1034 || (h1 == h2
1035 && ((unsigned HOST_WIDE_INT) l1
1036 > (unsigned HOST_WIDE_INT) l2)))
1037 lv = l1, hv = h1;
1038 else
1039 lv = l2, hv = h2;
1040 break;
1041
1042 case LSHIFTRT: case ASHIFTRT:
1043 case ASHIFT:
1044 case ROTATE: case ROTATERT:
1045 #ifdef SHIFT_COUNT_TRUNCATED
1046 if (SHIFT_COUNT_TRUNCATED)
1047 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1048 #endif
1049
1050 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1051 return 0;
1052
1053 if (code == LSHIFTRT || code == ASHIFTRT)
1054 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1055 code == ASHIFTRT);
1056 else if (code == ASHIFT)
1057 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1058 else if (code == ROTATE)
1059 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1060 else /* code == ROTATERT */
1061 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1062 break;
1063
1064 default:
1065 return 0;
1066 }
1067
1068 return immed_double_const (lv, hv, mode);
1069 }
1070
1071 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1072 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1073 {
1074 /* Even if we can't compute a constant result,
1075 there are some cases worth simplifying. */
1076
1077 switch (code)
1078 {
1079 case PLUS:
1080 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1081 when x is NaN, infinite, or finite and nonzero. They aren't
1082 when x is -0 and the rounding mode is not towards -infinity,
1083 since (-0) + 0 is then 0. */
1084 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1085 return op0;
1086
1087 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1088 transformations are safe even for IEEE. */
1089 if (GET_CODE (op0) == NEG)
1090 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1091 else if (GET_CODE (op1) == NEG)
1092 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1093
1094 /* (~a) + 1 -> -a */
1095 if (INTEGRAL_MODE_P (mode)
1096 && GET_CODE (op0) == NOT
1097 && trueop1 == const1_rtx)
1098 return gen_rtx_NEG (mode, XEXP (op0, 0));
1099
1100 /* Handle both-operands-constant cases. We can only add
1101 CONST_INTs to constants since the sum of relocatable symbols
1102 can't be handled by most assemblers. Don't add CONST_INT
1103 to CONST_INT since overflow won't be computed properly if wider
1104 than HOST_BITS_PER_WIDE_INT. */
1105
1106 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1107 && GET_CODE (op1) == CONST_INT)
1108 return plus_constant (op0, INTVAL (op1));
1109 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1110 && GET_CODE (op0) == CONST_INT)
1111 return plus_constant (op1, INTVAL (op0));
1112
1113	  /* See if this is something like X * C + X or vice versa, or if
1114	     the multiplication is written as a shift.  If so, we can
1115	     distribute and make a new multiply or shift, or maybe just
1116	     have X * (C + 1).  But don't make a real multiply if we
1117	     didn't have one before.  */
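	  /* For instance, (plus (mult x (const_int 3)) x) is rewritten here
	     as (mult x (const_int 4)).  */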
1118
1119 if (! FLOAT_MODE_P (mode))
1120 {
1121 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1122 rtx lhs = op0, rhs = op1;
1123 int had_mult = 0;
1124
1125 if (GET_CODE (lhs) == NEG)
1126 coeff0 = -1, lhs = XEXP (lhs, 0);
1127 else if (GET_CODE (lhs) == MULT
1128 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1129 {
1130 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1131 had_mult = 1;
1132 }
1133 else if (GET_CODE (lhs) == ASHIFT
1134 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1135 && INTVAL (XEXP (lhs, 1)) >= 0
1136 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1137 {
1138 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1139 lhs = XEXP (lhs, 0);
1140 }
1141
1142 if (GET_CODE (rhs) == NEG)
1143 coeff1 = -1, rhs = XEXP (rhs, 0);
1144 else if (GET_CODE (rhs) == MULT
1145 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1146 {
1147 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1148 had_mult = 1;
1149 }
1150 else if (GET_CODE (rhs) == ASHIFT
1151 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1152 && INTVAL (XEXP (rhs, 1)) >= 0
1153 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1154 {
1155 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1156 rhs = XEXP (rhs, 0);
1157 }
1158
1159 if (rtx_equal_p (lhs, rhs))
1160 {
1161 tem = simplify_gen_binary (MULT, mode, lhs,
1162 GEN_INT (coeff0 + coeff1));
1163 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1164 }
1165 }
1166
1167 /* If one of the operands is a PLUS or a MINUS, see if we can
1168 simplify this by the associative law.
1169 Don't use the associative law for floating point.
1170 The inaccuracy makes it nonassociative,
1171 and subtle programs can break if operations are associated. */
1172
1173 if (INTEGRAL_MODE_P (mode)
1174 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1175 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1176 || (GET_CODE (op0) == CONST
1177 && GET_CODE (XEXP (op0, 0)) == PLUS)
1178 || (GET_CODE (op1) == CONST
1179 && GET_CODE (XEXP (op1, 0)) == PLUS))
1180 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1181 return tem;
1182 break;
1183
1184 case COMPARE:
1185 #ifdef HAVE_cc0
1186 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1187 using cc0, in which case we want to leave it as a COMPARE
1188 so we can distinguish it from a register-register-copy.
1189
1190 In IEEE floating point, x-0 is not the same as x. */
1191
1192 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1193 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1194 && trueop1 == CONST0_RTX (mode))
1195 return op0;
1196 #endif
1197
1198 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1199 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1200 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1201 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1202 {
1203 rtx xop00 = XEXP (op0, 0);
1204 rtx xop10 = XEXP (op1, 0);
1205
1206 #ifdef HAVE_cc0
1207 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1208 #else
1209 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1210 && GET_MODE (xop00) == GET_MODE (xop10)
1211 && REGNO (xop00) == REGNO (xop10)
1212 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1213 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1214 #endif
1215 return xop00;
1216 }
1217 break;
1218
1219 case MINUS:
1220 /* We can't assume x-x is 0 even with non-IEEE floating point,
1221 but since it is zero except in very strange circumstances, we
1222 will treat it as zero with -funsafe-math-optimizations. */
1223 if (rtx_equal_p (trueop0, trueop1)
1224 && ! side_effects_p (op0)
1225 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1226 return CONST0_RTX (mode);
1227
1228 /* Change subtraction from zero into negation. (0 - x) is the
1229 same as -x when x is NaN, infinite, or finite and nonzero.
1230 But if the mode has signed zeros, and does not round towards
1231 -infinity, then 0 - 0 is 0, not -0. */
1232 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1233 return gen_rtx_NEG (mode, op1);
1234
1235 /* (-1 - a) is ~a. */
1236 if (trueop0 == constm1_rtx)
1237 return gen_rtx_NOT (mode, op1);
1238
1239 /* Subtracting 0 has no effect unless the mode has signed zeros
1240 and supports rounding towards -infinity. In such a case,
1241 0 - 0 is -0. */
1242 if (!(HONOR_SIGNED_ZEROS (mode)
1243 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1244 && trueop1 == CONST0_RTX (mode))
1245 return op0;
1246
1247	  /* See if this is something like X * C - X or vice versa, or if
1248	     the multiplication is written as a shift.  If so, we can
1249	     distribute and make a new multiply, shift, or maybe just
1250	     have X (if C is 2 in the example above).  But don't make a
1251	     real multiply if we didn't have one before.  */
1252
1253 if (! FLOAT_MODE_P (mode))
1254 {
1255 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1256 rtx lhs = op0, rhs = op1;
1257 int had_mult = 0;
1258
1259 if (GET_CODE (lhs) == NEG)
1260 coeff0 = -1, lhs = XEXP (lhs, 0);
1261 else if (GET_CODE (lhs) == MULT
1262 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1263 {
1264 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1265 had_mult = 1;
1266 }
1267 else if (GET_CODE (lhs) == ASHIFT
1268 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1269 && INTVAL (XEXP (lhs, 1)) >= 0
1270 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1271 {
1272 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1273 lhs = XEXP (lhs, 0);
1274 }
1275
1276 if (GET_CODE (rhs) == NEG)
1277 coeff1 = - 1, rhs = XEXP (rhs, 0);
1278 else if (GET_CODE (rhs) == MULT
1279 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1280 {
1281 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1282 had_mult = 1;
1283 }
1284 else if (GET_CODE (rhs) == ASHIFT
1285 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1286 && INTVAL (XEXP (rhs, 1)) >= 0
1287 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1288 {
1289 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1290 rhs = XEXP (rhs, 0);
1291 }
1292
1293 if (rtx_equal_p (lhs, rhs))
1294 {
1295 tem = simplify_gen_binary (MULT, mode, lhs,
1296 GEN_INT (coeff0 - coeff1));
1297 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1298 }
1299 }
1300
1301 /* (a - (-b)) -> (a + b). True even for IEEE. */
1302 if (GET_CODE (op1) == NEG)
1303 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1304
1305 /* If one of the operands is a PLUS or a MINUS, see if we can
1306 simplify this by the associative law.
1307 Don't use the associative law for floating point.
1308 The inaccuracy makes it nonassociative,
1309 and subtle programs can break if operations are associated. */
1310
1311 if (INTEGRAL_MODE_P (mode)
1312 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1313 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1314 || (GET_CODE (op0) == CONST
1315 && GET_CODE (XEXP (op0, 0)) == PLUS)
1316 || (GET_CODE (op1) == CONST
1317 && GET_CODE (XEXP (op1, 0)) == PLUS))
1318 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1319 return tem;
1320
1321 /* Don't let a relocatable value get a negative coeff. */
1322 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1323 return simplify_gen_binary (PLUS, mode,
1324 op0,
1325 neg_const_int (mode, op1));
1326
1327 /* (x - (x & y)) -> (x & ~y) */
1328 if (GET_CODE (op1) == AND)
1329 {
1330 if (rtx_equal_p (op0, XEXP (op1, 0)))
1331 {
1332 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1333 GET_MODE (XEXP (op1, 1)));
1334 return simplify_gen_binary (AND, mode, op0, tem);
1335 }
1336 if (rtx_equal_p (op0, XEXP (op1, 1)))
1337 {
1338 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1339 GET_MODE (XEXP (op1, 0)));
1340 return simplify_gen_binary (AND, mode, op0, tem);
1341 }
1342 }
1343 break;
1344
1345 case MULT:
1346 if (trueop1 == constm1_rtx)
1347 {
1348 tem = simplify_unary_operation (NEG, mode, op0, mode);
1349
1350 return tem ? tem : gen_rtx_NEG (mode, op0);
1351 }
1352
1353 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1354 x is NaN, since x * 0 is then also NaN. Nor is it valid
1355 when the mode has signed zeros, since multiplying a negative
1356 number by 0 will give -0, not 0. */
1357 if (!HONOR_NANS (mode)
1358 && !HONOR_SIGNED_ZEROS (mode)
1359 && trueop1 == CONST0_RTX (mode)
1360 && ! side_effects_p (op0))
1361 return op1;
1362
1363 /* In IEEE floating point, x*1 is not equivalent to x for
1364 signalling NaNs. */
1365 if (!HONOR_SNANS (mode)
1366 && trueop1 == CONST1_RTX (mode))
1367 return op0;
1368
1369 /* Convert multiply by constant power of two into shift unless
1370 we are still generating RTL. This test is a kludge. */
1371 if (GET_CODE (trueop1) == CONST_INT
1372 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1373 /* If the mode is larger than the host word size, and the
1374 uppermost bit is set, then this isn't a power of two due
1375 to implicit sign extension. */
1376 && (width <= HOST_BITS_PER_WIDE_INT
1377 || val != HOST_BITS_PER_WIDE_INT - 1)
1378 && ! rtx_equal_function_value_matters)
1379 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1380
1381 /* x*2 is x+x and x*(-1) is -x */
1382 if (GET_CODE (trueop1) == CONST_DOUBLE
1383 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1384 && GET_MODE (op0) == mode)
1385 {
1386 REAL_VALUE_TYPE d;
1387 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1388
1389 if (REAL_VALUES_EQUAL (d, dconst2))
1390 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1391
1392 if (REAL_VALUES_EQUAL (d, dconstm1))
1393 return gen_rtx_NEG (mode, op0);
1394 }
1395 break;
1396
1397 case IOR:
1398 if (trueop1 == const0_rtx)
1399 return op0;
1400 if (GET_CODE (trueop1) == CONST_INT
1401 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1402 == GET_MODE_MASK (mode)))
1403 return op1;
1404 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1405 return op0;
1406 /* A | (~A) -> -1 */
1407 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1408 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1409 && ! side_effects_p (op0)
1410 && GET_MODE_CLASS (mode) != MODE_CC)
1411 return constm1_rtx;
1412 break;
1413
1414 case XOR:
1415 if (trueop1 == const0_rtx)
1416 return op0;
1417 if (GET_CODE (trueop1) == CONST_INT
1418 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1419 == GET_MODE_MASK (mode)))
1420 return gen_rtx_NOT (mode, op0);
1421 if (trueop0 == trueop1 && ! side_effects_p (op0)
1422 && GET_MODE_CLASS (mode) != MODE_CC)
1423 return const0_rtx;
1424 break;
1425
1426 case AND:
1427 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1428 return const0_rtx;
1429 if (GET_CODE (trueop1) == CONST_INT
1430 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1431 == GET_MODE_MASK (mode)))
1432 return op0;
1433 if (trueop0 == trueop1 && ! side_effects_p (op0)
1434 && GET_MODE_CLASS (mode) != MODE_CC)
1435 return op0;
1436 /* A & (~A) -> 0 */
1437 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1438 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1439 && ! side_effects_p (op0)
1440 && GET_MODE_CLASS (mode) != MODE_CC)
1441 return const0_rtx;
1442 break;
1443
1444 case UDIV:
1445 /* Convert divide by power of two into shift (divide by 1 handled
1446 below). */
1447 if (GET_CODE (trueop1) == CONST_INT
1448 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1449 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1450
1451 /* ... fall through ... */
1452
1453 case DIV:
1454 if (trueop1 == CONST1_RTX (mode))
1455 {
1456	      /* On some platforms DIV uses a narrower mode than its
1457		 operands.  */
1458 rtx x = gen_lowpart_common (mode, op0);
1459 if (x)
1460 return x;
1461 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1462 return gen_lowpart_SUBREG (mode, op0);
1463 else
1464 return op0;
1465 }
1466
1467 /* Maybe change 0 / x to 0. This transformation isn't safe for
1468 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1469 Nor is it safe for modes with signed zeros, since dividing
1470 0 by a negative number gives -0, not 0. */
1471 if (!HONOR_NANS (mode)
1472 && !HONOR_SIGNED_ZEROS (mode)
1473 && trueop0 == CONST0_RTX (mode)
1474 && ! side_effects_p (op1))
1475 return op0;
1476
1477 /* Change division by a constant into multiplication. Only do
1478 this with -funsafe-math-optimizations. */
1479 else if (GET_CODE (trueop1) == CONST_DOUBLE
1480 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1481 && trueop1 != CONST0_RTX (mode)
1482 && flag_unsafe_math_optimizations)
1483 {
1484 REAL_VALUE_TYPE d;
1485 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1486
1487 if (! REAL_VALUES_EQUAL (d, dconst0))
1488 {
1489 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1490 return gen_rtx_MULT (mode, op0,
1491 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1492 }
1493 }
1494 break;
1495
1496 case UMOD:
1497 /* Handle modulus by power of two (mod with 1 handled below). */
1498 if (GET_CODE (trueop1) == CONST_INT
1499 && exact_log2 (INTVAL (trueop1)) > 0)
1500 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1501
1502 /* ... fall through ... */
1503
1504 case MOD:
1505 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1506 && ! side_effects_p (op0) && ! side_effects_p (op1))
1507 return const0_rtx;
1508 break;
1509
1510 case ROTATERT:
1511 case ROTATE:
1512 case ASHIFTRT:
1513 /* Rotating ~0 always results in ~0. */
1514 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1515 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1516 && ! side_effects_p (op1))
1517 return op0;
1518
1519 /* ... fall through ... */
1520
1521 case ASHIFT:
1522 case LSHIFTRT:
1523 if (trueop1 == const0_rtx)
1524 return op0;
1525 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1526 return op0;
1527 break;
1528
1529 case SMIN:
1530 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1531 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1532 && ! side_effects_p (op0))
1533 return op1;
1534 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1535 return op0;
1536 break;
1537
1538 case SMAX:
1539 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1540 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1541 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1542 && ! side_effects_p (op0))
1543 return op1;
1544 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1545 return op0;
1546 break;
1547
1548 case UMIN:
1549 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1550 return op1;
1551 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1552 return op0;
1553 break;
1554
1555 case UMAX:
1556 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1557 return op1;
1558 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1559 return op0;
1560 break;
1561
1562 case SS_PLUS:
1563 case US_PLUS:
1564 case SS_MINUS:
1565 case US_MINUS:
1566 /* ??? There are simplifications that can be done. */
1567 return 0;
1568
1569 case VEC_SELECT:
1570 if (!VECTOR_MODE_P (mode))
1571 {
1572 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1573 || (mode
1574 != GET_MODE_INNER (GET_MODE (trueop0)))
1575 || GET_CODE (trueop1) != PARALLEL
1576 || XVECLEN (trueop1, 0) != 1
1577 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
1578 abort ();
1579
1580 if (GET_CODE (trueop0) == CONST_VECTOR)
1581 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
1582 }
1583 else
1584 {
1585 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1586 || (GET_MODE_INNER (mode)
1587 != GET_MODE_INNER (GET_MODE (trueop0)))
1588 || GET_CODE (trueop1) != PARALLEL)
1589 abort ();
1590
1591 if (GET_CODE (trueop0) == CONST_VECTOR)
1592 {
1593 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1594 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1595 rtvec v = rtvec_alloc (n_elts);
1596 unsigned int i;
1597
1598 if (XVECLEN (trueop1, 0) != (int) n_elts)
1599 abort ();
1600 for (i = 0; i < n_elts; i++)
1601 {
1602 rtx x = XVECEXP (trueop1, 0, i);
1603
1604 if (GET_CODE (x) != CONST_INT)
1605 abort ();
1606 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
1607 }
1608
1609 return gen_rtx_CONST_VECTOR (mode, v);
1610 }
1611 }
1612 return 0;
1613 case VEC_CONCAT:
1614 {
1615 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1616 ? GET_MODE (trueop0)
1617 : GET_MODE_INNER (mode));
1618 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
1619 ? GET_MODE (trueop1)
1620 : GET_MODE_INNER (mode));
1621
1622 if (!VECTOR_MODE_P (mode)
1623 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
1624 != GET_MODE_SIZE (mode)))
1625 abort ();
1626
1627 if ((VECTOR_MODE_P (op0_mode)
1628 && (GET_MODE_INNER (mode)
1629 != GET_MODE_INNER (op0_mode)))
1630 || (!VECTOR_MODE_P (op0_mode)
1631 && GET_MODE_INNER (mode) != op0_mode))
1632 abort ();
1633
1634 if ((VECTOR_MODE_P (op1_mode)
1635 && (GET_MODE_INNER (mode)
1636 != GET_MODE_INNER (op1_mode)))
1637 || (!VECTOR_MODE_P (op1_mode)
1638 && GET_MODE_INNER (mode) != op1_mode))
1639 abort ();
1640
1641 if ((GET_CODE (trueop0) == CONST_VECTOR
1642 || GET_CODE (trueop0) == CONST_INT
1643 || GET_CODE (trueop0) == CONST_DOUBLE)
1644 && (GET_CODE (trueop1) == CONST_VECTOR
1645 || GET_CODE (trueop1) == CONST_INT
1646 || GET_CODE (trueop1) == CONST_DOUBLE))
1647 {
1648 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1649 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1650 rtvec v = rtvec_alloc (n_elts);
1651 unsigned int i;
1652 unsigned in_n_elts = 1;
1653
1654 if (VECTOR_MODE_P (op0_mode))
1655 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
1656 for (i = 0; i < n_elts; i++)
1657 {
1658 if (i < in_n_elts)
1659 {
1660 if (!VECTOR_MODE_P (op0_mode))
1661 RTVEC_ELT (v, i) = trueop0;
1662 else
1663 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
1664 }
1665 else
1666 {
1667 if (!VECTOR_MODE_P (op1_mode))
1668 RTVEC_ELT (v, i) = trueop1;
1669 else
1670 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
1671 i - in_n_elts);
1672 }
1673 }
1674
1675 return gen_rtx_CONST_VECTOR (mode, v);
1676 }
1677 }
1678 return 0;
1679
1680 default:
1681 abort ();
1682 }
1683
1684 return 0;
1685 }
1686
1687 /* Get the integer argument values in two forms:
1688 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1689
1690 arg0 = INTVAL (trueop0);
1691 arg1 = INTVAL (trueop1);
1692
1693 if (width < HOST_BITS_PER_WIDE_INT)
1694 {
1695 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1696 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1697
1698 arg0s = arg0;
1699 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1700 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1701
1702 arg1s = arg1;
1703 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1704 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1705 }
1706 else
1707 {
1708 arg0s = arg0;
1709 arg1s = arg1;
1710 }
1711
1712 /* Compute the value of the arithmetic. */
1713
1714 switch (code)
1715 {
1716 case PLUS:
1717 val = arg0s + arg1s;
1718 break;
1719
1720 case MINUS:
1721 val = arg0s - arg1s;
1722 break;
1723
1724 case MULT:
1725 val = arg0s * arg1s;
1726 break;
1727
1728 case DIV:
1729 if (arg1s == 0
1730 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1731 && arg1s == -1))
1732 return 0;
1733 val = arg0s / arg1s;
1734 break;
1735
1736 case MOD:
1737 if (arg1s == 0
1738 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1739 && arg1s == -1))
1740 return 0;
1741 val = arg0s % arg1s;
1742 break;
1743
1744 case UDIV:
1745 if (arg1 == 0
1746 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1747 && arg1s == -1))
1748 return 0;
1749 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1750 break;
1751
1752 case UMOD:
1753 if (arg1 == 0
1754 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1755 && arg1s == -1))
1756 return 0;
1757 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1758 break;
1759
1760 case AND:
1761 val = arg0 & arg1;
1762 break;
1763
1764 case IOR:
1765 val = arg0 | arg1;
1766 break;
1767
1768 case XOR:
1769 val = arg0 ^ arg1;
1770 break;
1771
1772 case LSHIFTRT:
1773 /* If shift count is undefined, don't fold it; let the machine do
1774 what it wants. But truncate it if the machine will do that. */
1775 if (arg1 < 0)
1776 return 0;
1777
1778 #ifdef SHIFT_COUNT_TRUNCATED
1779 if (SHIFT_COUNT_TRUNCATED)
1780 arg1 %= width;
1781 #endif
1782
1783 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1784 break;
1785
1786 case ASHIFT:
1787 if (arg1 < 0)
1788 return 0;
1789
1790 #ifdef SHIFT_COUNT_TRUNCATED
1791 if (SHIFT_COUNT_TRUNCATED)
1792 arg1 %= width;
1793 #endif
1794
1795 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1796 break;
1797
1798 case ASHIFTRT:
1799 if (arg1 < 0)
1800 return 0;
1801
1802 #ifdef SHIFT_COUNT_TRUNCATED
1803 if (SHIFT_COUNT_TRUNCATED)
1804 arg1 %= width;
1805 #endif
1806
1807 val = arg0s >> arg1;
1808
1809      /* The bootstrap compiler may not have sign-extended the right shift.
1810	 Manually extend the sign to ensure the bootstrap cc matches gcc.  */
1811 if (arg0s < 0 && arg1 > 0)
1812 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1813
1814 break;
1815
1816 case ROTATERT:
1817 if (arg1 < 0)
1818 return 0;
1819
1820 arg1 %= width;
1821 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1822 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1823 break;
1824
1825 case ROTATE:
1826 if (arg1 < 0)
1827 return 0;
1828
1829 arg1 %= width;
1830 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1831 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1832 break;
1833
1834 case COMPARE:
1835 /* Do nothing here. */
1836 return 0;
1837
1838 case SMIN:
1839 val = arg0s <= arg1s ? arg0s : arg1s;
1840 break;
1841
1842 case UMIN:
1843 val = ((unsigned HOST_WIDE_INT) arg0
1844 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1845 break;
1846
1847 case SMAX:
1848 val = arg0s > arg1s ? arg0s : arg1s;
1849 break;
1850
1851 case UMAX:
1852 val = ((unsigned HOST_WIDE_INT) arg0
1853 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1854 break;
1855
1856 case SS_PLUS:
1857 case US_PLUS:
1858 case SS_MINUS:
1859 case US_MINUS:
1860 /* ??? There are simplifications that can be done. */
1861 return 0;
1862
1863 default:
1864 abort ();
1865 }
1866
1867 val = trunc_int_for_mode (val, mode);
1868
1869 return GEN_INT (val);
1870 }
1871 \f
1872 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1873 PLUS or MINUS.
1874
1875    Rather than testing for specific cases, we do this by a brute-force method
1876 and do all possible simplifications until no more changes occur. Then
1877 we rebuild the operation.
1878
1879 If FORCE is true, then always generate the rtx. This is used to
1880 canonicalize stuff emitted from simplify_gen_binary. Note that this
1881 can still fail if the rtx is too complex. It won't fail just because
1882 the result is not 'simpler' than the input, however. */
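/* For example, (plus (minus (reg A) (reg B)) (reg B)) collapses to
   just (reg A).  */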
1883
1884 struct simplify_plus_minus_op_data
1885 {
1886 rtx op;
1887 int neg;
1888 };
1889
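/* qsort comparison function: order operands by decreasing
   commutative_operand_precedence, so more complex operands come first
   and constants end up last.  */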
1890 static int
1891 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
1892 {
1893 const struct simplify_plus_minus_op_data *d1 = p1;
1894 const struct simplify_plus_minus_op_data *d2 = p2;
1895
1896 return (commutative_operand_precedence (d2->op)
1897 - commutative_operand_precedence (d1->op));
1898 }
1899
1900 static rtx
1901 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
1902 rtx op1, int force)
1903 {
1904 struct simplify_plus_minus_op_data ops[8];
1905 rtx result, tem;
1906 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
1907 int first, negate, changed;
1908 int i, j;
1909
1910 memset ((char *) ops, 0, sizeof ops);
1911
1912 /* Set up the two operands and then expand them until nothing has been
1913 changed. If we run out of room in our array, give up; this should
1914 almost never happen. */
1915
1916 ops[0].op = op0;
1917 ops[0].neg = 0;
1918 ops[1].op = op1;
1919 ops[1].neg = (code == MINUS);
1920
1921 do
1922 {
1923 changed = 0;
1924
1925 for (i = 0; i < n_ops; i++)
1926 {
1927 rtx this_op = ops[i].op;
1928 int this_neg = ops[i].neg;
1929 enum rtx_code this_code = GET_CODE (this_op);
1930
1931 switch (this_code)
1932 {
1933 case PLUS:
1934 case MINUS:
1935 if (n_ops == 7)
1936 return NULL_RTX;
1937
1938 ops[n_ops].op = XEXP (this_op, 1);
1939 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
1940 n_ops++;
1941
1942 ops[i].op = XEXP (this_op, 0);
1943 input_ops++;
1944 changed = 1;
1945 break;
1946
1947 case NEG:
1948 ops[i].op = XEXP (this_op, 0);
1949 ops[i].neg = ! this_neg;
1950 changed = 1;
1951 break;
1952
1953 case CONST:
1954 if (n_ops < 7
1955 && GET_CODE (XEXP (this_op, 0)) == PLUS
1956 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
1957 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
1958 {
1959 ops[i].op = XEXP (XEXP (this_op, 0), 0);
1960 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
1961 ops[n_ops].neg = this_neg;
1962 n_ops++;
1963 input_consts++;
1964 changed = 1;
1965 }
1966 break;
1967
1968 case NOT:
1969 /* ~a -> (-a - 1) */
1970 if (n_ops != 7)
1971 {
1972 ops[n_ops].op = constm1_rtx;
1973 ops[n_ops++].neg = this_neg;
1974 ops[i].op = XEXP (this_op, 0);
1975 ops[i].neg = !this_neg;
1976 changed = 1;
1977 }
1978 break;
1979
1980 case CONST_INT:
1981 if (this_neg)
1982 {
1983 ops[i].op = neg_const_int (mode, this_op);
1984 ops[i].neg = 0;
1985 changed = 1;
1986 }
1987 break;
1988
1989 default:
1990 break;
1991 }
1992 }
1993 }
1994 while (changed);
1995
1996 /* If we only have two operands, we can't do anything. */
1997 if (n_ops <= 2 && !force)
1998 return NULL_RTX;
1999
2000 /* Count the number of CONSTs we didn't split above. */
2001 for (i = 0; i < n_ops; i++)
2002 if (GET_CODE (ops[i].op) == CONST)
2003 input_consts++;
2004
2005 /* Now simplify each pair of operands until nothing changes. The first
2006 time through just simplify constants against each other. */
2007
2008 first = 1;
2009 do
2010 {
2011 changed = first;
2012
2013 for (i = 0; i < n_ops - 1; i++)
2014 for (j = i + 1; j < n_ops; j++)
2015 {
2016 rtx lhs = ops[i].op, rhs = ops[j].op;
2017 int lneg = ops[i].neg, rneg = ops[j].neg;
2018
2019 if (lhs != 0 && rhs != 0
2020 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2021 {
2022 enum rtx_code ncode = PLUS;
2023
2024 if (lneg != rneg)
2025 {
2026 ncode = MINUS;
2027 if (lneg)
2028 tem = lhs, lhs = rhs, rhs = tem;
2029 }
2030 else if (swap_commutative_operands_p (lhs, rhs))
2031 tem = lhs, lhs = rhs, rhs = tem;
2032
2033 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2034
2035 /* Reject "simplifications" that just wrap the two
2036 arguments in a CONST. Failure to do so can result
2037 in infinite recursion with simplify_binary_operation
2038 when it calls us to simplify CONST operations. */
2039 if (tem
2040 && ! (GET_CODE (tem) == CONST
2041 && GET_CODE (XEXP (tem, 0)) == ncode
2042 && XEXP (XEXP (tem, 0), 0) == lhs
2043 && XEXP (XEXP (tem, 0), 1) == rhs)
2044 /* Don't allow -x + -1 -> ~x simplifications in the
2045 first pass. This gives us the chance to combine
2046 the -1 with other constants. */
2047 && ! (first
2048 && GET_CODE (tem) == NOT
2049 && XEXP (tem, 0) == rhs))
2050 {
2051 lneg &= rneg;
2052 if (GET_CODE (tem) == NEG)
2053 tem = XEXP (tem, 0), lneg = !lneg;
2054 if (GET_CODE (tem) == CONST_INT && lneg)
2055 tem = neg_const_int (mode, tem), lneg = 0;
2056
2057 ops[i].op = tem;
2058 ops[i].neg = lneg;
2059 ops[j].op = NULL_RTX;
2060 changed = 1;
2061 }
2062 }
2063 }
2064
2065 first = 0;
2066 }
2067 while (changed);
2068
2069 /* Pack all the operands to the lower-numbered entries. */
2070 for (i = 0, j = 0; j < n_ops; j++)
2071 if (ops[j].op)
2072 ops[i++] = ops[j];
2073 n_ops = i;
2074
2075 /* Sort the operations based on swap_commutative_operands_p. */
2076 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2077
2078 /* We suppressed creation of trivial CONST expressions in the
2079 combination loop to avoid recursion. Create one manually now.
2080 The combination loop should have ensured that there is exactly
2081 one CONST_INT, and the sort will have ensured that it is last
2082 in the array and that any other constant will be next-to-last. */
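/* For instance, if the array ends with (symbol_ref FOO) followed by
   (const_int 8), FOO being an arbitrary symbol, plus_constant combines
   them into the single operand
       (const (plus (symbol_ref FOO) (const_int 8))).
   If the two entries carry opposite signs, the CONST_INT is negated
   before being added.  */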
2083
2084 if (n_ops > 1
2085 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2086 && CONSTANT_P (ops[n_ops - 2].op))
2087 {
2088 rtx value = ops[n_ops - 1].op;
2089 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2090 value = neg_const_int (mode, value);
2091 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2092 n_ops--;
2093 }
2094
2095 /* Count the number of CONSTs that we generated. */
2096 n_consts = 0;
2097 for (i = 0; i < n_ops; i++)
2098 if (GET_CODE (ops[i].op) == CONST)
2099 n_consts++;
2100
2101 /* Give up if we didn't reduce the number of operands we had. Make
2102 sure we count a CONST as two operands. If we have the same
2103 number of operands, but have made more CONSTs than before, this
2104 is also an improvement, so accept it. */
2105 if (!force
2106 && (n_ops + n_consts > input_ops
2107 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2108 return NULL_RTX;
2109
2110 /* Put a non-negated operand first. If there aren't any, make all
2111 operands positive and negate the whole thing later. */
2112
2113 negate = 0;
2114 for (i = 0; i < n_ops && ops[i].neg; i++)
2115 continue;
2116 if (i == n_ops)
2117 {
2118 for (i = 0; i < n_ops; i++)
2119 ops[i].neg = 0;
2120 negate = 1;
2121 }
2122 else if (i != 0)
2123 {
2124 tem = ops[0].op;
2125 ops[0] = ops[i];
2126 ops[i].op = tem;
2127 ops[i].neg = 1;
2128 }
2129
2130 /* Now make the result by performing the requested operations. */
2131 result = ops[0].op;
2132 for (i = 1; i < n_ops; i++)
2133 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2134 mode, result, ops[i].op);
2135
2136 return negate ? gen_rtx_NEG (mode, result) : result;
2137 }
2138
2139 /* Like simplify_binary_operation except used for relational operators.
2140 MODE is the mode of the operands, not that of the result. If MODE
2141 is VOIDmode, both operands must also be VOIDmode and we compare the
2142 operands in "infinite precision".
2143
2144 If no simplification is possible, this function returns zero. Otherwise,
2145 it returns either const_true_rtx or const0_rtx. */
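/* A couple of illustrative cases: comparing two integer constants folds
   outright, e.g. code EQ on (const_int 4) and (const_int 4) yields
   const_true_rtx; and some identities need no constants at all, e.g.
   GEU against (const_int 0) always yields const_true_rtx because
   unsigned values are never negative.  */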
2146
2147 rtx
2148 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2149 rtx op0, rtx op1)
2150 {
2151 int equal, op0lt, op0ltu, op1lt, op1ltu;
2152 rtx tem;
2153 rtx trueop0;
2154 rtx trueop1;
2155
2156 if (mode == VOIDmode
2157 && (GET_MODE (op0) != VOIDmode
2158 || GET_MODE (op1) != VOIDmode))
2159 abort ();
2160
2161 /* If op0 is a compare, extract the comparison arguments from it. */
2162 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2163 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2164
2165 trueop0 = avoid_constant_pool_reference (op0);
2166 trueop1 = avoid_constant_pool_reference (op1);
2167
2168 /* We can't simplify MODE_CC values since we don't know what the
2169 actual comparison is. */
2170 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2171 return 0;
2172
2173 /* Make sure the constant is second. */
2174 if (swap_commutative_operands_p (trueop0, trueop1))
2175 {
2176 tem = op0, op0 = op1, op1 = tem;
2177 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2178 code = swap_condition (code);
2179 }
2180
2181 /* For integer comparisons of A and B maybe we can simplify A - B and can
2182 then simplify a comparison of that with zero. If A and B are both either
2183 a register or a CONST_INT, this can't help; testing for these cases will
2184 prevent infinite recursion here and speed things up.
2185
2186 If CODE is an unsigned comparison, then we can never do this optimization,
2187 because it gives an incorrect result if the subtraction wraps around zero.
2188 ANSI C defines unsigned operations such that they never overflow, and
2189 thus such cases cannot be ignored. */
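/* For instance, with A an arbitrary REG, (gt:SI (plus A (const_int 4)) A)
   is handled by simplifying the difference to (const_int 4) and then
   recursively simplifying (gt (const_int 4) (const_int 0)), which
   yields const_true_rtx.  */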
2190
2191 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2192 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2193 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2194 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2195 && code != GTU && code != GEU && code != LTU && code != LEU)
2196 return simplify_relational_operation (signed_condition (code),
2197 mode, tem, const0_rtx);
2198
2199 if (flag_unsafe_math_optimizations && code == ORDERED)
2200 return const_true_rtx;
2201
2202 if (flag_unsafe_math_optimizations && code == UNORDERED)
2203 return const0_rtx;
2204
2205 /* For modes without NaNs, if the two operands are equal, we know the
2206 result, unless they have side effects. */
2207 if (! HONOR_NANS (GET_MODE (trueop0))
2208 && rtx_equal_p (trueop0, trueop1)
2209 && ! side_effects_p (trueop0))
2210 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2211
2212 /* If the operands are floating-point constants, see if we can fold
2213 the result. */
2214 else if (GET_CODE (trueop0) == CONST_DOUBLE
2215 && GET_CODE (trueop1) == CONST_DOUBLE
2216 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2217 {
2218 REAL_VALUE_TYPE d0, d1;
2219
2220 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2221 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2222
2223 /* Comparisons are unordered iff at least one of the values is NaN. */
2224 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2225 switch (code)
2226 {
2227 case UNEQ:
2228 case UNLT:
2229 case UNGT:
2230 case UNLE:
2231 case UNGE:
2232 case NE:
2233 case UNORDERED:
2234 return const_true_rtx;
2235 case EQ:
2236 case LT:
2237 case GT:
2238 case LE:
2239 case GE:
2240 case LTGT:
2241 case ORDERED:
2242 return const0_rtx;
2243 default:
2244 return 0;
2245 }
2246
2247 equal = REAL_VALUES_EQUAL (d0, d1);
2248 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2249 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2250 }
2251
2252 /* Otherwise, see if the operands are both integers. */
2253 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2254 && (GET_CODE (trueop0) == CONST_DOUBLE
2255 || GET_CODE (trueop0) == CONST_INT)
2256 && (GET_CODE (trueop1) == CONST_DOUBLE
2257 || GET_CODE (trueop1) == CONST_INT))
2258 {
2259 int width = GET_MODE_BITSIZE (mode);
2260 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2261 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2262
2263 /* Get the two words comprising each integer constant. */
2264 if (GET_CODE (trueop0) == CONST_DOUBLE)
2265 {
2266 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2267 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2268 }
2269 else
2270 {
2271 l0u = l0s = INTVAL (trueop0);
2272 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2273 }
2274
2275 if (GET_CODE (trueop1) == CONST_DOUBLE)
2276 {
2277 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2278 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2279 }
2280 else
2281 {
2282 l1u = l1s = INTVAL (trueop1);
2283 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2284 }
2285
2286 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2287 we have to sign or zero-extend the values. */
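/* E.g. for QImode (width 8), the CONST_INT -1 becomes 0xff in l0u but
   stays -1 in l0s, so the same operand compares as 255 in the unsigned
   orderings and as -1 in the signed ones.  */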
2288 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2289 {
2290 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2291 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2292
2293 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2294 l0s |= ((HOST_WIDE_INT) (-1) << width);
2295
2296 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2297 l1s |= ((HOST_WIDE_INT) (-1) << width);
2298 }
2299 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2300 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2301
2302 equal = (h0u == h1u && l0u == l1u);
2303 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2304 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2305 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2306 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2307 }
2308
2309 /* Otherwise, there are some code-specific tests we can make. */
2310 else
2311 {
2312 switch (code)
2313 {
2314 case EQ:
2315 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2316 return const0_rtx;
2317 break;
2318
2319 case NE:
2320 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2321 return const_true_rtx;
2322 break;
2323
2324 case GEU:
2325 /* Unsigned values are never negative. */
2326 if (trueop1 == const0_rtx)
2327 return const_true_rtx;
2328 break;
2329
2330 case LTU:
2331 if (trueop1 == const0_rtx)
2332 return const0_rtx;
2333 break;
2334
2335 case LEU:
2336 /* Unsigned values are never greater than the largest
2337 unsigned value. */
2338 if (GET_CODE (trueop1) == CONST_INT
2339 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2340 && INTEGRAL_MODE_P (mode))
2341 return const_true_rtx;
2342 break;
2343
2344 case GTU:
2345 if (GET_CODE (trueop1) == CONST_INT
2346 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2347 && INTEGRAL_MODE_P (mode))
2348 return const0_rtx;
2349 break;
2350
2351 case LT:
2352 /* Optimize abs(x) < 0.0. */
2353 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2354 {
2355 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2356 : trueop0;
2357 if (GET_CODE (tem) == ABS)
2358 return const0_rtx;
2359 }
2360 break;
2361
2362 case GE:
2363 /* Optimize abs(x) >= 0.0. */
2364 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2365 {
2366 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2367 : trueop0;
2368 if (GET_CODE (tem) == ABS)
2369 return const_true_rtx;
2370 }
2371 break;
2372
2373 case UNGE:
2374 /* Optimize ! (abs(x) < 0.0). */
2375 if (trueop1 == CONST0_RTX (mode))
2376 {
2377 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2378 : trueop0;
2379 if (GET_CODE (tem) == ABS)
2380 return const_true_rtx;
2381 }
2382 break;
2383
2384 default:
2385 break;
2386 }
2387
2388 return 0;
2389 }
2390
2391 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2392 as appropriate. */
2393 switch (code)
2394 {
2395 case EQ:
2396 case UNEQ:
2397 return equal ? const_true_rtx : const0_rtx;
2398 case NE:
2399 case LTGT:
2400 return ! equal ? const_true_rtx : const0_rtx;
2401 case LT:
2402 case UNLT:
2403 return op0lt ? const_true_rtx : const0_rtx;
2404 case GT:
2405 case UNGT:
2406 return op1lt ? const_true_rtx : const0_rtx;
2407 case LTU:
2408 return op0ltu ? const_true_rtx : const0_rtx;
2409 case GTU:
2410 return op1ltu ? const_true_rtx : const0_rtx;
2411 case LE:
2412 case UNLE:
2413 return equal || op0lt ? const_true_rtx : const0_rtx;
2414 case GE:
2415 case UNGE:
2416 return equal || op1lt ? const_true_rtx : const0_rtx;
2417 case LEU:
2418 return equal || op0ltu ? const_true_rtx : const0_rtx;
2419 case GEU:
2420 return equal || op1ltu ? const_true_rtx : const0_rtx;
2421 case ORDERED:
2422 return const_true_rtx;
2423 case UNORDERED:
2424 return const0_rtx;
2425 default:
2426 abort ();
2427 }
2428 }
2429 \f
2430 /* Simplify CODE, an operation with result mode MODE and three operands,
2431 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2432 a constant. Return 0 if no simplification is possible. */
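/* A small worked example, assuming !BITS_BIG_ENDIAN: extracting a
   4-bit field at bit position 4 from a constant,
       (zero_extract (const_int 0x1234) (const_int 4) (const_int 4)),
   shifts the value right by 4 and masks it down to four bits, giving
   (const_int 3).  */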
2433
2434 rtx
2435 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2436 enum machine_mode op0_mode, rtx op0, rtx op1,
2437 rtx op2)
2438 {
2439 unsigned int width = GET_MODE_BITSIZE (mode);
2440
2441 /* VOIDmode means "infinite" precision. */
2442 if (width == 0)
2443 width = HOST_BITS_PER_WIDE_INT;
2444
2445 switch (code)
2446 {
2447 case SIGN_EXTRACT:
2448 case ZERO_EXTRACT:
2449 if (GET_CODE (op0) == CONST_INT
2450 && GET_CODE (op1) == CONST_INT
2451 && GET_CODE (op2) == CONST_INT
2452 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2453 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2454 {
2455 /* Extracting a bit-field from a constant */
2456 HOST_WIDE_INT val = INTVAL (op0);
2457
2458 if (BITS_BIG_ENDIAN)
2459 val >>= (GET_MODE_BITSIZE (op0_mode)
2460 - INTVAL (op2) - INTVAL (op1));
2461 else
2462 val >>= INTVAL (op2);
2463
2464 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2465 {
2466 /* First zero-extend. */
2467 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2468 /* If desired, propagate sign bit. */
2469 if (code == SIGN_EXTRACT
2470 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2471 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2472 }
2473
2474 /* Clear the bits that don't belong in our mode,
2475 unless they and our sign bit are all one.
2476 So we get either a reasonable negative value or a reasonable
2477 unsigned value for this mode. */
2478 if (width < HOST_BITS_PER_WIDE_INT
2479 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2480 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2481 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2482
2483 return GEN_INT (val);
2484 }
2485 break;
2486
2487 case IF_THEN_ELSE:
2488 if (GET_CODE (op0) == CONST_INT)
2489 return op0 != const0_rtx ? op1 : op2;
2490
2491 /* Convert a == b ? b : a to "a". */
2492 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2493 && !HONOR_NANS (mode)
2494 && rtx_equal_p (XEXP (op0, 0), op1)
2495 && rtx_equal_p (XEXP (op0, 1), op2))
2496 return op1;
2497 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2498 && !HONOR_NANS (mode)
2499 && rtx_equal_p (XEXP (op0, 1), op1)
2500 && rtx_equal_p (XEXP (op0, 0), op2))
2501 return op2;
2502 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2503 {
2504 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2505 ? GET_MODE (XEXP (op0, 1))
2506 : GET_MODE (XEXP (op0, 0)));
2507 rtx temp;
2508 if (cmp_mode == VOIDmode)
2509 cmp_mode = op0_mode;
2510 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2511 XEXP (op0, 0), XEXP (op0, 1));
2512
2513 /* See if any simplifications were possible. */
2514 if (temp == const0_rtx)
2515 return op2;
2516 else if (temp == const1_rtx)
2517 return op1;
2518 else if (temp)
2519 op0 = temp;
2520
2521 /* Look for happy constants in op1 and op2. */
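/* If the arms are STORE_FLAG_VALUE and zero, the whole IF_THEN_ELSE is
   just the comparison itself (or its reverse).  E.g. when
   STORE_FLAG_VALUE == 1,
       (if_then_else (lt X Y) (const_int 1) (const_int 0))
   becomes (lt X Y), with X and Y arbitrary operands.  */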
2522 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2523 {
2524 HOST_WIDE_INT t = INTVAL (op1);
2525 HOST_WIDE_INT f = INTVAL (op2);
2526
2527 if (t == STORE_FLAG_VALUE && f == 0)
2528 code = GET_CODE (op0);
2529 else if (t == 0 && f == STORE_FLAG_VALUE)
2530 {
2531 enum rtx_code tmp;
2532 tmp = reversed_comparison_code (op0, NULL_RTX);
2533 if (tmp == UNKNOWN)
2534 break;
2535 code = tmp;
2536 }
2537 else
2538 break;
2539
2540 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2541 }
2542 }
2543 break;
2544 case VEC_MERGE:
2545 if (GET_MODE (op0) != mode
2546 || GET_MODE (op1) != mode
2547 || !VECTOR_MODE_P (mode))
2548 abort ();
2549 op2 = avoid_constant_pool_reference (op2);
2550 if (GET_CODE (op2) == CONST_INT)
2551 {
2552 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2553 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2554 int mask = (1 << n_elts) - 1;
2555
2556 if (!(INTVAL (op2) & mask))
2557 return op1;
2558 if ((INTVAL (op2) & mask) == mask)
2559 return op0;
2560
2561 op0 = avoid_constant_pool_reference (op0);
2562 op1 = avoid_constant_pool_reference (op1);
2563 if (GET_CODE (op0) == CONST_VECTOR
2564 && GET_CODE (op1) == CONST_VECTOR)
2565 {
2566 rtvec v = rtvec_alloc (n_elts);
2567 unsigned int i;
2568
2569 for (i = 0; i < n_elts; i++)
2570 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2571 ? CONST_VECTOR_ELT (op0, i)
2572 : CONST_VECTOR_ELT (op1, i));
2573 return gen_rtx_CONST_VECTOR (mode, v);
2574 }
2575 }
2576 break;
2577
2578 default:
2579 abort ();
2580 }
2581
2582 return 0;
2583 }
2584
2585 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2586 Return 0 if no simplification is possible. */
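/* Illustrative calls, assuming a little-endian target:
   simplify_subreg (QImode, GEN_INT (0x1234), SImode, 0) folds to
   (const_int 0x34), i.e. the low byte of the constant; a SUBREG of a
   SUBREG is collapsed into a single SUBREG further below.  */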
2587 rtx
2588 simplify_subreg (enum machine_mode outermode, rtx op,
2589 enum machine_mode innermode, unsigned int byte)
2590 {
2591 /* Little bit of sanity checking. */
2592 if (innermode == VOIDmode || outermode == VOIDmode
2593 || innermode == BLKmode || outermode == BLKmode)
2594 abort ();
2595
2596 if (GET_MODE (op) != innermode
2597 && GET_MODE (op) != VOIDmode)
2598 abort ();
2599
2600 if (byte % GET_MODE_SIZE (outermode)
2601 || byte >= GET_MODE_SIZE (innermode))
2602 abort ();
2603
2604 if (outermode == innermode && !byte)
2605 return op;
2606
2607 /* Simplify subregs of vector constants. */
2608 if (GET_CODE (op) == CONST_VECTOR)
2609 {
2610 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
2611 const unsigned int offset = byte / elt_size;
2612 rtx elt;
2613
2614 if (GET_MODE_INNER (innermode) == outermode)
2615 {
2616 elt = CONST_VECTOR_ELT (op, offset);
2617
2618 /* ?? We probably don't need this copy_rtx because constants
2619 can be shared. ?? */
2620
2621 return copy_rtx (elt);
2622 }
2623 else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
2624 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
2625 {
2626 return (gen_rtx_CONST_VECTOR
2627 (outermode,
2628 gen_rtvec_v (GET_MODE_NUNITS (outermode),
2629 &CONST_VECTOR_ELT (op, offset))));
2630 }
2631 else if (GET_MODE_CLASS (outermode) == MODE_INT
2632 && (GET_MODE_SIZE (outermode) % elt_size == 0))
2633 {
2634 /* This happens when the target register size is smaller than
2635 the vector mode, and we synthesize operations with vectors
2636 of elements that are smaller than the register size. */
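/* For example, on a little-endian target with 8-bit QImode elements,
   an SImode subreg at byte 0 of (const_vector:V4QI [1 2 3 4]) packs
   the four elements into the single integer (const_int 0x04030201).  */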
2637 HOST_WIDE_INT sum = 0, high = 0;
2638 unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
2639 unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
2640 unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
2641 int shift = BITS_PER_UNIT * elt_size;
2642 unsigned HOST_WIDE_INT unit_mask;
2643
2644 unit_mask = (unsigned HOST_WIDE_INT) -1
2645 >> (sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - shift);
2646
2647 for (; n_elts--; i += step)
2648 {
2649 elt = CONST_VECTOR_ELT (op, i);
2650 if (GET_CODE (elt) == CONST_DOUBLE
2651 && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
2652 {
2653 elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
2654 elt);
2655 if (! elt)
2656 return NULL_RTX;
2657 }
2658 if (GET_CODE (elt) != CONST_INT)
2659 return NULL_RTX;
2660 /* Avoid overflow. */
2661 if (high >> (HOST_BITS_PER_WIDE_INT - shift))
2662 return NULL_RTX;
2663 high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
2664 sum = (sum << shift) + (INTVAL (elt) & unit_mask);
2665 }
2666 if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
2667 return GEN_INT (trunc_int_for_mode (sum, outermode));
2668 else if (GET_MODE_BITSIZE (outermode) == 2* HOST_BITS_PER_WIDE_INT)
2669 return immed_double_const (sum, high, outermode);
2670 else
2671 return NULL_RTX;
2672 }
2673 else if (GET_MODE_CLASS (outermode) == MODE_INT
2674 && (elt_size % GET_MODE_SIZE (outermode) == 0))
2675 {
2676 enum machine_mode new_mode
2677 = int_mode_for_mode (GET_MODE_INNER (innermode));
2678 int subbyte = byte % elt_size;
2679
2680 op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
2681 if (! op)
2682 return NULL_RTX;
2683 return simplify_subreg (outermode, op, new_mode, subbyte);
2684 }
2685 else if (GET_MODE_CLASS (outermode) == MODE_INT)
2686 /* This shouldn't happen, but let's not do anything stupid. */
2687 return NULL_RTX;
2688 }
2689
2690 /* Attempt to simplify constant to non-SUBREG expression. */
2691 if (CONSTANT_P (op))
2692 {
2693 int offset, part;
2694 unsigned HOST_WIDE_INT val = 0;
2695
2696 if (VECTOR_MODE_P (outermode))
2697 {
2698 /* Construct a CONST_VECTOR from individual subregs. */
2699 enum machine_mode submode = GET_MODE_INNER (outermode);
2700 int subsize = GET_MODE_UNIT_SIZE (outermode);
2701 int i, elts = GET_MODE_NUNITS (outermode);
2702 rtvec v = rtvec_alloc (elts);
2703 rtx elt;
2704
2705 for (i = 0; i < elts; i++, byte += subsize)
2706 {
2707 /* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
2708 /* ??? It would be nice if we could actually make such subregs
2709 on targets that allow such relocations. */
2710 if (byte >= GET_MODE_SIZE (innermode))
2711 elt = CONST0_RTX (submode);
2712 else
2713 elt = simplify_subreg (submode, op, innermode, byte);
2714 if (! elt)
2715 return NULL_RTX;
2716 RTVEC_ELT (v, i) = elt;
2717 }
2718 return gen_rtx_CONST_VECTOR (outermode, v);
2719 }
2720
2721 /* ??? This code is partly redundant with code below, but can handle
2722 the subregs of floats and similar corner cases.
2723 Later we should move all simplification code here and rewrite
2724 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2725 using SIMPLIFY_SUBREG. */
2726 if (subreg_lowpart_offset (outermode, innermode) == byte
2727 && GET_CODE (op) != CONST_VECTOR)
2728 {
2729 rtx new = gen_lowpart_if_possible (outermode, op);
2730 if (new)
2731 return new;
2732 }
2733
2734 /* The comment above applies here as well. */
2735 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2736 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2737 && GET_MODE_CLASS (outermode) == MODE_INT)
2738 {
2739 rtx new = constant_subword (op,
2740 (byte / UNITS_PER_WORD),
2741 innermode);
2742 if (new)
2743 return new;
2744 }
2745
2746 if (GET_MODE_CLASS (outermode) != MODE_INT
2747 && GET_MODE_CLASS (outermode) != MODE_CC)
2748 {
2749 enum machine_mode new_mode = int_mode_for_mode (outermode);
2750
2751 if (new_mode != innermode || byte != 0)
2752 {
2753 op = simplify_subreg (new_mode, op, innermode, byte);
2754 if (! op)
2755 return NULL_RTX;
2756 return simplify_subreg (outermode, op, new_mode, 0);
2757 }
2758 }
2759
2760 offset = byte * BITS_PER_UNIT;
2761 switch (GET_CODE (op))
2762 {
2763 case CONST_DOUBLE:
2764 if (GET_MODE (op) != VOIDmode)
2765 break;
2766
2767 /* We can't handle this case yet. */
2768 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2769 return NULL_RTX;
2770
2771 part = offset >= HOST_BITS_PER_WIDE_INT;
2772 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2773 && BYTES_BIG_ENDIAN)
2774 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2775 && WORDS_BIG_ENDIAN))
2776 part = !part;
2777 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2778 offset %= HOST_BITS_PER_WIDE_INT;
2779
2780 /* We've already picked the word we want from a double, so
2781 pretend this is actually an integer. */
2782 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2783
2784 /* FALLTHROUGH */
2785 case CONST_INT:
2786 if (GET_CODE (op) == CONST_INT)
2787 val = INTVAL (op);
2788
2789 /* We don't handle synthesizing of non-integral constants yet. */
2790 if (GET_MODE_CLASS (outermode) != MODE_INT)
2791 return NULL_RTX;
2792
2793 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2794 {
2795 if (WORDS_BIG_ENDIAN)
2796 offset = (GET_MODE_BITSIZE (innermode)
2797 - GET_MODE_BITSIZE (outermode) - offset);
2798 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2799 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2800 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2801 - 2 * (offset % BITS_PER_WORD));
2802 }
2803
2804 if (offset >= HOST_BITS_PER_WIDE_INT)
2805 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2806 else
2807 {
2808 val >>= offset;
2809 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2810 val = trunc_int_for_mode (val, outermode);
2811 return GEN_INT (val);
2812 }
2813 default:
2814 break;
2815 }
2816 }
2817
2818 /* Changing mode twice with SUBREG => just change it once,
2819 or not at all if changing back to the starting mode. */
2820 if (GET_CODE (op) == SUBREG)
2821 {
2822 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2823 int final_offset = byte + SUBREG_BYTE (op);
2824 rtx new;
2825
2826 if (outermode == innermostmode
2827 && byte == 0 && SUBREG_BYTE (op) == 0)
2828 return SUBREG_REG (op);
2829
2830 /* The SUBREG_BYTE represents the offset, as if the value were stored
2831 in memory. An irritating exception is the paradoxical subreg, where
2832 we define SUBREG_BYTE to be 0; on big-endian machines this value
2833 should really be negative. For a moment, undo this exception. */
2834 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2835 {
2836 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2837 if (WORDS_BIG_ENDIAN)
2838 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2839 if (BYTES_BIG_ENDIAN)
2840 final_offset += difference % UNITS_PER_WORD;
2841 }
2842 if (SUBREG_BYTE (op) == 0
2843 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2844 {
2845 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2846 if (WORDS_BIG_ENDIAN)
2847 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2848 if (BYTES_BIG_ENDIAN)
2849 final_offset += difference % UNITS_PER_WORD;
2850 }
2851
2852 /* See whether resulting subreg will be paradoxical. */
2853 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2854 {
2855 /* In nonparadoxical subregs we can't handle negative offsets. */
2856 if (final_offset < 0)
2857 return NULL_RTX;
2858 /* Bail out in case resulting subreg would be incorrect. */
2859 if (final_offset % GET_MODE_SIZE (outermode)
2860 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2861 return NULL_RTX;
2862 }
2863 else
2864 {
2865 int offset = 0;
2866 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2867
2868 /* In a paradoxical subreg, see if we are still looking at the lower part.
2869 If so, our SUBREG_BYTE will be 0. */
2870 if (WORDS_BIG_ENDIAN)
2871 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2872 if (BYTES_BIG_ENDIAN)
2873 offset += difference % UNITS_PER_WORD;
2874 if (offset == final_offset)
2875 final_offset = 0;
2876 else
2877 return NULL_RTX;
2878 }
2879
2880 /* Recurse for further possible simplifications. */
2881 new = simplify_subreg (outermode, SUBREG_REG (op),
2882 GET_MODE (SUBREG_REG (op)),
2883 final_offset);
2884 if (new)
2885 return new;
2886 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2887 }
2888
2889 /* SUBREG of a hard register => just change the register number
2890 and/or mode. If the hard register is not valid in that mode,
2891 suppress this simplification. If the hard register is the stack,
2892 frame, or argument pointer, leave this as a SUBREG. */
2893
2894 if (REG_P (op)
2895 && (! REG_FUNCTION_VALUE_P (op)
2896 || ! rtx_equal_function_value_matters)
2897 && REGNO (op) < FIRST_PSEUDO_REGISTER
2898 #ifdef CANNOT_CHANGE_MODE_CLASS
2899 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
2900 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2901 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
2902 #endif
2903 && ((reload_completed && !frame_pointer_needed)
2904 || (REGNO (op) != FRAME_POINTER_REGNUM
2905 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2906 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2907 #endif
2908 ))
2909 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2910 && REGNO (op) != ARG_POINTER_REGNUM
2911 #endif
2912 && REGNO (op) != STACK_POINTER_REGNUM)
2913 {
2914 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2915 0);
2916
2917 /* ??? We do allow it if the current REG is not valid for
2918 its mode. This is a kludge to work around how float/complex
2919 arguments are passed on 32-bit SPARC and should be fixed. */
2920 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2921 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2922 {
2923 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
2924
2925 /* Propagate original regno. We don't have any way to specify
2926 the offset inside the original regno, so do so only for the lowpart.
2927 The information is used only by alias analysis, which cannot
2928 grok partial registers anyway. */
2929
2930 if (subreg_lowpart_offset (outermode, innermode) == byte)
2931 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
2932 return x;
2933 }
2934 }
2935
2936 /* If we have a SUBREG of a register that we are replacing and we are
2937 replacing it with a MEM, make a new MEM and try replacing the
2938 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2939 or if we would be widening it. */
2940
2941 if (GET_CODE (op) == MEM
2942 && ! mode_dependent_address_p (XEXP (op, 0))
2943 /* Allow splitting of volatile memory references in case we don't
2944 have an instruction to move the whole thing. */
2945 && (! MEM_VOLATILE_P (op)
2946 || ! have_insn_for (SET, innermode))
2947 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2948 return adjust_address_nv (op, outermode, byte);
2949
2950 /* Handle complex values represented as CONCAT
2951 of real and imaginary parts. */
2952 if (GET_CODE (op) == CONCAT)
2953 {
2954 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2955 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2956 unsigned int final_offset;
2957 rtx res;
2958
2959 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2960 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2961 if (res)
2962 return res;
2963 /* We can at least simplify it by referring directly to the relevant part. */
2964 return gen_rtx_SUBREG (outermode, part, final_offset);
2965 }
2966
2967 return NULL_RTX;
2968 }
2969 /* Make a SUBREG operation or equivalent if it folds. */
2970
2971 rtx
2972 simplify_gen_subreg (enum machine_mode outermode, rtx op,
2973 enum machine_mode innermode, unsigned int byte)
2974 {
2975 rtx new;
2976 /* Little bit of sanity checking. */
2977 if (innermode == VOIDmode || outermode == VOIDmode
2978 || innermode == BLKmode || outermode == BLKmode)
2979 abort ();
2980
2981 if (GET_MODE (op) != innermode
2982 && GET_MODE (op) != VOIDmode)
2983 abort ();
2984
2985 if (byte % GET_MODE_SIZE (outermode)
2986 || byte >= GET_MODE_SIZE (innermode))
2987 abort ();
2988
2989 if (GET_CODE (op) == QUEUED)
2990 return NULL_RTX;
2991
2992 new = simplify_subreg (outermode, op, innermode, byte);
2993 if (new)
2994 return new;
2995
2996 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2997 return NULL_RTX;
2998
2999 return gen_rtx_SUBREG (outermode, op, byte);
3000 }
3001 /* Simplify X, an rtx expression.
3002
3003 Return the simplified expression or NULL if no simplifications
3004 were possible.
3005
3006 This is the preferred entry point into the simplification routines;
3007 however, we still allow passes to call the more specific routines.
3008
3009 Right now GCC has three (yes, three) major bodies of RTL simplification
3010 code that need to be unified.
3011
3012 1. fold_rtx in cse.c. This code uses various CSE specific
3013 information to aid in RTL simplification.
3014
3015 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3016 it uses combine specific information to aid in RTL
3017 simplification.
3018
3019 3. The routines in this file.
3020
3021
3022 Long term we want to only have one body of simplification code; to
3023 get to that state I recommend the following steps:
3024
3025 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3026 which do not depend on pass-specific state into these routines.
3027
3028 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3029 use this routine whenever possible.
3030
3031 3. Allow for pass dependent state to be provided to these
3032 routines and add simplifications based on the pass dependent
3033 state. Remove code from cse.c & combine.c that becomes
3034 redundant/dead.
3035
3036 It will take time, but ultimately the compiler will be easier to
3037 maintain and improve. It is silly that when we add a simplification
3038 it needs to be added in four places (three for RTL simplification
3039 and one for tree simplification). */
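/* A minimal usage sketch (X stands for any fully built rtx):

     rtx tem = simplify_rtx (x);
     if (tem)
       x = tem;

   For instance, (plus:SI (const_int 4) (reg:SI 60)) comes back with
   the constant second, and (plus:SI (reg:SI 60) (const_int 0)) folds
   to (reg:SI 60); the register number 60 is, of course, arbitrary.  */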
3040
3041 rtx
3042 simplify_rtx (rtx x)
3043 {
3044 enum rtx_code code = GET_CODE (x);
3045 enum machine_mode mode = GET_MODE (x);
3046 rtx temp;
3047
3048 switch (GET_RTX_CLASS (code))
3049 {
3050 case '1':
3051 return simplify_unary_operation (code, mode,
3052 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3053 case 'c':
3054 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3055 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3056
3057 /* ... fall through ... */
3058
3059 case '2':
3060 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3061
3062 case '3':
3063 case 'b':
3064 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3065 XEXP (x, 0), XEXP (x, 1),
3066 XEXP (x, 2));
3067
3068 case '<':
3069 temp = simplify_relational_operation (code,
3070 ((GET_MODE (XEXP (x, 0))
3071 != VOIDmode)
3072 ? GET_MODE (XEXP (x, 0))
3073 : GET_MODE (XEXP (x, 1))),
3074 XEXP (x, 0), XEXP (x, 1));
3075 #ifdef FLOAT_STORE_FLAG_VALUE
3076 if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3077 {
3078 if (temp == const0_rtx)
3079 temp = CONST0_RTX (mode);
3080 else
3081 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3082 mode);
3083 }
3084 #endif
3085 return temp;
3086
3087 case 'x':
3088 if (code == SUBREG)
3089 return simplify_gen_subreg (mode, SUBREG_REG (x),
3090 GET_MODE (SUBREG_REG (x)),
3091 SUBREG_BYTE (x));
3092 if (code == CONSTANT_P_RTX)
3093 {
3094 if (CONSTANT_P (XEXP (x, 0)))
3095 return const1_rtx;
3096 }
3097 break;
3098
3099 case 'o':
3100 if (code == LO_SUM)
3101 {
3102 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3103 if (GET_CODE (XEXP (x, 0)) == HIGH
3104 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3105 return XEXP (x, 1);
3106 }
3107 break;
3108
3109 default:
3110 break;
3111 }
3112 return NULL;
3113 }