gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
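/* For example, a LOW value whose most significant bit is set yields
   (HOST_WIDE_INT) -1, and any other value yields 0 -- exactly the high
   half that a signed widening of LOW would produce.  */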
51
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
55 rtx, int);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 \f
63 /* Negate a CONST_INT rtx, truncating the result (because negating the
64 most negative number can overflow). */
65 static rtx
66 neg_const_int (enum machine_mode mode, rtx i)
67 {
68 return gen_int_mode (- INTVAL (i), mode);
69 }
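/* For example, negating the QImode constant -128 gives 128, which does not
   fit in QImode; gen_int_mode truncates it back to -128, matching the
   wrap-around of a real negation instruction.  */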
70
71 /* Test whether expression, X, is an immediate constant that represents
72 the most significant bit of machine mode MODE. */
73
74 bool
75 mode_signbit_p (enum machine_mode mode, rtx x)
76 {
77 unsigned HOST_WIDE_INT val;
78 unsigned int width;
79
80 if (GET_MODE_CLASS (mode) != MODE_INT)
81 return false;
82
83 width = GET_MODE_BITSIZE (mode);
84 if (width == 0)
85 return false;
86
87 if (width <= HOST_BITS_PER_WIDE_INT
88 && GET_CODE (x) == CONST_INT)
89 val = INTVAL (x);
90 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_DOUBLE
92 && CONST_DOUBLE_LOW (x) == 0)
93 {
94 val = CONST_DOUBLE_HIGH (x);
95 width -= HOST_BITS_PER_WIDE_INT;
96 }
97 else
98 return false;
99
100 if (width < HOST_BITS_PER_WIDE_INT)
101 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
102 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
103 }
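/* For example, in SImode this accepts exactly the constant whose low 32 bits
   are 0x80000000, i.e. the value with only the sign bit set.  */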
104 \f
105 /* Make a binary operation by properly ordering the operands and
106 seeing if the expression folds. */
107
108 rtx
109 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
110 rtx op1)
111 {
112 rtx tem;
113
114 /* Put complex operands first and constants second if commutative. */
115 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
116 && swap_commutative_operands_p (op0, op1))
117 tem = op0, op0 = op1, op1 = tem;
118
119 /* If this simplifies, do it. */
120 tem = simplify_binary_operation (code, mode, op0, op1);
121 if (tem)
122 return tem;
123
124 /* Handle addition and subtraction specially. Otherwise, just form
125 the operation. */
126
127 if (code == PLUS || code == MINUS)
128 {
129 tem = simplify_plus_minus (code, mode, op0, op1, 1);
130 if (tem)
131 return tem;
132 }
133
134 return gen_rtx_fmt_ee (code, mode, op0, op1);
135 }
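/* For example, simplify_gen_binary (PLUS, SImode, x, const0_rtx) returns X
   unchanged, while an addition that cannot be folded comes back as a fresh
   (plus ...) rtx with any constant operand placed second.  */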
136 \f
137 /* If X is a MEM referencing the constant pool, return the real value.
138 Otherwise return X. */
139 rtx
140 avoid_constant_pool_reference (rtx x)
141 {
142 rtx c, tmp, addr;
143 enum machine_mode cmode;
144
145 switch (GET_CODE (x))
146 {
147 case MEM:
148 break;
149
150 case FLOAT_EXTEND:
151 /* Handle float extensions of constant pool references. */
152 tmp = XEXP (x, 0);
153 c = avoid_constant_pool_reference (tmp);
154 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
155 {
156 REAL_VALUE_TYPE d;
157
158 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
159 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
160 }
161 return x;
162
163 default:
164 return x;
165 }
166
167 addr = XEXP (x, 0);
168
169 /* Call target hook to avoid the effects of -fpic etc.... */
170 addr = targetm.delegitimize_address (addr);
171
172 if (GET_CODE (addr) == LO_SUM)
173 addr = XEXP (addr, 1);
174
175 if (GET_CODE (addr) != SYMBOL_REF
176 || ! CONSTANT_POOL_ADDRESS_P (addr))
177 return x;
178
179 c = get_pool_constant (addr);
180 cmode = get_pool_mode (addr);
181
182 /* If we're accessing the constant in a different mode than it was
183 originally stored, attempt to fix that up via subreg simplifications.
184 If that fails we have no choice but to return the original memory. */
185 if (cmode != GET_MODE (x))
186 {
187 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
188 return c ? c : x;
189 }
190
191 return c;
192 }
193 \f
194 /* Make a unary operation by first seeing if it folds and otherwise making
195 the specified operation. */
196
197 rtx
198 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
199 enum machine_mode op_mode)
200 {
201 rtx tem;
202
203 /* If this simplifies, use it. */
204 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
205 return tem;
206
207 return gen_rtx_fmt_e (code, mode, op);
208 }
209
210 /* Likewise for ternary operations. */
211
212 rtx
213 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
214 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
215 {
216 rtx tem;
217
218 /* If this simplifies, use it. */
219 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
220 op0, op1, op2)))
221 return tem;
222
223 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
224 }
225
226 /* Likewise, for relational operations.
227 CMP_MODE specifies mode comparison is done in. */
228
229 rtx
230 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
231 enum machine_mode cmp_mode, rtx op0, rtx op1)
232 {
233 rtx tem;
234
235 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
236 op0, op1)))
237 return tem;
238
239 return gen_rtx_fmt_ee (code, mode, op0, op1);
240 }
241 \f
242 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
243 resulting RTX. Return a new RTX which is as simplified as possible. */
244
245 rtx
246 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
247 {
248 enum rtx_code code = GET_CODE (x);
249 enum machine_mode mode = GET_MODE (x);
250 enum machine_mode op_mode;
251 rtx op0, op1, op2;
252
253 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
254 to build a new expression substituting recursively. If we can't do
255 anything, return our input. */
256
257 if (x == old_rtx)
258 return new_rtx;
259
260 switch (GET_RTX_CLASS (code))
261 {
262 case RTX_UNARY:
263 op0 = XEXP (x, 0);
264 op_mode = GET_MODE (op0);
265 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
266 if (op0 == XEXP (x, 0))
267 return x;
268 return simplify_gen_unary (code, mode, op0, op_mode);
269
270 case RTX_BIN_ARITH:
271 case RTX_COMM_ARITH:
272 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
273 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
274 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
275 return x;
276 return simplify_gen_binary (code, mode, op0, op1);
277
278 case RTX_COMPARE:
279 case RTX_COMM_COMPARE:
280 op0 = XEXP (x, 0);
281 op1 = XEXP (x, 1);
282 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
283 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
284 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
285 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
286 return x;
287 return simplify_gen_relational (code, mode, op_mode, op0, op1);
288
289 case RTX_TERNARY:
290 case RTX_BITFIELD_OPS:
291 op0 = XEXP (x, 0);
292 op_mode = GET_MODE (op0);
293 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
294 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
295 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
296 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
297 return x;
298 if (op_mode == VOIDmode)
299 op_mode = GET_MODE (op0);
300 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
301
302 case RTX_EXTRA:
303 /* The only case we try to handle is a SUBREG. */
304 if (code == SUBREG)
305 {
306 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
307 if (op0 == SUBREG_REG (x))
308 return x;
309 op0 = simplify_gen_subreg (GET_MODE (x), op0,
310 GET_MODE (SUBREG_REG (x)),
311 SUBREG_BYTE (x));
312 return op0 ? op0 : x;
313 }
314 break;
315
316 case RTX_OBJ:
317 if (code == MEM)
318 {
319 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
320 if (op0 == XEXP (x, 0))
321 return x;
322 return replace_equiv_address_nv (x, op0);
323 }
324 else if (code == LO_SUM)
325 {
326 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
327 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
328
329 /* (lo_sum (high x) x) -> x */
330 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
331 return op1;
332
333 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
334 return x;
335 return gen_rtx_LO_SUM (mode, op0, op1);
336 }
337 else if (code == REG)
338 {
339 if (REG_P (old_rtx) && REGNO (x) == REGNO (old_rtx))
340 return new_rtx;
341 }
342 break;
343
344 default:
345 break;
346 }
347 return x;
348 }
349 \f
350 /* Try to simplify a unary operation CODE whose output mode is to be
351 MODE with input operand OP whose mode was originally OP_MODE.
352 Return zero if no simplification can be made. */
353 rtx
354 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
355 rtx op, enum machine_mode op_mode)
356 {
357 unsigned int width = GET_MODE_BITSIZE (mode);
358 rtx trueop = avoid_constant_pool_reference (op);
359
360 if (code == VEC_DUPLICATE)
361 {
362 gcc_assert (VECTOR_MODE_P (mode));
363 if (GET_MODE (trueop) != VOIDmode)
364 {
365 if (!VECTOR_MODE_P (GET_MODE (trueop)))
366 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop));
367 else
368 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
369 (GET_MODE (trueop)));
370 }
371 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
372 || GET_CODE (trueop) == CONST_VECTOR)
373 {
374 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
375 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
376 rtvec v = rtvec_alloc (n_elts);
377 unsigned int i;
378
379 if (GET_CODE (trueop) != CONST_VECTOR)
380 for (i = 0; i < n_elts; i++)
381 RTVEC_ELT (v, i) = trueop;
382 else
383 {
384 enum machine_mode inmode = GET_MODE (trueop);
385 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
386 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
387
388 gcc_assert (in_n_elts < n_elts);
389 gcc_assert ((n_elts % in_n_elts) == 0);
390 for (i = 0; i < n_elts; i++)
391 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
392 }
393 return gen_rtx_CONST_VECTOR (mode, v);
394 }
395 }
396 else if (GET_CODE (op) == CONST)
397 return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
398
399 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
400 {
401 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
402 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
403 enum machine_mode opmode = GET_MODE (trueop);
404 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
405 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
406 rtvec v = rtvec_alloc (n_elts);
407 unsigned int i;
408
409 gcc_assert (op_n_elts == n_elts);
410 for (i = 0; i < n_elts; i++)
411 {
412 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
413 CONST_VECTOR_ELT (trueop, i),
414 GET_MODE_INNER (opmode));
415 if (!x)
416 return 0;
417 RTVEC_ELT (v, i) = x;
418 }
419 return gen_rtx_CONST_VECTOR (mode, v);
420 }
421
422 /* The order of these tests is critical so that, for example, we don't
423 check the wrong mode (input vs. output) for a conversion operation,
424 such as FIX. At some point, this should be simplified. */
425
426 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
427 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
428 {
429 HOST_WIDE_INT hv, lv;
430 REAL_VALUE_TYPE d;
431
432 if (GET_CODE (trueop) == CONST_INT)
433 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
434 else
435 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
436
437 REAL_VALUE_FROM_INT (d, lv, hv, mode);
438 d = real_value_truncate (mode, d);
439 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
440 }
441 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
442 && (GET_CODE (trueop) == CONST_DOUBLE
443 || GET_CODE (trueop) == CONST_INT))
444 {
445 HOST_WIDE_INT hv, lv;
446 REAL_VALUE_TYPE d;
447
448 if (GET_CODE (trueop) == CONST_INT)
449 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
450 else
451 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
452
453 if (op_mode == VOIDmode)
454 {
455 /* We don't know how to interpret negative-looking numbers in
456 this case, so don't try to fold those. */
457 if (hv < 0)
458 return 0;
459 }
460 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
461 ;
462 else
463 hv = 0, lv &= GET_MODE_MASK (op_mode);
464
465 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
466 d = real_value_truncate (mode, d);
467 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
468 }
469
470 if (GET_CODE (trueop) == CONST_INT
471 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
472 {
473 HOST_WIDE_INT arg0 = INTVAL (trueop);
474 HOST_WIDE_INT val;
475
476 switch (code)
477 {
478 case NOT:
479 val = ~ arg0;
480 break;
481
482 case NEG:
483 val = - arg0;
484 break;
485
486 case ABS:
487 val = (arg0 >= 0 ? arg0 : - arg0);
488 break;
489
490 case FFS:
491 /* Don't use ffs here. Instead, get low order bit and then its
492 number. If arg0 is zero, this will return 0, as desired. */
493 arg0 &= GET_MODE_MASK (mode);
494 val = exact_log2 (arg0 & (- arg0)) + 1;
495 break;
496
497 case CLZ:
498 arg0 &= GET_MODE_MASK (mode);
499 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
500 ;
501 else
502 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
503 break;
504
505 case CTZ:
506 arg0 &= GET_MODE_MASK (mode);
507 if (arg0 == 0)
508 {
509 /* Even if the value at zero is undefined, we have to come
510 up with some replacement. Seems good enough. */
511 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
512 val = GET_MODE_BITSIZE (mode);
513 }
514 else
515 val = exact_log2 (arg0 & -arg0);
516 break;
517
518 case POPCOUNT:
519 arg0 &= GET_MODE_MASK (mode);
520 val = 0;
521 while (arg0)
522 val++, arg0 &= arg0 - 1;
523 break;
524
525 case PARITY:
526 arg0 &= GET_MODE_MASK (mode);
527 val = 0;
528 while (arg0)
529 val++, arg0 &= arg0 - 1;
530 val &= 1;
531 break;
532
533 case TRUNCATE:
534 val = arg0;
535 break;
536
537 case ZERO_EXTEND:
538 /* When zero-extending a CONST_INT, we need to know its
539 original mode. */
540 gcc_assert (op_mode != VOIDmode);
541 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
542 {
543 /* If we were really extending the mode,
544 we would have to distinguish between zero-extension
545 and sign-extension. */
546 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
547 val = arg0;
548 }
549 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
550 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
551 else
552 return 0;
553 break;
554
555 case SIGN_EXTEND:
556 if (op_mode == VOIDmode)
557 op_mode = mode;
558 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
559 {
560 /* If we were really extending the mode,
561 we would have to distinguish between zero-extension
562 and sign-extension. */
563 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
564 val = arg0;
565 }
566 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
567 {
568 val
569 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
570 if (val
571 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
572 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
573 }
574 else
575 return 0;
576 break;
577
578 case SQRT:
579 case FLOAT_EXTEND:
580 case FLOAT_TRUNCATE:
581 case SS_TRUNCATE:
582 case US_TRUNCATE:
583 return 0;
584
585 default:
586 gcc_unreachable ();
587 }
588
589 val = trunc_int_for_mode (val, mode);
590
591 return GEN_INT (val);
592 }
593
594 /* We can do some operations on integer CONST_DOUBLEs. Also allow
595 for a DImode operation on a CONST_INT. */
596 else if (GET_MODE (trueop) == VOIDmode
597 && width <= HOST_BITS_PER_WIDE_INT * 2
598 && (GET_CODE (trueop) == CONST_DOUBLE
599 || GET_CODE (trueop) == CONST_INT))
600 {
601 unsigned HOST_WIDE_INT l1, lv;
602 HOST_WIDE_INT h1, hv;
603
604 if (GET_CODE (trueop) == CONST_DOUBLE)
605 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
606 else
607 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
608
609 switch (code)
610 {
611 case NOT:
612 lv = ~ l1;
613 hv = ~ h1;
614 break;
615
616 case NEG:
617 neg_double (l1, h1, &lv, &hv);
618 break;
619
620 case ABS:
621 if (h1 < 0)
622 neg_double (l1, h1, &lv, &hv);
623 else
624 lv = l1, hv = h1;
625 break;
626
627 case FFS:
628 hv = 0;
629 if (l1 == 0)
630 {
631 if (h1 == 0)
632 lv = 0;
633 else
634 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
635 }
636 else
637 lv = exact_log2 (l1 & -l1) + 1;
638 break;
639
640 case CLZ:
641 hv = 0;
642 if (h1 != 0)
643 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
644 - HOST_BITS_PER_WIDE_INT;
645 else if (l1 != 0)
646 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
647 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
648 lv = GET_MODE_BITSIZE (mode);
649 break;
650
651 case CTZ:
652 hv = 0;
653 if (l1 != 0)
654 lv = exact_log2 (l1 & -l1);
655 else if (h1 != 0)
656 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
657 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
658 lv = GET_MODE_BITSIZE (mode);
659 break;
660
661 case POPCOUNT:
662 hv = 0;
663 lv = 0;
664 while (l1)
665 lv++, l1 &= l1 - 1;
666 while (h1)
667 lv++, h1 &= h1 - 1;
668 break;
669
670 case PARITY:
671 hv = 0;
672 lv = 0;
673 while (l1)
674 lv++, l1 &= l1 - 1;
675 while (h1)
676 lv++, h1 &= h1 - 1;
677 lv &= 1;
678 break;
679
680 case TRUNCATE:
681 /* This is just a change-of-mode, so do nothing. */
682 lv = l1, hv = h1;
683 break;
684
685 case ZERO_EXTEND:
686 gcc_assert (op_mode != VOIDmode);
687
688 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
689 return 0;
690
691 hv = 0;
692 lv = l1 & GET_MODE_MASK (op_mode);
693 break;
694
695 case SIGN_EXTEND:
696 if (op_mode == VOIDmode
697 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
698 return 0;
699 else
700 {
701 lv = l1 & GET_MODE_MASK (op_mode);
702 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
703 && (lv & ((HOST_WIDE_INT) 1
704 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
705 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
706
707 hv = HWI_SIGN_EXTEND (lv);
708 }
709 break;
710
711 case SQRT:
712 return 0;
713
714 default:
715 return 0;
716 }
717
718 return immed_double_const (lv, hv, mode);
719 }
720
721 else if (GET_CODE (trueop) == CONST_DOUBLE
722 && GET_MODE_CLASS (mode) == MODE_FLOAT)
723 {
724 REAL_VALUE_TYPE d, t;
725 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
726
727 switch (code)
728 {
729 case SQRT:
730 if (HONOR_SNANS (mode) && real_isnan (&d))
731 return 0;
732 real_sqrt (&t, mode, &d);
733 d = t;
734 break;
735 case ABS:
736 d = REAL_VALUE_ABS (d);
737 break;
738 case NEG:
739 d = REAL_VALUE_NEGATE (d);
740 break;
741 case FLOAT_TRUNCATE:
742 d = real_value_truncate (mode, d);
743 break;
744 case FLOAT_EXTEND:
745 /* All this does is change the mode. */
746 break;
747 case FIX:
748 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
749 break;
750 case NOT:
751 {
752 long tmp[4];
753 int i;
754
755 real_to_target (tmp, &d, GET_MODE (trueop));
756 for (i = 0; i < 4; i++)
757 tmp[i] = ~tmp[i];
758 real_from_target (&d, tmp, mode);
759 }
break;
760 default:
761 gcc_unreachable ();
762 }
763 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
764 }
765
766 else if (GET_CODE (trueop) == CONST_DOUBLE
767 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
768 && GET_MODE_CLASS (mode) == MODE_INT
769 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
770 {
771 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
772 operators are intentionally left unspecified (to ease implementation
773 by target backends), for consistency, this routine implements the
774 same semantics for constant folding as used by the middle-end. */
775
776 HOST_WIDE_INT xh, xl, th, tl;
777 REAL_VALUE_TYPE x, t;
778 REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
779 switch (code)
780 {
781 case FIX:
782 if (REAL_VALUE_ISNAN (x))
783 return const0_rtx;
784
785 /* Test against the signed upper bound. */
786 if (width > HOST_BITS_PER_WIDE_INT)
787 {
788 th = ((unsigned HOST_WIDE_INT) 1
789 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
790 tl = -1;
791 }
792 else
793 {
794 th = 0;
795 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
796 }
797 real_from_integer (&t, VOIDmode, tl, th, 0);
798 if (REAL_VALUES_LESS (t, x))
799 {
800 xh = th;
801 xl = tl;
802 break;
803 }
804
805 /* Test against the signed lower bound. */
806 if (width > HOST_BITS_PER_WIDE_INT)
807 {
808 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
809 tl = 0;
810 }
811 else
812 {
813 th = -1;
814 tl = (HOST_WIDE_INT) -1 << (width - 1);
815 }
816 real_from_integer (&t, VOIDmode, tl, th, 0);
817 if (REAL_VALUES_LESS (x, t))
818 {
819 xh = th;
820 xl = tl;
821 break;
822 }
823 REAL_VALUE_TO_INT (&xl, &xh, x);
824 break;
825
826 case UNSIGNED_FIX:
827 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
828 return const0_rtx;
829
830 /* Test against the unsigned upper bound. */
831 if (width == 2*HOST_BITS_PER_WIDE_INT)
832 {
833 th = -1;
834 tl = -1;
835 }
836 else if (width >= HOST_BITS_PER_WIDE_INT)
837 {
838 th = ((unsigned HOST_WIDE_INT) 1
839 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
840 tl = -1;
841 }
842 else
843 {
844 th = 0;
845 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
846 }
847 real_from_integer (&t, VOIDmode, tl, th, 1);
848 if (REAL_VALUES_LESS (t, x))
849 {
850 xh = th;
851 xl = tl;
852 break;
853 }
854
855 REAL_VALUE_TO_INT (&xl, &xh, x);
856 break;
857
858 default:
859 gcc_unreachable ();
860 }
861 return immed_double_const (xl, xh, mode);
862 }
863
864 /* This was formerly used only for non-IEEE float.
865 eggert@twinsun.com says it is safe for IEEE also. */
866 else
867 {
868 enum rtx_code reversed;
869 rtx temp;
870
871 /* There are some simplifications we can do even if the operands
872 aren't constant. */
873 switch (code)
874 {
875 case NOT:
876 /* (not (not X)) == X. */
877 if (GET_CODE (op) == NOT)
878 return XEXP (op, 0);
879
880 /* (not (eq X Y)) == (ne X Y), etc. */
881 if (COMPARISON_P (op)
882 && (mode == BImode || STORE_FLAG_VALUE == -1)
883 && ((reversed = reversed_comparison_code (op, NULL_RTX))
884 != UNKNOWN))
885 return simplify_gen_relational (reversed, mode, VOIDmode,
886 XEXP (op, 0), XEXP (op, 1));
887
888 /* (not (plus X -1)) can become (neg X). */
889 if (GET_CODE (op) == PLUS
890 && XEXP (op, 1) == constm1_rtx)
891 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
892
893 /* Similarly, (not (neg X)) is (plus X -1). */
894 if (GET_CODE (op) == NEG)
895 return plus_constant (XEXP (op, 0), -1);
896
897 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
898 if (GET_CODE (op) == XOR
899 && GET_CODE (XEXP (op, 1)) == CONST_INT
900 && (temp = simplify_unary_operation (NOT, mode,
901 XEXP (op, 1),
902 mode)) != 0)
903 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
904
905 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
906 if (GET_CODE (op) == PLUS
907 && GET_CODE (XEXP (op, 1)) == CONST_INT
908 && mode_signbit_p (mode, XEXP (op, 1))
909 && (temp = simplify_unary_operation (NOT, mode,
910 XEXP (op, 1),
911 mode)) != 0)
912 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
913
914
915
916 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
917 operands other than 1, but that is not valid. We could do a
918 similar simplification for (not (lshiftrt C X)) where C is
919 just the sign bit, but this doesn't seem common enough to
920 bother with. */
921 if (GET_CODE (op) == ASHIFT
922 && XEXP (op, 0) == const1_rtx)
923 {
924 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
925 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
926 }
927
928 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
929 by reversing the comparison code if valid. */
930 if (STORE_FLAG_VALUE == -1
931 && COMPARISON_P (op)
932 && (reversed = reversed_comparison_code (op, NULL_RTX))
933 != UNKNOWN)
934 return simplify_gen_relational (reversed, mode, VOIDmode,
935 XEXP (op, 0), XEXP (op, 1));
936
937 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
938 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
939 so we can perform the above simplification. */
940
941 if (STORE_FLAG_VALUE == -1
942 && GET_CODE (op) == ASHIFTRT
943 && GET_CODE (XEXP (op, 1)) == CONST_INT
944 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
945 return simplify_gen_relational (GE, mode, VOIDmode,
946 XEXP (op, 0), const0_rtx);
947
948 break;
949
950 case NEG:
951 /* (neg (neg X)) == X. */
952 if (GET_CODE (op) == NEG)
953 return XEXP (op, 0);
954
955 /* (neg (plus X 1)) can become (not X). */
956 if (GET_CODE (op) == PLUS
957 && XEXP (op, 1) == const1_rtx)
958 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
959
960 /* Similarly, (neg (not X)) is (plus X 1). */
961 if (GET_CODE (op) == NOT)
962 return plus_constant (XEXP (op, 0), 1);
963
964 /* (neg (minus X Y)) can become (minus Y X). This transformation
965 isn't safe for modes with signed zeros, since if X and Y are
966 both +0, (minus Y X) is the same as (minus X Y). If the
967 rounding mode is towards +infinity (or -infinity) then the two
968 expressions will be rounded differently. */
969 if (GET_CODE (op) == MINUS
970 && !HONOR_SIGNED_ZEROS (mode)
971 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
972 return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
973 XEXP (op, 0));
974
975 if (GET_CODE (op) == PLUS
976 && !HONOR_SIGNED_ZEROS (mode)
977 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
978 {
979 /* (neg (plus A C)) is simplified to (minus -C A). */
980 if (GET_CODE (XEXP (op, 1)) == CONST_INT
981 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
982 {
983 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
984 mode);
985 if (temp)
986 return simplify_gen_binary (MINUS, mode, temp,
987 XEXP (op, 0));
988 }
989
990 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
991 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
992 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
993 }
994
995 /* (neg (mult A B)) becomes (mult (neg A) B).
996 This works even for floating-point values. */
997 if (GET_CODE (op) == MULT
998 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
999 {
1000 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1001 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
1002 }
1003
1004 /* NEG commutes with ASHIFT since it is multiplication. Only do
1005 this if we can then eliminate the NEG (e.g., if the operand
1006 is a constant). */
1007 if (GET_CODE (op) == ASHIFT)
1008 {
1009 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
1010 mode);
1011 if (temp)
1012 return simplify_gen_binary (ASHIFT, mode, temp,
1013 XEXP (op, 1));
1014 }
1015
1016 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1017 C is equal to the width of MODE minus 1. */
1018 if (GET_CODE (op) == ASHIFTRT
1019 && GET_CODE (XEXP (op, 1)) == CONST_INT
1020 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
1021 return simplify_gen_binary (LSHIFTRT, mode,
1022 XEXP (op, 0), XEXP (op, 1));
1023
1024 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1025 C is equal to the width of MODE minus 1. */
1026 if (GET_CODE (op) == LSHIFTRT
1027 && GET_CODE (XEXP (op, 1)) == CONST_INT
1028 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
1029 return simplify_gen_binary (ASHIFTRT, mode,
1030 XEXP (op, 0), XEXP (op, 1));
1031
1032 break;
1033
1034 case SIGN_EXTEND:
1035 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1036 becomes just the MINUS if its mode is MODE. This allows
1037 folding switch statements on machines using casesi (such as
1038 the VAX). */
1039 if (GET_CODE (op) == TRUNCATE
1040 && GET_MODE (XEXP (op, 0)) == mode
1041 && GET_CODE (XEXP (op, 0)) == MINUS
1042 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1043 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1044 return XEXP (op, 0);
1045
1046 /* Check for a sign extension of a subreg of a promoted
1047 variable, where the promotion is sign-extended, and the
1048 target mode is the same as the variable's promotion. */
1049 if (GET_CODE (op) == SUBREG
1050 && SUBREG_PROMOTED_VAR_P (op)
1051 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1052 && GET_MODE (XEXP (op, 0)) == mode)
1053 return XEXP (op, 0);
1054
1055 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1056 if (! POINTERS_EXTEND_UNSIGNED
1057 && mode == Pmode && GET_MODE (op) == ptr_mode
1058 && (CONSTANT_P (op)
1059 || (GET_CODE (op) == SUBREG
1060 && REG_P (SUBREG_REG (op))
1061 && REG_POINTER (SUBREG_REG (op))
1062 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1063 return convert_memory_address (Pmode, op);
1064 #endif
1065 break;
1066
1067 case ZERO_EXTEND:
1068 /* Check for a zero extension of a subreg of a promoted
1069 variable, where the promotion is zero-extended, and the
1070 target mode is the same as the variable's promotion. */
1071 if (GET_CODE (op) == SUBREG
1072 && SUBREG_PROMOTED_VAR_P (op)
1073 && SUBREG_PROMOTED_UNSIGNED_P (op)
1074 && GET_MODE (XEXP (op, 0)) == mode)
1075 return XEXP (op, 0);
1076
1077 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1078 if (POINTERS_EXTEND_UNSIGNED > 0
1079 && mode == Pmode && GET_MODE (op) == ptr_mode
1080 && (CONSTANT_P (op)
1081 || (GET_CODE (op) == SUBREG
1082 && REG_P (SUBREG_REG (op))
1083 && REG_POINTER (SUBREG_REG (op))
1084 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1085 return convert_memory_address (Pmode, op);
1086 #endif
1087 break;
1088
1089 default:
1090 break;
1091 }
1092
1093 return 0;
1094 }
1095 }
1096 \f
1097 /* Subroutine of simplify_binary_operation to simplify a commutative,
1098 associative binary operation CODE with result mode MODE, operating
1099 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1100 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1101 canonicalization is possible. */
1102
1103 static rtx
1104 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1105 rtx op0, rtx op1)
1106 {
1107 rtx tem;
1108
1109 /* Linearize the operator to the left. */
1110 if (GET_CODE (op1) == code)
1111 {
1112 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1113 if (GET_CODE (op0) == code)
1114 {
1115 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1116 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1117 }
1118
1119 /* "a op (b op c)" becomes "(b op c) op a". */
1120 if (! swap_commutative_operands_p (op1, op0))
1121 return simplify_gen_binary (code, mode, op1, op0);
1122
1123 tem = op0;
1124 op0 = op1;
1125 op1 = tem;
1126 }
1127
1128 if (GET_CODE (op0) == code)
1129 {
1130 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1131 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1132 {
1133 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1134 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1135 }
1136
1137 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1138 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1139 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1140 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1141 if (tem != 0)
1142 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1143
1144 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1145 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1146 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1147 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1148 if (tem != 0)
1149 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1150 }
1151
1152 return 0;
1153 }
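/* For example, (ior (ior x 1) 2) is reassociated so that the two constants
   fold, yielding (ior x 3).  */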
1154
1155 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1156 and OP1. Return 0 if no simplification is possible.
1157
1158 Don't use this for relational operations such as EQ or LT.
1159 Use simplify_relational_operation instead. */
1160 rtx
1161 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1162 rtx op0, rtx op1)
1163 {
1164 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
1165 HOST_WIDE_INT val;
1166 unsigned int width = GET_MODE_BITSIZE (mode);
1167 rtx trueop0, trueop1;
1168 rtx tem;
1169
1170 /* Relational operations don't work here. We must know the mode
1171 of the operands in order to do the comparison correctly.
1172 Assuming a full word can give incorrect results.
1173 Consider comparing 128 with -128 in QImode. */
1174 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1175 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1176
1177 /* Make sure the constant is second. */
1178 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1179 && swap_commutative_operands_p (op0, op1))
1180 {
1181 tem = op0, op0 = op1, op1 = tem;
1182 }
1183
1184 trueop0 = avoid_constant_pool_reference (op0);
1185 trueop1 = avoid_constant_pool_reference (op1);
1186
1187 if (VECTOR_MODE_P (mode)
1188 && code != VEC_CONCAT
1189 && GET_CODE (trueop0) == CONST_VECTOR
1190 && GET_CODE (trueop1) == CONST_VECTOR)
1191 {
1192 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1193 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1194 enum machine_mode op0mode = GET_MODE (trueop0);
1195 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
1196 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
1197 enum machine_mode op1mode = GET_MODE (trueop1);
1198 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
1199 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
1200 rtvec v = rtvec_alloc (n_elts);
1201 unsigned int i;
1202
1203 gcc_assert (op0_n_elts == n_elts);
1204 gcc_assert (op1_n_elts == n_elts);
1205 for (i = 0; i < n_elts; i++)
1206 {
1207 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
1208 CONST_VECTOR_ELT (trueop0, i),
1209 CONST_VECTOR_ELT (trueop1, i));
1210 if (!x)
1211 return 0;
1212 RTVEC_ELT (v, i) = x;
1213 }
1214
1215 return gen_rtx_CONST_VECTOR (mode, v);
1216 }
1217
1218 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1219 && GET_CODE (trueop0) == CONST_DOUBLE
1220 && GET_CODE (trueop1) == CONST_DOUBLE
1221 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
1222 {
1223 if (code == AND
1224 || code == IOR
1225 || code == XOR)
1226 {
1227 long tmp0[4];
1228 long tmp1[4];
1229 REAL_VALUE_TYPE r;
1230 int i;
1231
1232 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
1233 GET_MODE (op0));
1234 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
1235 GET_MODE (op1));
1236 for (i = 0; i < 4; i++)
1237 {
1238 switch (code)
1239 {
1240 case AND:
1241 tmp0[i] &= tmp1[i];
1242 break;
1243 case IOR:
1244 tmp0[i] |= tmp1[i];
1245 break;
1246 case XOR:
1247 tmp0[i] ^= tmp1[i];
1248 break;
1249 default:
1250 gcc_unreachable ();
1251 }
1252 }
1253 real_from_target (&r, tmp0, mode);
1254 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
1255 }
1256 else
1257 {
1258 REAL_VALUE_TYPE f0, f1, value;
1259
1260 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
1261 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
1262 f0 = real_value_truncate (mode, f0);
1263 f1 = real_value_truncate (mode, f1);
1264
1265 if (HONOR_SNANS (mode)
1266 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
1267 return 0;
1268
1269 if (code == DIV
1270 && REAL_VALUES_EQUAL (f1, dconst0)
1271 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1272 return 0;
1273
1274 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
1275 && flag_trapping_math
1276 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
1277 {
1278 int s0 = REAL_VALUE_NEGATIVE (f0);
1279 int s1 = REAL_VALUE_NEGATIVE (f1);
1280
1281 switch (code)
1282 {
1283 case PLUS:
1284 /* Inf + -Inf = NaN plus exception. */
1285 if (s0 != s1)
1286 return 0;
1287 break;
1288 case MINUS:
1289 /* Inf - Inf = NaN plus exception. */
1290 if (s0 == s1)
1291 return 0;
1292 break;
1293 case DIV:
1294 /* Inf / Inf = NaN plus exception. */
1295 return 0;
1296 default:
1297 break;
1298 }
1299 }
1300
1301 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
1302 && flag_trapping_math
1303 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
1304 || (REAL_VALUE_ISINF (f1)
1305 && REAL_VALUES_EQUAL (f0, dconst0))))
1306 /* Inf * 0 = NaN plus exception. */
1307 return 0;
1308
1309 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
1310
1311 value = real_value_truncate (mode, value);
1312 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
1313 }
1314 }
1315
1316 /* We can fold some multi-word operations. */
1317 if (GET_MODE_CLASS (mode) == MODE_INT
1318 && width == HOST_BITS_PER_WIDE_INT * 2
1319 && (GET_CODE (trueop0) == CONST_DOUBLE
1320 || GET_CODE (trueop0) == CONST_INT)
1321 && (GET_CODE (trueop1) == CONST_DOUBLE
1322 || GET_CODE (trueop1) == CONST_INT))
1323 {
1324 unsigned HOST_WIDE_INT l1, l2, lv, lt;
1325 HOST_WIDE_INT h1, h2, hv, ht;
1326
1327 if (GET_CODE (trueop0) == CONST_DOUBLE)
1328 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
1329 else
1330 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
1331
1332 if (GET_CODE (trueop1) == CONST_DOUBLE)
1333 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
1334 else
1335 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
1336
1337 switch (code)
1338 {
1339 case MINUS:
1340 /* A - B == A + (-B). */
1341 neg_double (l2, h2, &lv, &hv);
1342 l2 = lv, h2 = hv;
1343
1344 /* Fall through.... */
1345
1346 case PLUS:
1347 add_double (l1, h1, l2, h2, &lv, &hv);
1348 break;
1349
1350 case MULT:
1351 mul_double (l1, h1, l2, h2, &lv, &hv);
1352 break;
1353
1354 case DIV:
1355 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
1356 &lv, &hv, &lt, &ht))
1357 return 0;
1358 break;
1359
1360 case MOD:
1361 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
1362 &lt, &ht, &lv, &hv))
1363 return 0;
1364 break;
1365
1366 case UDIV:
1367 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
1368 &lv, &hv, &lt, &ht))
1369 return 0;
1370 break;
1371
1372 case UMOD:
1373 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
1374 &lt, &ht, &lv, &hv))
1375 return 0;
1376 break;
1377
1378 case AND:
1379 lv = l1 & l2, hv = h1 & h2;
1380 break;
1381
1382 case IOR:
1383 lv = l1 | l2, hv = h1 | h2;
1384 break;
1385
1386 case XOR:
1387 lv = l1 ^ l2, hv = h1 ^ h2;
1388 break;
1389
1390 case SMIN:
1391 if (h1 < h2
1392 || (h1 == h2
1393 && ((unsigned HOST_WIDE_INT) l1
1394 < (unsigned HOST_WIDE_INT) l2)))
1395 lv = l1, hv = h1;
1396 else
1397 lv = l2, hv = h2;
1398 break;
1399
1400 case SMAX:
1401 if (h1 > h2
1402 || (h1 == h2
1403 && ((unsigned HOST_WIDE_INT) l1
1404 > (unsigned HOST_WIDE_INT) l2)))
1405 lv = l1, hv = h1;
1406 else
1407 lv = l2, hv = h2;
1408 break;
1409
1410 case UMIN:
1411 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1412 || (h1 == h2
1413 && ((unsigned HOST_WIDE_INT) l1
1414 < (unsigned HOST_WIDE_INT) l2)))
1415 lv = l1, hv = h1;
1416 else
1417 lv = l2, hv = h2;
1418 break;
1419
1420 case UMAX:
1421 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1422 || (h1 == h2
1423 && ((unsigned HOST_WIDE_INT) l1
1424 > (unsigned HOST_WIDE_INT) l2)))
1425 lv = l1, hv = h1;
1426 else
1427 lv = l2, hv = h2;
1428 break;
1429
1430 case LSHIFTRT: case ASHIFTRT:
1431 case ASHIFT:
1432 case ROTATE: case ROTATERT:
1433 if (SHIFT_COUNT_TRUNCATED)
1434 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1435
1436 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1437 return 0;
1438
1439 if (code == LSHIFTRT || code == ASHIFTRT)
1440 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1441 code == ASHIFTRT);
1442 else if (code == ASHIFT)
1443 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1444 else if (code == ROTATE)
1445 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1446 else /* code == ROTATERT */
1447 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1448 break;
1449
1450 default:
1451 return 0;
1452 }
1453
1454 return immed_double_const (lv, hv, mode);
1455 }
1456
1457 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1458 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1459 {
1460 /* Even if we can't compute a constant result,
1461 there are some cases worth simplifying. */
1462
1463 switch (code)
1464 {
1465 case PLUS:
1466 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1467 when x is NaN, infinite, or finite and nonzero. They aren't
1468 when x is -0 and the rounding mode is not towards -infinity,
1469 since (-0) + 0 is then 0. */
1470 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1471 return op0;
1472
1473 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1474 transformations are safe even for IEEE. */
1475 if (GET_CODE (op0) == NEG)
1476 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1477 else if (GET_CODE (op1) == NEG)
1478 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1479
1480 /* (~a) + 1 -> -a */
1481 if (INTEGRAL_MODE_P (mode)
1482 && GET_CODE (op0) == NOT
1483 && trueop1 == const1_rtx)
1484 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1485
1486 /* Handle both-operands-constant cases. We can only add
1487 CONST_INTs to constants since the sum of relocatable symbols
1488 can't be handled by most assemblers. Don't add CONST_INT
1489 to CONST_INT since overflow won't be computed properly if wider
1490 than HOST_BITS_PER_WIDE_INT. */
1491
1492 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1493 && GET_CODE (op1) == CONST_INT)
1494 return plus_constant (op0, INTVAL (op1));
1495 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1496 && GET_CODE (op0) == CONST_INT)
1497 return plus_constant (op1, INTVAL (op0));
1498
1499 /* See if this is something like X * C - X or vice versa or
1500 if the multiplication is written as a shift. If so, we can
1501 distribute and make a new multiply, shift, or maybe just
1502 have X (if C is 2 in the example above). But don't make
1503 something more expensive than we had before. */
1504
1505 if (! FLOAT_MODE_P (mode))
1506 {
1507 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1508 rtx lhs = op0, rhs = op1;
1509
1510 if (GET_CODE (lhs) == NEG)
1511 coeff0 = -1, lhs = XEXP (lhs, 0);
1512 else if (GET_CODE (lhs) == MULT
1513 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1514 {
1515 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1516 }
1517 else if (GET_CODE (lhs) == ASHIFT
1518 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1519 && INTVAL (XEXP (lhs, 1)) >= 0
1520 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1521 {
1522 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1523 lhs = XEXP (lhs, 0);
1524 }
1525
1526 if (GET_CODE (rhs) == NEG)
1527 coeff1 = -1, rhs = XEXP (rhs, 0);
1528 else if (GET_CODE (rhs) == MULT
1529 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1530 {
1531 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1532 }
1533 else if (GET_CODE (rhs) == ASHIFT
1534 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1535 && INTVAL (XEXP (rhs, 1)) >= 0
1536 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1537 {
1538 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1539 rhs = XEXP (rhs, 0);
1540 }
1541
1542 if (rtx_equal_p (lhs, rhs))
1543 {
1544 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1545 tem = simplify_gen_binary (MULT, mode, lhs,
1546 GEN_INT (coeff0 + coeff1));
1547 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1548 ? tem : 0;
1549 }
1550 }
1551
1552 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1553 if ((GET_CODE (op1) == CONST_INT
1554 || GET_CODE (op1) == CONST_DOUBLE)
1555 && GET_CODE (op0) == XOR
1556 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1557 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1558 && mode_signbit_p (mode, op1))
1559 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1560 simplify_gen_binary (XOR, mode, op1,
1561 XEXP (op0, 1)));
1562
1563 /* If one of the operands is a PLUS or a MINUS, see if we can
1564 simplify this by the associative law.
1565 Don't use the associative law for floating point.
1566 The inaccuracy makes it nonassociative,
1567 and subtle programs can break if operations are associated. */
1568
1569 if (INTEGRAL_MODE_P (mode)
1570 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1571 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1572 || (GET_CODE (op0) == CONST
1573 && GET_CODE (XEXP (op0, 0)) == PLUS)
1574 || (GET_CODE (op1) == CONST
1575 && GET_CODE (XEXP (op1, 0)) == PLUS))
1576 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1577 return tem;
1578
1579 /* Reassociate floating point addition only when the user
1580 specifies unsafe math optimizations. */
1581 if (FLOAT_MODE_P (mode)
1582 && flag_unsafe_math_optimizations)
1583 {
1584 tem = simplify_associative_operation (code, mode, op0, op1);
1585 if (tem)
1586 return tem;
1587 }
1588 break;
1589
1590 case COMPARE:
1591 #ifdef HAVE_cc0
1592 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1593 using cc0, in which case we want to leave it as a COMPARE
1594 so we can distinguish it from a register-register-copy.
1595
1596 In IEEE floating point, x-0 is not the same as x. */
1597
1598 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1599 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1600 && trueop1 == CONST0_RTX (mode))
1601 return op0;
1602 #endif
1603
1604 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1605 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1606 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1607 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1608 {
1609 rtx xop00 = XEXP (op0, 0);
1610 rtx xop10 = XEXP (op1, 0);
1611
1612 #ifdef HAVE_cc0
1613 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1614 #else
1615 if (REG_P (xop00) && REG_P (xop10)
1616 && GET_MODE (xop00) == GET_MODE (xop10)
1617 && REGNO (xop00) == REGNO (xop10)
1618 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1619 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1620 #endif
1621 return xop00;
1622 }
1623 break;
1624
1625 case MINUS:
1626 /* We can't assume x-x is 0 even with non-IEEE floating point,
1627 but since it is zero except in very strange circumstances, we
1628 will treat it as zero with -funsafe-math-optimizations. */
1629 if (rtx_equal_p (trueop0, trueop1)
1630 && ! side_effects_p (op0)
1631 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1632 return CONST0_RTX (mode);
1633
1634 /* Change subtraction from zero into negation. (0 - x) is the
1635 same as -x when x is NaN, infinite, or finite and nonzero.
1636 But if the mode has signed zeros, and does not round towards
1637 -infinity, then 0 - 0 is 0, not -0. */
1638 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1639 return simplify_gen_unary (NEG, mode, op1, mode);
1640
1641 /* (-1 - a) is ~a. */
1642 if (trueop0 == constm1_rtx)
1643 return simplify_gen_unary (NOT, mode, op1, mode);
1644
1645 /* Subtracting 0 has no effect unless the mode has signed zeros
1646 and supports rounding towards -infinity. In such a case,
1647 0 - 0 is -0. */
1648 if (!(HONOR_SIGNED_ZEROS (mode)
1649 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1650 && trueop1 == CONST0_RTX (mode))
1651 return op0;
1652
1653 /* See if this is something like X * C - X or vice versa or
1654 if the multiplication is written as a shift. If so, we can
1655 distribute and make a new multiply, shift, or maybe just
1656 have X (if C is 2 in the example above). But don't make
1657 something more expensive than we had before. */
1658
1659 if (! FLOAT_MODE_P (mode))
1660 {
1661 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1662 rtx lhs = op0, rhs = op1;
1663
1664 if (GET_CODE (lhs) == NEG)
1665 coeff0 = -1, lhs = XEXP (lhs, 0);
1666 else if (GET_CODE (lhs) == MULT
1667 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1668 {
1669 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1670 }
1671 else if (GET_CODE (lhs) == ASHIFT
1672 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1673 && INTVAL (XEXP (lhs, 1)) >= 0
1674 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1675 {
1676 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1677 lhs = XEXP (lhs, 0);
1678 }
1679
1680 if (GET_CODE (rhs) == NEG)
1681 coeff1 = - 1, rhs = XEXP (rhs, 0);
1682 else if (GET_CODE (rhs) == MULT
1683 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1684 {
1685 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1686 }
1687 else if (GET_CODE (rhs) == ASHIFT
1688 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1689 && INTVAL (XEXP (rhs, 1)) >= 0
1690 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1691 {
1692 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1693 rhs = XEXP (rhs, 0);
1694 }
1695
1696 if (rtx_equal_p (lhs, rhs))
1697 {
1698 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1699 tem = simplify_gen_binary (MULT, mode, lhs,
1700 GEN_INT (coeff0 - coeff1));
1701 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1702 ? tem : 0;
1703 }
1704 }
1705
1706 /* (a - (-b)) -> (a + b). True even for IEEE. */
1707 if (GET_CODE (op1) == NEG)
1708 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1709
1710 /* (-x - c) may be simplified as (-c - x). */
1711 if (GET_CODE (op0) == NEG
1712 && (GET_CODE (op1) == CONST_INT
1713 || GET_CODE (op1) == CONST_DOUBLE))
1714 {
1715 tem = simplify_unary_operation (NEG, mode, op1, mode);
1716 if (tem)
1717 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1718 }
1719
1720 /* If one of the operands is a PLUS or a MINUS, see if we can
1721 simplify this by the associative law.
1722 Don't use the associative law for floating point.
1723 The inaccuracy makes it nonassociative,
1724 and subtle programs can break if operations are associated. */
1725
1726 if (INTEGRAL_MODE_P (mode)
1727 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1728 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1729 || (GET_CODE (op0) == CONST
1730 && GET_CODE (XEXP (op0, 0)) == PLUS)
1731 || (GET_CODE (op1) == CONST
1732 && GET_CODE (XEXP (op1, 0)) == PLUS))
1733 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1734 return tem;
1735
1736 /* Don't let a relocatable value get a negative coeff. */
1737 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1738 return simplify_gen_binary (PLUS, mode,
1739 op0,
1740 neg_const_int (mode, op1));
1741
1742 /* (x - (x & y)) -> (x & ~y) */
1743 if (GET_CODE (op1) == AND)
1744 {
1745 if (rtx_equal_p (op0, XEXP (op1, 0)))
1746 {
1747 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1748 GET_MODE (XEXP (op1, 1)));
1749 return simplify_gen_binary (AND, mode, op0, tem);
1750 }
1751 if (rtx_equal_p (op0, XEXP (op1, 1)))
1752 {
1753 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1754 GET_MODE (XEXP (op1, 0)));
1755 return simplify_gen_binary (AND, mode, op0, tem);
1756 }
1757 }
1758 break;
1759
1760 case MULT:
1761 if (trueop1 == constm1_rtx)
1762 return simplify_gen_unary (NEG, mode, op0, mode);
1763
1764 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1765 x is NaN, since x * 0 is then also NaN. Nor is it valid
1766 when the mode has signed zeros, since multiplying a negative
1767 number by 0 will give -0, not 0. */
1768 if (!HONOR_NANS (mode)
1769 && !HONOR_SIGNED_ZEROS (mode)
1770 && trueop1 == CONST0_RTX (mode)
1771 && ! side_effects_p (op0))
1772 return op1;
1773
1774 /* In IEEE floating point, x*1 is not equivalent to x for
1775 signalling NaNs. */
1776 if (!HONOR_SNANS (mode)
1777 && trueop1 == CONST1_RTX (mode))
1778 return op0;
1779
1780 /* Convert multiply by constant power of two into shift unless
1781 we are still generating RTL. This test is a kludge. */
1782 if (GET_CODE (trueop1) == CONST_INT
1783 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1784 /* If the mode is larger than the host word size, and the
1785 uppermost bit is set, then this isn't a power of two due
1786 to implicit sign extension. */
1787 && (width <= HOST_BITS_PER_WIDE_INT
1788 || val != HOST_BITS_PER_WIDE_INT - 1))
1789 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1790
1791 /* x*2 is x+x and x*(-1) is -x */
1792 if (GET_CODE (trueop1) == CONST_DOUBLE
1793 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1794 && GET_MODE (op0) == mode)
1795 {
1796 REAL_VALUE_TYPE d;
1797 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1798
1799 if (REAL_VALUES_EQUAL (d, dconst2))
1800 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1801
1802 if (REAL_VALUES_EQUAL (d, dconstm1))
1803 return simplify_gen_unary (NEG, mode, op0, mode);
1804 }
1805
1806 /* Reassociate multiplication, but for floating point MULTs
1807 only when the user specifies unsafe math optimizations. */
1808 if (! FLOAT_MODE_P (mode)
1809 || flag_unsafe_math_optimizations)
1810 {
1811 tem = simplify_associative_operation (code, mode, op0, op1);
1812 if (tem)
1813 return tem;
1814 }
1815 break;
1816
1817 case IOR:
1818 if (trueop1 == const0_rtx)
1819 return op0;
1820 if (GET_CODE (trueop1) == CONST_INT
1821 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1822 == GET_MODE_MASK (mode)))
1823 return op1;
1824 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1825 return op0;
1826 /* A | (~A) -> -1 */
1827 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1828 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1829 && ! side_effects_p (op0)
1830 && GET_MODE_CLASS (mode) != MODE_CC)
1831 return constm1_rtx;
1832 tem = simplify_associative_operation (code, mode, op0, op1);
1833 if (tem)
1834 return tem;
1835 break;
1836
1837 case XOR:
1838 if (trueop1 == const0_rtx)
1839 return op0;
1840 if (GET_CODE (trueop1) == CONST_INT
1841 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1842 == GET_MODE_MASK (mode)))
1843 return simplify_gen_unary (NOT, mode, op0, mode);
1844 if (trueop0 == trueop1
1845 && ! side_effects_p (op0)
1846 && GET_MODE_CLASS (mode) != MODE_CC)
1847 return const0_rtx;
1848
1849 /* Canonicalize XOR of the most significant bit to PLUS. */
1850 if ((GET_CODE (op1) == CONST_INT
1851 || GET_CODE (op1) == CONST_DOUBLE)
1852 && mode_signbit_p (mode, op1))
1853 return simplify_gen_binary (PLUS, mode, op0, op1);
1854 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
1855 if ((GET_CODE (op1) == CONST_INT
1856 || GET_CODE (op1) == CONST_DOUBLE)
1857 && GET_CODE (op0) == PLUS
1858 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1859 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1860 && mode_signbit_p (mode, XEXP (op0, 1)))
1861 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1862 simplify_gen_binary (XOR, mode, op1,
1863 XEXP (op0, 1)));
1864
1865 tem = simplify_associative_operation (code, mode, op0, op1);
1866 if (tem)
1867 return tem;
1868 break;
1869
1870 case AND:
1871 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1872 return const0_rtx;
1873 /* If we are turning off bits already known off in OP0, we need
1874 not do an AND. */
1875 if (GET_CODE (trueop1) == CONST_INT
1876 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1877 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
1878 return op0;
1879 if (trueop0 == trueop1 && ! side_effects_p (op0)
1880 && GET_MODE_CLASS (mode) != MODE_CC)
1881 return op0;
1882 /* A & (~A) -> 0 */
1883 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1884 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1885 && ! side_effects_p (op0)
1886 && GET_MODE_CLASS (mode) != MODE_CC)
1887 return const0_rtx;
1888 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
1889 ((A & N) + B) & M -> (A + B) & M
1890 Similarly if (N & M) == 0,
1891 ((A | N) + B) & M -> (A + B) & M
1892 and for - instead of + and/or ^ instead of |. */
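 /* Worked example (illustrative): with M == 0xff and N == 0x1ff,
    (N & M) == M, so ((A & 0x1ff) + B) & 0xff folds to (A + B) & 0xff;
    bits above bit 7 of A cannot affect the low 8 bits of the sum.
    Likewise with N == 0x100, (N & M) == 0, so ((A | 0x100) + B) & 0xff
    folds to (A + B) & 0xff.  */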
1893 if (GET_CODE (trueop1) == CONST_INT
1894 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1895 && ~INTVAL (trueop1)
1896 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
1897 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
1898 {
1899 rtx pmop[2];
1900 int which;
1901
1902 pmop[0] = XEXP (op0, 0);
1903 pmop[1] = XEXP (op0, 1);
1904
1905 for (which = 0; which < 2; which++)
1906 {
1907 tem = pmop[which];
1908 switch (GET_CODE (tem))
1909 {
1910 case AND:
1911 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1912 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
1913 == INTVAL (trueop1))
1914 pmop[which] = XEXP (tem, 0);
1915 break;
1916 case IOR:
1917 case XOR:
1918 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1919 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
1920 pmop[which] = XEXP (tem, 0);
1921 break;
1922 default:
1923 break;
1924 }
1925 }
1926
1927 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
1928 {
1929 tem = simplify_gen_binary (GET_CODE (op0), mode,
1930 pmop[0], pmop[1]);
1931 return simplify_gen_binary (code, mode, tem, op1);
1932 }
1933 }
1934 tem = simplify_associative_operation (code, mode, op0, op1);
1935 if (tem)
1936 return tem;
1937 break;
1938
1939 case UDIV:
1940 /* 0/x is 0 (or x&0 if x has side-effects). */
1941 if (trueop0 == const0_rtx)
1942 return side_effects_p (op1)
1943 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
1944 : const0_rtx;
1945 /* x/1 is x. */
1946 if (trueop1 == const1_rtx)
1947 {
1948 /* Handle narrowing UDIV. */
1949 rtx x = gen_lowpart_common (mode, op0);
1950 if (x)
1951 return x;
1952 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1953 return gen_lowpart_SUBREG (mode, op0);
1954 return op0;
1955 }
1956 /* Convert divide by power of two into shift. */
1957 if (GET_CODE (trueop1) == CONST_INT
1958 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1959 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
1960 break;
1961
1962 case DIV:
1963 /* Handle floating point and integers separately. */
1964 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1965 {
1966 /* Maybe change 0.0 / x to 0.0. This transformation isn't
1967 safe for modes with NaNs, since 0.0 / 0.0 will then be
1968 NaN rather than 0.0. Nor is it safe for modes with signed
1969 zeros, since dividing 0 by a negative number gives -0.0 */
1970 if (trueop0 == CONST0_RTX (mode)
1971 && !HONOR_NANS (mode)
1972 && !HONOR_SIGNED_ZEROS (mode)
1973 && ! side_effects_p (op1))
1974 return op0;
1975 /* x/1.0 is x. */
1976 if (trueop1 == CONST1_RTX (mode)
1977 && !HONOR_SNANS (mode))
1978 return op0;
1979
1980 if (GET_CODE (trueop1) == CONST_DOUBLE
1981 && trueop1 != CONST0_RTX (mode))
1982 {
1983 REAL_VALUE_TYPE d;
1984 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1985
1986 /* x/-1.0 is -x. */
1987 if (REAL_VALUES_EQUAL (d, dconstm1)
1988 && !HONOR_SNANS (mode))
1989 return simplify_gen_unary (NEG, mode, op0, mode);
1990
1991 /* Change FP division by a constant into multiplication.
1992 Only do this with -funsafe-math-optimizations. */
1993 if (flag_unsafe_math_optimizations
1994 && !REAL_VALUES_EQUAL (d, dconst0))
1995 {
1996 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
1997 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1998 return simplify_gen_binary (MULT, mode, op0, tem);
1999 }
2000 }
2001 }
2002 else
2003 {
2004 /* 0/x is 0 (or x&0 if x has side-effects). */
2005 if (trueop0 == const0_rtx)
2006 return side_effects_p (op1)
2007 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2008 : const0_rtx;
2009 /* x/1 is x. */
2010 if (trueop1 == const1_rtx)
2011 {
2012 /* Handle narrowing DIV. */
2013 rtx x = gen_lowpart_common (mode, op0);
2014 if (x)
2015 return x;
2016 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2017 return gen_lowpart_SUBREG (mode, op0);
2018 return op0;
2019 }
2020 /* x/-1 is -x. */
2021 if (trueop1 == constm1_rtx)
2022 {
2023 rtx x = gen_lowpart_common (mode, op0);
2024 if (!x)
2025 x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2026 ? gen_lowpart_SUBREG (mode, op0) : op0;
2027 return simplify_gen_unary (NEG, mode, x, mode);
2028 }
2029 }
2030 break;
2031
2032 case UMOD:
2033 /* 0%x is 0 (or x&0 if x has side-effects). */
2034 if (trueop0 == const0_rtx)
2035 return side_effects_p (op1)
2036 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2037 : const0_rtx;
2038 /* x%1 is 0 (or x&0 if x has side-effects). */
2039 if (trueop1 == const1_rtx)
2040 return side_effects_p (op0)
2041 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2042 : const0_rtx;
2043 /* Implement modulus by power of two as AND. */
2044 if (GET_CODE (trueop1) == CONST_INT
2045 && exact_log2 (INTVAL (trueop1)) > 0)
2046 return simplify_gen_binary (AND, mode, op0,
2047 GEN_INT (INTVAL (op1) - 1));
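 /* Worked example (illustrative): for (umod:SI X (const_int 16)),
    exact_log2 (16) == 4 > 0, so the result is (and:SI X (const_int 15));
    for unsigned values x % 16 == x & 15.  */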
2048 break;
2049
2050 case MOD:
2051 /* 0%x is 0 (or x&0 if x has side-effects). */
2052 if (trueop0 == const0_rtx)
2053 return side_effects_p (op1)
2054 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2055 : const0_rtx;
2056 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2057 if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
2058 return side_effects_p (op0)
2059 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2060 : const0_rtx;
2061 break;
2062
2063 case ROTATERT:
2064 case ROTATE:
2065 case ASHIFTRT:
2066 /* Rotating ~0 always results in ~0. */
2067 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2068 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2069 && ! side_effects_p (op1))
2070 return op0;
2071
2072 /* Fall through.... */
2073
2074 case ASHIFT:
2075 case LSHIFTRT:
2076 if (trueop1 == const0_rtx)
2077 return op0;
2078 if (trueop0 == const0_rtx && ! side_effects_p (op1))
2079 return op0;
2080 break;
2081
2082 case SMIN:
2083 if (width <= HOST_BITS_PER_WIDE_INT
2084 && GET_CODE (trueop1) == CONST_INT
2085 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2086 && ! side_effects_p (op0))
2087 return op1;
2088 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2089 return op0;
2090 tem = simplify_associative_operation (code, mode, op0, op1);
2091 if (tem)
2092 return tem;
2093 break;
2094
2095 case SMAX:
2096 if (width <= HOST_BITS_PER_WIDE_INT
2097 && GET_CODE (trueop1) == CONST_INT
2098 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2099 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2100 && ! side_effects_p (op0))
2101 return op1;
2102 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2103 return op0;
2104 tem = simplify_associative_operation (code, mode, op0, op1);
2105 if (tem)
2106 return tem;
2107 break;
2108
2109 case UMIN:
2110 if (trueop1 == const0_rtx && ! side_effects_p (op0))
2111 return op1;
2112 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2113 return op0;
2114 tem = simplify_associative_operation (code, mode, op0, op1);
2115 if (tem)
2116 return tem;
2117 break;
2118
2119 case UMAX:
2120 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2121 return op1;
2122 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2123 return op0;
2124 tem = simplify_associative_operation (code, mode, op0, op1);
2125 if (tem)
2126 return tem;
2127 break;
2128
2129 case SS_PLUS:
2130 case US_PLUS:
2131 case SS_MINUS:
2132 case US_MINUS:
2133 /* ??? There are simplifications that can be done. */
2134 return 0;
2135
2136 case VEC_SELECT:
2137 if (!VECTOR_MODE_P (mode))
2138 {
2139 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2140 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2141 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2142 gcc_assert (XVECLEN (trueop1, 0) == 1);
2143 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2144
2145 if (GET_CODE (trueop0) == CONST_VECTOR)
2146 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2147 (trueop1, 0, 0)));
2148 }
2149 else
2150 {
2151 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2152 gcc_assert (GET_MODE_INNER (mode)
2153 == GET_MODE_INNER (GET_MODE (trueop0)));
2154 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2155
2156 if (GET_CODE (trueop0) == CONST_VECTOR)
2157 {
2158 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2159 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2160 rtvec v = rtvec_alloc (n_elts);
2161 unsigned int i;
2162
2163 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2164 for (i = 0; i < n_elts; i++)
2165 {
2166 rtx x = XVECEXP (trueop1, 0, i);
2167
2168 gcc_assert (GET_CODE (x) == CONST_INT);
2169 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2170 INTVAL (x));
2171 }
2172
2173 return gen_rtx_CONST_VECTOR (mode, v);
2174 }
2175 }
2176 return 0;
2177 case VEC_CONCAT:
2178 {
2179 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2180 ? GET_MODE (trueop0)
2181 : GET_MODE_INNER (mode));
2182 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2183 ? GET_MODE (trueop1)
2184 : GET_MODE_INNER (mode));
2185
2186 gcc_assert (VECTOR_MODE_P (mode));
2187 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2188 == GET_MODE_SIZE (mode));
2189
2190 if (VECTOR_MODE_P (op0_mode))
2191 gcc_assert (GET_MODE_INNER (mode)
2192 == GET_MODE_INNER (op0_mode));
2193 else
2194 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2195
2196 if (VECTOR_MODE_P (op1_mode))
2197 gcc_assert (GET_MODE_INNER (mode)
2198 == GET_MODE_INNER (op1_mode));
2199 else
2200 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2201
2202 if ((GET_CODE (trueop0) == CONST_VECTOR
2203 || GET_CODE (trueop0) == CONST_INT
2204 || GET_CODE (trueop0) == CONST_DOUBLE)
2205 && (GET_CODE (trueop1) == CONST_VECTOR
2206 || GET_CODE (trueop1) == CONST_INT
2207 || GET_CODE (trueop1) == CONST_DOUBLE))
2208 {
2209 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2210 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2211 rtvec v = rtvec_alloc (n_elts);
2212 unsigned int i;
2213 unsigned in_n_elts = 1;
2214
2215 if (VECTOR_MODE_P (op0_mode))
2216 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2217 for (i = 0; i < n_elts; i++)
2218 {
2219 if (i < in_n_elts)
2220 {
2221 if (!VECTOR_MODE_P (op0_mode))
2222 RTVEC_ELT (v, i) = trueop0;
2223 else
2224 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2225 }
2226 else
2227 {
2228 if (!VECTOR_MODE_P (op1_mode))
2229 RTVEC_ELT (v, i) = trueop1;
2230 else
2231 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2232 i - in_n_elts);
2233 }
2234 }
2235
2236 return gen_rtx_CONST_VECTOR (mode, v);
2237 }
2238 }
2239 return 0;
2240
2241 default:
2242 gcc_unreachable ();
2243 }
2244
2245 return 0;
2246 }
2247
2248 /* Get the integer argument values in two forms:
2249 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2250
2251 arg0 = INTVAL (trueop0);
2252 arg1 = INTVAL (trueop1);
2253
2254 if (width < HOST_BITS_PER_WIDE_INT)
2255 {
2256 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2257 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2258
2259 arg0s = arg0;
2260 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2261 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2262
2263 arg1s = arg1;
2264 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2265 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2266 }
2267 else
2268 {
2269 arg0s = arg0;
2270 arg1s = arg1;
2271 }
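 /* Illustrative example: in QImode (width == 8), an operand whose low byte
    is 0xff gives arg0 == 0xff after the zero-extension above, while arg0s
    becomes -1 after sign extension; the signed cases below (DIV, MOD,
    SMIN, ...) use arg0s and the unsigned ones (UDIV, UMOD, UMIN, ...)
    use arg0.  */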
2272
2273 /* Compute the value of the arithmetic. */
2274
2275 switch (code)
2276 {
2277 case PLUS:
2278 val = arg0s + arg1s;
2279 break;
2280
2281 case MINUS:
2282 val = arg0s - arg1s;
2283 break;
2284
2285 case MULT:
2286 val = arg0s * arg1s;
2287 break;
2288
2289 case DIV:
2290 if (arg1s == 0
2291 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2292 && arg1s == -1))
2293 return 0;
2294 val = arg0s / arg1s;
2295 break;
2296
2297 case MOD:
2298 if (arg1s == 0
2299 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2300 && arg1s == -1))
2301 return 0;
2302 val = arg0s % arg1s;
2303 break;
2304
2305 case UDIV:
2306 if (arg1 == 0
2307 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2308 && arg1s == -1))
2309 return 0;
2310 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2311 break;
2312
2313 case UMOD:
2314 if (arg1 == 0
2315 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2316 && arg1s == -1))
2317 return 0;
2318 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2319 break;
2320
2321 case AND:
2322 val = arg0 & arg1;
2323 break;
2324
2325 case IOR:
2326 val = arg0 | arg1;
2327 break;
2328
2329 case XOR:
2330 val = arg0 ^ arg1;
2331 break;
2332
2333 case LSHIFTRT:
2334 case ASHIFT:
2335 case ASHIFTRT:
2336 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure the
2337 value is in range. We can't return any old value for out-of-range
2338 arguments because either the middle-end (via shift_truncation_mask)
2339 or the back-end might be relying on target-specific knowledge.
2340 Nor can we rely on shift_truncation_mask, since the shift might
2341 not be part of an ashlM3, lshrM3 or ashrM3 instruction. */
2342 if (SHIFT_COUNT_TRUNCATED)
2343 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2344 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2345 return 0;
2346
2347 val = (code == ASHIFT
2348 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2349 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2350
2351 /* Sign-extend the result for arithmetic right shifts. */
2352 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2353 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
2354 break;
2355
2356 case ROTATERT:
2357 if (arg1 < 0)
2358 return 0;
2359
2360 arg1 %= width;
2361 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2362 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2363 break;
2364
2365 case ROTATE:
2366 if (arg1 < 0)
2367 return 0;
2368
2369 arg1 %= width;
2370 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2371 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
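 /* Worked example (illustrative): in QImode, rotating 0xb1 left by 4 gives
    (0xb1 << 4) | (0xb1 >> 4) == 0xb1b; trunc_int_for_mode below reduces
    this to the expected 0x1b.  */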
2372 break;
2373
2374 case COMPARE:
2375 /* Do nothing here. */
2376 return 0;
2377
2378 case SMIN:
2379 val = arg0s <= arg1s ? arg0s : arg1s;
2380 break;
2381
2382 case UMIN:
2383 val = ((unsigned HOST_WIDE_INT) arg0
2384 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2385 break;
2386
2387 case SMAX:
2388 val = arg0s > arg1s ? arg0s : arg1s;
2389 break;
2390
2391 case UMAX:
2392 val = ((unsigned HOST_WIDE_INT) arg0
2393 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2394 break;
2395
2396 case SS_PLUS:
2397 case US_PLUS:
2398 case SS_MINUS:
2399 case US_MINUS:
2400 /* ??? There are simplifications that can be done. */
2401 return 0;
2402
2403 default:
2404 gcc_unreachable ();
2405 }
2406
2407 val = trunc_int_for_mode (val, mode);
2408
2409 return GEN_INT (val);
2410 }
2411 \f
2412 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2413 PLUS or MINUS.
2414
2415 Rather than test for specific cases, we do this by a brute-force method
2416 and do all possible simplifications until no more changes occur. Then
2417 we rebuild the operation.
2418
2419 If FORCE is true, then always generate the rtx. This is used to
2420 canonicalize stuff emitted from simplify_gen_binary. Note that this
2421 can still fail if the rtx is too complex. It won't fail just because
2422 the result is not 'simpler' than the input, however. */
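 /* A rough illustration: given (minus (plus (reg A) (const_int 4))
    (plus (reg A) (reg B))), the expansion loop below flattens the operands
    into { +A, +4, -A, -B }; the combination loop cancels +A against -A,
    and the surviving operands are rebuilt as (minus (const_int 4) (reg B)).  */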
2423
2424 struct simplify_plus_minus_op_data
2425 {
2426 rtx op;
2427 int neg;
2428 };
2429
2430 static int
2431 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2432 {
2433 const struct simplify_plus_minus_op_data *d1 = p1;
2434 const struct simplify_plus_minus_op_data *d2 = p2;
2435
2436 return (commutative_operand_precedence (d2->op)
2437 - commutative_operand_precedence (d1->op));
2438 }
2439
2440 static rtx
2441 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2442 rtx op1, int force)
2443 {
2444 struct simplify_plus_minus_op_data ops[8];
2445 rtx result, tem;
2446 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2447 int first, changed;
2448 int i, j;
2449
2450 memset (ops, 0, sizeof ops);
2451
2452 /* Set up the two operands and then expand them until nothing has been
2453 changed. If we run out of room in our array, give up; this should
2454 almost never happen. */
2455
2456 ops[0].op = op0;
2457 ops[0].neg = 0;
2458 ops[1].op = op1;
2459 ops[1].neg = (code == MINUS);
2460
2461 do
2462 {
2463 changed = 0;
2464
2465 for (i = 0; i < n_ops; i++)
2466 {
2467 rtx this_op = ops[i].op;
2468 int this_neg = ops[i].neg;
2469 enum rtx_code this_code = GET_CODE (this_op);
2470
2471 switch (this_code)
2472 {
2473 case PLUS:
2474 case MINUS:
2475 if (n_ops == 7)
2476 return NULL_RTX;
2477
2478 ops[n_ops].op = XEXP (this_op, 1);
2479 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2480 n_ops++;
2481
2482 ops[i].op = XEXP (this_op, 0);
2483 input_ops++;
2484 changed = 1;
2485 break;
2486
2487 case NEG:
2488 ops[i].op = XEXP (this_op, 0);
2489 ops[i].neg = ! this_neg;
2490 changed = 1;
2491 break;
2492
2493 case CONST:
2494 if (n_ops < 7
2495 && GET_CODE (XEXP (this_op, 0)) == PLUS
2496 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2497 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2498 {
2499 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2500 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2501 ops[n_ops].neg = this_neg;
2502 n_ops++;
2503 input_consts++;
2504 changed = 1;
2505 }
2506 break;
2507
2508 case NOT:
2509 /* ~a -> (-a - 1) */
2510 if (n_ops != 7)
2511 {
2512 ops[n_ops].op = constm1_rtx;
2513 ops[n_ops++].neg = this_neg;
2514 ops[i].op = XEXP (this_op, 0);
2515 ops[i].neg = !this_neg;
2516 changed = 1;
2517 }
2518 break;
2519
2520 case CONST_INT:
2521 if (this_neg)
2522 {
2523 ops[i].op = neg_const_int (mode, this_op);
2524 ops[i].neg = 0;
2525 changed = 1;
2526 }
2527 break;
2528
2529 default:
2530 break;
2531 }
2532 }
2533 }
2534 while (changed);
2535
2536 /* If we only have two operands, we can't do anything. */
2537 if (n_ops <= 2 && !force)
2538 return NULL_RTX;
2539
2540 /* Count the number of CONSTs we didn't split above. */
2541 for (i = 0; i < n_ops; i++)
2542 if (GET_CODE (ops[i].op) == CONST)
2543 input_consts++;
2544
2545 /* Now simplify each pair of operands until nothing changes. The first
2546 time through just simplify constants against each other. */
2547
2548 first = 1;
2549 do
2550 {
2551 changed = first;
2552
2553 for (i = 0; i < n_ops - 1; i++)
2554 for (j = i + 1; j < n_ops; j++)
2555 {
2556 rtx lhs = ops[i].op, rhs = ops[j].op;
2557 int lneg = ops[i].neg, rneg = ops[j].neg;
2558
2559 if (lhs != 0 && rhs != 0
2560 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2561 {
2562 enum rtx_code ncode = PLUS;
2563
2564 if (lneg != rneg)
2565 {
2566 ncode = MINUS;
2567 if (lneg)
2568 tem = lhs, lhs = rhs, rhs = tem;
2569 }
2570 else if (swap_commutative_operands_p (lhs, rhs))
2571 tem = lhs, lhs = rhs, rhs = tem;
2572
2573 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2574
2575 /* Reject "simplifications" that just wrap the two
2576 arguments in a CONST. Failure to do so can result
2577 in infinite recursion with simplify_binary_operation
2578 when it calls us to simplify CONST operations. */
2579 if (tem
2580 && ! (GET_CODE (tem) == CONST
2581 && GET_CODE (XEXP (tem, 0)) == ncode
2582 && XEXP (XEXP (tem, 0), 0) == lhs
2583 && XEXP (XEXP (tem, 0), 1) == rhs)
2584 /* Don't allow -x + -1 -> ~x simplifications in the
2585 first pass. This allows us the chance to combine
2586 the -1 with other constants. */
2587 && ! (first
2588 && GET_CODE (tem) == NOT
2589 && XEXP (tem, 0) == rhs))
2590 {
2591 lneg &= rneg;
2592 if (GET_CODE (tem) == NEG)
2593 tem = XEXP (tem, 0), lneg = !lneg;
2594 if (GET_CODE (tem) == CONST_INT && lneg)
2595 tem = neg_const_int (mode, tem), lneg = 0;
2596
2597 ops[i].op = tem;
2598 ops[i].neg = lneg;
2599 ops[j].op = NULL_RTX;
2600 changed = 1;
2601 }
2602 }
2603 }
2604
2605 first = 0;
2606 }
2607 while (changed);
2608
2609 /* Pack all the operands to the lower-numbered entries. */
2610 for (i = 0, j = 0; j < n_ops; j++)
2611 if (ops[j].op)
2612 ops[i++] = ops[j];
2613 n_ops = i;
2614
2615 /* Sort the operations based on swap_commutative_operands_p. */
2616 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2617
2618 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2619 if (n_ops == 2
2620 && GET_CODE (ops[1].op) == CONST_INT
2621 && CONSTANT_P (ops[0].op)
2622 && ops[0].neg)
2623 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2624
2625 /* We suppressed creation of trivial CONST expressions in the
2626 combination loop to avoid recursion. Create one manually now.
2627 The combination loop should have ensured that there is exactly
2628 one CONST_INT, and the sort will have ensured that it is last
2629 in the array and that any other constant will be next-to-last. */
2630
2631 if (n_ops > 1
2632 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2633 && CONSTANT_P (ops[n_ops - 2].op))
2634 {
2635 rtx value = ops[n_ops - 1].op;
2636 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2637 value = neg_const_int (mode, value);
2638 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2639 n_ops--;
2640 }
2641
2642 /* Count the number of CONSTs that we generated. */
2643 n_consts = 0;
2644 for (i = 0; i < n_ops; i++)
2645 if (GET_CODE (ops[i].op) == CONST)
2646 n_consts++;
2647
2648 /* Give up if we didn't reduce the number of operands we had. Make
2649 sure we count a CONST as two operands. If we have the same
2650 number of operands, but have made more CONSTs than before, this
2651 is also an improvement, so accept it. */
2652 if (!force
2653 && (n_ops + n_consts > input_ops
2654 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2655 return NULL_RTX;
2656
2657 /* Put a non-negated operand first, if possible. */
2658
2659 for (i = 0; i < n_ops && ops[i].neg; i++)
2660 continue;
2661 if (i == n_ops)
2662 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2663 else if (i != 0)
2664 {
2665 tem = ops[0].op;
2666 ops[0] = ops[i];
2667 ops[i].op = tem;
2668 ops[i].neg = 1;
2669 }
2670
2671 /* Now make the result by performing the requested operations. */
2672 result = ops[0].op;
2673 for (i = 1; i < n_ops; i++)
2674 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2675 mode, result, ops[i].op);
2676
2677 return result;
2678 }
2679
2680 /* Like simplify_binary_operation except used for relational operators.
2681 MODE is the mode of the result. If MODE is VOIDmode, both operands must
2682 not also be VOIDmode.
2683
2684 CMP_MODE specifies the mode in which the comparison is done, so it is
2685 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2686 the operands or, if both are VOIDmode, the operands are compared in
2687 "infinite precision". */
2688 rtx
2689 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2690 enum machine_mode cmp_mode, rtx op0, rtx op1)
2691 {
2692 rtx tem, trueop0, trueop1;
2693
2694 if (cmp_mode == VOIDmode)
2695 cmp_mode = GET_MODE (op0);
2696 if (cmp_mode == VOIDmode)
2697 cmp_mode = GET_MODE (op1);
2698
2699 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2700 if (tem)
2701 {
2702 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2703 {
2704 if (tem == const0_rtx)
2705 return CONST0_RTX (mode);
2706 #ifdef FLOAT_STORE_FLAG_VALUE
2707 {
2708 REAL_VALUE_TYPE val;
2709 val = FLOAT_STORE_FLAG_VALUE (mode);
2710 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2711 }
2712 #else
2713 return NULL_RTX;
2714 #endif
2715 }
2716 if (VECTOR_MODE_P (mode))
2717 {
2718 if (tem == const0_rtx)
2719 return CONST0_RTX (mode);
2720 #ifdef VECTOR_STORE_FLAG_VALUE
2721 {
2722 int i, units;
2723 rtvec v;
2724
2725 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2726 if (val == NULL_RTX)
2727 return NULL_RTX;
2728 if (val == const1_rtx)
2729 return CONST1_RTX (mode);
2730
2731 units = GET_MODE_NUNITS (mode);
2732 v = rtvec_alloc (units);
2733 for (i = 0; i < units; i++)
2734 RTVEC_ELT (v, i) = val;
2735 return gen_rtx_raw_CONST_VECTOR (mode, v);
2736 }
2737 #else
2738 return NULL_RTX;
2739 #endif
2740 }
2741
2742 return tem;
2743 }
2744
2745 /* For the following tests, ensure const0_rtx is op1. */
2746 if (swap_commutative_operands_p (op0, op1)
2747 || (op0 == const0_rtx && op1 != const0_rtx))
2748 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2749
2750 /* If op0 is a compare, extract the comparison arguments from it. */
2751 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2752 return simplify_relational_operation (code, mode, VOIDmode,
2753 XEXP (op0, 0), XEXP (op0, 1));
2754
2755 if (mode == VOIDmode
2756 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2757 || CC0_P (op0))
2758 return NULL_RTX;
2759
2760 trueop0 = avoid_constant_pool_reference (op0);
2761 trueop1 = avoid_constant_pool_reference (op1);
2762 return simplify_relational_operation_1 (code, mode, cmp_mode,
2763 trueop0, trueop1);
2764 }
2765
2766 /* This part of simplify_relational_operation is only used when CMP_MODE
2767 is not in class MODE_CC (i.e. it is a real comparison).
2768
2769 MODE is the mode of the result, while CMP_MODE specifies the mode
2770 in which the comparison is done, so it is the mode of the operands. */
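/* Illustrative examples (assuming SImode operands): (ne:SI (ltu:SI X Y)
   (const_int 0)) simplifies to (ltu:SI X Y), while (eq:SI (ltu:SI X Y)
   (const_int 0)) becomes the reversed comparison (geu:SI X Y) when
   reversed_comparison_code can determine the reversal.  */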
2771 rtx
2772 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2773 enum machine_mode cmp_mode, rtx op0, rtx op1)
2774 {
2775 if (GET_CODE (op1) == CONST_INT)
2776 {
2777 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2778 {
2779 /* If op0 is a comparison, extract the comparison arguments from it. */
2780 if (code == NE)
2781 {
2782 if (GET_MODE (op0) == cmp_mode)
2783 return simplify_rtx (op0);
2784 else
2785 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2786 XEXP (op0, 0), XEXP (op0, 1));
2787 }
2788 else if (code == EQ)
2789 {
2790 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2791 if (new_code != UNKNOWN)
2792 return simplify_gen_relational (new_code, mode, VOIDmode,
2793 XEXP (op0, 0), XEXP (op0, 1));
2794 }
2795 }
2796 }
2797
2798 return NULL_RTX;
2799 }
2800
2801 /* Check if the given comparison (done in the given MODE) is actually a
2802 tautology or a contradiction.
2803 If no simplification is possible, this function returns zero.
2804 Otherwise, it returns either const_true_rtx or const0_rtx. */
2805
2806 rtx
2807 simplify_const_relational_operation (enum rtx_code code,
2808 enum machine_mode mode,
2809 rtx op0, rtx op1)
2810 {
2811 int equal, op0lt, op0ltu, op1lt, op1ltu;
2812 rtx tem;
2813 rtx trueop0;
2814 rtx trueop1;
2815
2816 gcc_assert (mode != VOIDmode
2817 || (GET_MODE (op0) == VOIDmode
2818 && GET_MODE (op1) == VOIDmode));
2819
2820 /* If op0 is a compare, extract the comparison arguments from it. */
2821 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2822 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2823
2824 /* We can't simplify MODE_CC values since we don't know what the
2825 actual comparison is. */
2826 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2827 return 0;
2828
2829 /* Make sure the constant is second. */
2830 if (swap_commutative_operands_p (op0, op1))
2831 {
2832 tem = op0, op0 = op1, op1 = tem;
2833 code = swap_condition (code);
2834 }
2835
2836 trueop0 = avoid_constant_pool_reference (op0);
2837 trueop1 = avoid_constant_pool_reference (op1);
2838
2839 /* For integer comparisons of A and B maybe we can simplify A - B and can
2840 then simplify a comparison of that with zero. If A and B are both either
2841 a register or a CONST_INT, this can't help; testing for these cases will
2842 prevent infinite recursion here and speed things up.
2843
2844 If CODE is an unsigned comparison, then we can never do this optimization,
2845 because it gives an incorrect result if the subtraction wraps around zero.
2846 ANSI C defines unsigned operations such that they never overflow, and
2847 thus such cases cannot be ignored; but we cannot do it even for
2848 signed comparisons for languages such as Java, so test flag_wrapv. */
2849
2850 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2851 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
2852 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
2853 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2854 /* We cannot do this for == or != if tem is a nonzero address. */
2855 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2856 && code != GTU && code != GEU && code != LTU && code != LEU)
2857 return simplify_const_relational_operation (signed_condition (code),
2858 mode, tem, const0_rtx);
2859
2860 if (flag_unsafe_math_optimizations && code == ORDERED)
2861 return const_true_rtx;
2862
2863 if (flag_unsafe_math_optimizations && code == UNORDERED)
2864 return const0_rtx;
2865
2866 /* For modes without NaNs, if the two operands are equal, we know the
2867 result except if they have side-effects. */
2868 if (! HONOR_NANS (GET_MODE (trueop0))
2869 && rtx_equal_p (trueop0, trueop1)
2870 && ! side_effects_p (trueop0))
2871 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2872
2873 /* If the operands are floating-point constants, see if we can fold
2874 the result. */
2875 else if (GET_CODE (trueop0) == CONST_DOUBLE
2876 && GET_CODE (trueop1) == CONST_DOUBLE
2877 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2878 {
2879 REAL_VALUE_TYPE d0, d1;
2880
2881 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2882 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2883
2884 /* Comparisons are unordered iff at least one of the values is NaN. */
2885 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2886 switch (code)
2887 {
2888 case UNEQ:
2889 case UNLT:
2890 case UNGT:
2891 case UNLE:
2892 case UNGE:
2893 case NE:
2894 case UNORDERED:
2895 return const_true_rtx;
2896 case EQ:
2897 case LT:
2898 case GT:
2899 case LE:
2900 case GE:
2901 case LTGT:
2902 case ORDERED:
2903 return const0_rtx;
2904 default:
2905 return 0;
2906 }
2907
2908 equal = REAL_VALUES_EQUAL (d0, d1);
2909 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2910 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2911 }
2912
2913 /* Otherwise, see if the operands are both integers. */
2914 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2915 && (GET_CODE (trueop0) == CONST_DOUBLE
2916 || GET_CODE (trueop0) == CONST_INT)
2917 && (GET_CODE (trueop1) == CONST_DOUBLE
2918 || GET_CODE (trueop1) == CONST_INT))
2919 {
2920 int width = GET_MODE_BITSIZE (mode);
2921 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2922 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2923
2924 /* Get the two words comprising each integer constant. */
2925 if (GET_CODE (trueop0) == CONST_DOUBLE)
2926 {
2927 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2928 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2929 }
2930 else
2931 {
2932 l0u = l0s = INTVAL (trueop0);
2933 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2934 }
2935
2936 if (GET_CODE (trueop1) == CONST_DOUBLE)
2937 {
2938 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2939 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2940 }
2941 else
2942 {
2943 l1u = l1s = INTVAL (trueop1);
2944 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2945 }
2946
2947 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2948 we have to sign or zero-extend the values. */
2949 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2950 {
2951 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2952 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2953
2954 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2955 l0s |= ((HOST_WIDE_INT) (-1) << width);
2956
2957 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2958 l1s |= ((HOST_WIDE_INT) (-1) << width);
2959 }
2960 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2961 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2962
2963 equal = (h0u == h1u && l0u == l1u);
2964 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2965 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2966 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2967 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
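 /* Illustrative example: comparing (const_int -1) with (const_int 1) in
    SImode gives h0s == -1 and h1s == 0, so op0lt is set (signed -1 < 1),
    while h0u == h1u == 0 and l0u == 0xffffffff > l1u == 1, so op0ltu
    stays clear (unsigned 0xffffffff > 1).  */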
2968 }
2969
2970 /* Otherwise, there are some code-specific tests we can make. */
2971 else
2972 {
2973 /* Optimize comparisons with upper and lower bounds. */
2974 if (SCALAR_INT_MODE_P (mode)
2975 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2976 {
2977 rtx mmin, mmax;
2978 int sign;
2979
2980 if (code == GEU
2981 || code == LEU
2982 || code == GTU
2983 || code == LTU)
2984 sign = 0;
2985 else
2986 sign = 1;
2987
2988 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
2989
2990 tem = NULL_RTX;
2991 switch (code)
2992 {
2993 case GEU:
2994 case GE:
2995 /* x >= min is always true. */
2996 if (rtx_equal_p (trueop1, mmin))
2997 tem = const_true_rtx;
2998 else
2999 break;
3000
3001 case LEU:
3002 case LE:
3003 /* x <= max is always true. */
3004 if (rtx_equal_p (trueop1, mmax))
3005 tem = const_true_rtx;
3006 break;
3007
3008 case GTU:
3009 case GT:
3010 /* x > max is always false. */
3011 if (rtx_equal_p (trueop1, mmax))
3012 tem = const0_rtx;
3013 break;
3014
3015 case LTU:
3016 case LT:
3017 /* x < min is always false. */
3018 if (rtx_equal_p (trueop1, mmin))
3019 tem = const0_rtx;
3020 break;
3021
3022 default:
3023 break;
3024 }
3025 if (tem == const0_rtx
3026 || tem == const_true_rtx)
3027 return tem;
3028 }
3029
3030 switch (code)
3031 {
3032 case EQ:
3033 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3034 return const0_rtx;
3035 break;
3036
3037 case NE:
3038 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3039 return const_true_rtx;
3040 break;
3041
3042 case LT:
3043 /* Optimize abs(x) < 0.0. */
3044 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
3045 {
3046 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3047 : trueop0;
3048 if (GET_CODE (tem) == ABS)
3049 return const0_rtx;
3050 }
3051 break;
3052
3053 case GE:
3054 /* Optimize abs(x) >= 0.0. */
3055 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
3056 {
3057 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3058 : trueop0;
3059 if (GET_CODE (tem) == ABS)
3060 return const_true_rtx;
3061 }
3062 break;
3063
3064 case UNGE:
3065 /* Optimize ! (abs(x) < 0.0). */
3066 if (trueop1 == CONST0_RTX (mode))
3067 {
3068 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3069 : trueop0;
3070 if (GET_CODE (tem) == ABS)
3071 return const_true_rtx;
3072 }
3073 break;
3074
3075 default:
3076 break;
3077 }
3078
3079 return 0;
3080 }
3081
3082 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3083 as appropriate. */
3084 switch (code)
3085 {
3086 case EQ:
3087 case UNEQ:
3088 return equal ? const_true_rtx : const0_rtx;
3089 case NE:
3090 case LTGT:
3091 return ! equal ? const_true_rtx : const0_rtx;
3092 case LT:
3093 case UNLT:
3094 return op0lt ? const_true_rtx : const0_rtx;
3095 case GT:
3096 case UNGT:
3097 return op1lt ? const_true_rtx : const0_rtx;
3098 case LTU:
3099 return op0ltu ? const_true_rtx : const0_rtx;
3100 case GTU:
3101 return op1ltu ? const_true_rtx : const0_rtx;
3102 case LE:
3103 case UNLE:
3104 return equal || op0lt ? const_true_rtx : const0_rtx;
3105 case GE:
3106 case UNGE:
3107 return equal || op1lt ? const_true_rtx : const0_rtx;
3108 case LEU:
3109 return equal || op0ltu ? const_true_rtx : const0_rtx;
3110 case GEU:
3111 return equal || op1ltu ? const_true_rtx : const0_rtx;
3112 case ORDERED:
3113 return const_true_rtx;
3114 case UNORDERED:
3115 return const0_rtx;
3116 default:
3117 gcc_unreachable ();
3118 }
3119 }
3120 \f
3121 /* Simplify CODE, an operation with result mode MODE and three operands,
3122 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3123 a constant. Return 0 if no simplification is possible. */
3124
3125 rtx
3126 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3127 enum machine_mode op0_mode, rtx op0, rtx op1,
3128 rtx op2)
3129 {
3130 unsigned int width = GET_MODE_BITSIZE (mode);
3131
3132 /* VOIDmode means "infinite" precision. */
3133 if (width == 0)
3134 width = HOST_BITS_PER_WIDE_INT;
3135
3136 switch (code)
3137 {
3138 case SIGN_EXTRACT:
3139 case ZERO_EXTRACT:
3140 if (GET_CODE (op0) == CONST_INT
3141 && GET_CODE (op1) == CONST_INT
3142 && GET_CODE (op2) == CONST_INT
3143 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3144 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3145 {
3146 /* Extracting a bit-field from a constant */
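 /* Illustrative example (assuming !BITS_BIG_ENDIAN): extracting 4 bits at
    bit position 4 from (const_int 0xab) shifts right by 4 and masks,
    giving 0xa for ZERO_EXTRACT; for SIGN_EXTRACT bit 3 of that field is
    set, so the sign propagation below yields -6.  */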
3147 HOST_WIDE_INT val = INTVAL (op0);
3148
3149 if (BITS_BIG_ENDIAN)
3150 val >>= (GET_MODE_BITSIZE (op0_mode)
3151 - INTVAL (op2) - INTVAL (op1));
3152 else
3153 val >>= INTVAL (op2);
3154
3155 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3156 {
3157 /* First zero-extend. */
3158 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3159 /* If desired, propagate sign bit. */
3160 if (code == SIGN_EXTRACT
3161 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3162 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3163 }
3164
3165 /* Clear the bits that don't belong in our mode,
3166 unless they and our sign bit are all one.
3167 So we get either a reasonable negative value or a reasonable
3168 unsigned value for this mode. */
3169 if (width < HOST_BITS_PER_WIDE_INT
3170 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3171 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3172 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3173
3174 return gen_int_mode (val, mode);
3175 }
3176 break;
3177
3178 case IF_THEN_ELSE:
3179 if (GET_CODE (op0) == CONST_INT)
3180 return op0 != const0_rtx ? op1 : op2;
3181
3182 /* Convert c ? a : a into "a". */
3183 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3184 return op1;
3185
3186 /* Convert a != b ? a : b into "a". */
3187 if (GET_CODE (op0) == NE
3188 && ! side_effects_p (op0)
3189 && ! HONOR_NANS (mode)
3190 && ! HONOR_SIGNED_ZEROS (mode)
3191 && ((rtx_equal_p (XEXP (op0, 0), op1)
3192 && rtx_equal_p (XEXP (op0, 1), op2))
3193 || (rtx_equal_p (XEXP (op0, 0), op2)
3194 && rtx_equal_p (XEXP (op0, 1), op1))))
3195 return op1;
3196
3197 /* Convert a == b ? a : b into "b". */
3198 if (GET_CODE (op0) == EQ
3199 && ! side_effects_p (op0)
3200 && ! HONOR_NANS (mode)
3201 && ! HONOR_SIGNED_ZEROS (mode)
3202 && ((rtx_equal_p (XEXP (op0, 0), op1)
3203 && rtx_equal_p (XEXP (op0, 1), op2))
3204 || (rtx_equal_p (XEXP (op0, 0), op2)
3205 && rtx_equal_p (XEXP (op0, 1), op1))))
3206 return op2;
3207
3208 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3209 {
3210 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3211 ? GET_MODE (XEXP (op0, 1))
3212 : GET_MODE (XEXP (op0, 0)));
3213 rtx temp;
3214
3215 /* Look for happy constants in op1 and op2. */
3216 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3217 {
3218 HOST_WIDE_INT t = INTVAL (op1);
3219 HOST_WIDE_INT f = INTVAL (op2);
3220
3221 if (t == STORE_FLAG_VALUE && f == 0)
3222 code = GET_CODE (op0);
3223 else if (t == 0 && f == STORE_FLAG_VALUE)
3224 {
3225 enum rtx_code tmp;
3226 tmp = reversed_comparison_code (op0, NULL_RTX);
3227 if (tmp == UNKNOWN)
3228 break;
3229 code = tmp;
3230 }
3231 else
3232 break;
3233
3234 return simplify_gen_relational (code, mode, cmp_mode,
3235 XEXP (op0, 0), XEXP (op0, 1));
3236 }
3237
3238 if (cmp_mode == VOIDmode)
3239 cmp_mode = op0_mode;
3240 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3241 cmp_mode, XEXP (op0, 0),
3242 XEXP (op0, 1));
3243
3244 /* See if any simplifications were possible. */
3245 if (temp)
3246 {
3247 if (GET_CODE (temp) == CONST_INT)
3248 return temp == const0_rtx ? op2 : op1;
3249 else if (temp)
3250 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3251 }
3252 }
3253 break;
3254
3255 case VEC_MERGE:
3256 gcc_assert (GET_MODE (op0) == mode);
3257 gcc_assert (GET_MODE (op1) == mode);
3258 gcc_assert (VECTOR_MODE_P (mode));
3259 op2 = avoid_constant_pool_reference (op2);
3260 if (GET_CODE (op2) == CONST_INT)
3261 {
3262 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3263 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3264 int mask = (1 << n_elts) - 1;
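 /* Illustrative example: for V4SImode, n_elts == 4 and mask == 0xf.
    With op2 == (const_int 5) (binary 0101), elements 0 and 2 of the
    result come from op0 and elements 1 and 3 from op1, per the per-bit
    selection below.  */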
3265
3266 if (!(INTVAL (op2) & mask))
3267 return op1;
3268 if ((INTVAL (op2) & mask) == mask)
3269 return op0;
3270
3271 op0 = avoid_constant_pool_reference (op0);
3272 op1 = avoid_constant_pool_reference (op1);
3273 if (GET_CODE (op0) == CONST_VECTOR
3274 && GET_CODE (op1) == CONST_VECTOR)
3275 {
3276 rtvec v = rtvec_alloc (n_elts);
3277 unsigned int i;
3278
3279 for (i = 0; i < n_elts; i++)
3280 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3281 ? CONST_VECTOR_ELT (op0, i)
3282 : CONST_VECTOR_ELT (op1, i));
3283 return gen_rtx_CONST_VECTOR (mode, v);
3284 }
3285 }
3286 break;
3287
3288 default:
3289 gcc_unreachable ();
3290 }
3291
3292 return 0;
3293 }
3294
3295 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3296 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3297
3298 Works by unpacking OP into a collection of 8-bit values
3299 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3300 and then repacking them again for OUTERMODE. */
3301
3302 static rtx
3303 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3304 enum machine_mode innermode, unsigned int byte)
3305 {
3306 /* We support up to 512-bit values (for V8DFmode). */
3307 enum {
3308 max_bitsize = 512,
3309 value_bit = 8,
3310 value_mask = (1 << value_bit) - 1
3311 };
3312 unsigned char value[max_bitsize / value_bit];
3313 int value_start;
3314 int i;
3315 int elem;
3316
3317 int num_elem;
3318 rtx * elems;
3319 int elem_bitsize;
3320 rtx result_s;
3321 rtvec result_v = NULL;
3322 enum mode_class outer_class;
3323 enum machine_mode outer_submode;
3324
3325 /* Some ports misuse CCmode. */
3326 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3327 return op;
3328
3329 /* Unpack the value. */
3330
3331 if (GET_CODE (op) == CONST_VECTOR)
3332 {
3333 num_elem = CONST_VECTOR_NUNITS (op);
3334 elems = &CONST_VECTOR_ELT (op, 0);
3335 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3336 }
3337 else
3338 {
3339 num_elem = 1;
3340 elems = &op;
3341 elem_bitsize = max_bitsize;
3342 }
3343 /* If this asserts, it is too complicated; reducing value_bit may help. */
3344 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3345 /* I don't know how to handle endianness of sub-units. */
3346 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3347
3348 for (elem = 0; elem < num_elem; elem++)
3349 {
3350 unsigned char * vp;
3351 rtx el = elems[elem];
3352
3353 /* Vectors are kept in target memory order. (This is probably
3354 a mistake.) */
3355 {
3356 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3357 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3358 / BITS_PER_UNIT);
3359 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3360 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3361 unsigned bytele = (subword_byte % UNITS_PER_WORD
3362 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3363 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3364 }
3365
3366 switch (GET_CODE (el))
3367 {
3368 case CONST_INT:
3369 for (i = 0;
3370 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3371 i += value_bit)
3372 *vp++ = INTVAL (el) >> i;
3373 /* CONST_INTs are always logically sign-extended. */
3374 for (; i < elem_bitsize; i += value_bit)
3375 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3376 break;
3377
3378 case CONST_DOUBLE:
3379 if (GET_MODE (el) == VOIDmode)
3380 {
3381 /* If this triggers, someone should have generated a
3382 CONST_INT instead. */
3383 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3384
3385 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3386 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3387 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3388 {
3389 *vp++
3390 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3391 i += value_bit;
3392 }
3393 /* It shouldn't matter what's done here, so fill it with
3394 zero. */
3395 for (; i < max_bitsize; i += value_bit)
3396 *vp++ = 0;
3397 }
3398 else
3399 {
3400 long tmp[max_bitsize / 32];
3401 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3402
3403 gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
3404 gcc_assert (bitsize <= elem_bitsize);
3405 gcc_assert (bitsize % value_bit == 0);
3406
3407 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3408 GET_MODE (el));
3409
3410 /* real_to_target produces its result in words affected by
3411 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3412 and use WORDS_BIG_ENDIAN instead; see the documentation
3413 of SUBREG in rtl.texi. */
3414 for (i = 0; i < bitsize; i += value_bit)
3415 {
3416 int ibase;
3417 if (WORDS_BIG_ENDIAN)
3418 ibase = bitsize - 1 - i;
3419 else
3420 ibase = i;
3421 *vp++ = tmp[ibase / 32] >> i % 32;
3422 }
3423
3424 /* It shouldn't matter what's done here, so fill it with
3425 zero. */
3426 for (; i < elem_bitsize; i += value_bit)
3427 *vp++ = 0;
3428 }
3429 break;
3430
3431 default:
3432 gcc_unreachable ();
3433 }
3434 }
3435
3436 /* Now, pick the right byte to start with. */
3437 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3438 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3439 will already have offset 0. */
3440 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3441 {
3442 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3443 - byte);
3444 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3445 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3446 byte = (subword_byte % UNITS_PER_WORD
3447 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3448 }
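 /* Illustrative example: taking the lowpart HImode subreg of an SImode
    value on a big-endian target arrives here with BYTE == 2; ibyte is
    then 4 - 2 - 2 == 0, so the renumbered BYTE is 0, i.e. the
    least-significant bytes of the little-endian VALUE array unpacked
    above.  */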
3449
3450 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3451 so if it's become negative it will instead be very large.) */
3452 gcc_assert (byte < GET_MODE_SIZE (innermode));
3453
3454 /* Convert from bytes to chunks of size value_bit. */
3455 value_start = byte * (BITS_PER_UNIT / value_bit);
3456
3457 /* Re-pack the value. */
3458
3459 if (VECTOR_MODE_P (outermode))
3460 {
3461 num_elem = GET_MODE_NUNITS (outermode);
3462 result_v = rtvec_alloc (num_elem);
3463 elems = &RTVEC_ELT (result_v, 0);
3464 outer_submode = GET_MODE_INNER (outermode);
3465 }
3466 else
3467 {
3468 num_elem = 1;
3469 elems = &result_s;
3470 outer_submode = outermode;
3471 }
3472
3473 outer_class = GET_MODE_CLASS (outer_submode);
3474 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3475
3476 gcc_assert (elem_bitsize % value_bit == 0);
3477 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3478
3479 for (elem = 0; elem < num_elem; elem++)
3480 {
3481 unsigned char *vp;
3482
3483 /* Vectors are stored in target memory order. (This is probably
3484 a mistake.) */
3485 {
3486 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3487 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3488 / BITS_PER_UNIT);
3489 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3490 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3491 unsigned bytele = (subword_byte % UNITS_PER_WORD
3492 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3493 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3494 }
3495
3496 switch (outer_class)
3497 {
3498 case MODE_INT:
3499 case MODE_PARTIAL_INT:
3500 {
3501 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3502
3503 for (i = 0;
3504 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3505 i += value_bit)
3506 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3507 for (; i < elem_bitsize; i += value_bit)
3508 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3509 << (i - HOST_BITS_PER_WIDE_INT));
3510
3511 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3512 know why. */
3513 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3514 elems[elem] = gen_int_mode (lo, outer_submode);
3515 else
3516 elems[elem] = immed_double_const (lo, hi, outer_submode);
3517 }
3518 break;
3519
3520 case MODE_FLOAT:
3521 {
3522 REAL_VALUE_TYPE r;
3523 long tmp[max_bitsize / 32];
3524
3525 /* real_from_target wants its input in words affected by
3526 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3527 and use WORDS_BIG_ENDIAN instead; see the documentation
3528 of SUBREG in rtl.texi. */
3529 for (i = 0; i < max_bitsize / 32; i++)
3530 tmp[i] = 0;
3531 for (i = 0; i < elem_bitsize; i += value_bit)
3532 {
3533 int ibase;
3534 if (WORDS_BIG_ENDIAN)
3535 ibase = elem_bitsize - 1 - i;
3536 else
3537 ibase = i;
3538 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3539 }
3540
3541 real_from_target (&r, tmp, outer_submode);
3542 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3543 }
3544 break;
3545
3546 default:
3547 gcc_unreachable ();
3548 }
3549 }
3550 if (VECTOR_MODE_P (outermode))
3551 return gen_rtx_CONST_VECTOR (outermode, result_v);
3552 else
3553 return result_s;
3554 }
3555
3556 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3557 Return 0 if no simplifications are possible. */
3558 rtx
3559 simplify_subreg (enum machine_mode outermode, rtx op,
3560 enum machine_mode innermode, unsigned int byte)
3561 {
3562 /* Little bit of sanity checking. */
3563 gcc_assert (innermode != VOIDmode);
3564 gcc_assert (outermode != VOIDmode);
3565 gcc_assert (innermode != BLKmode);
3566 gcc_assert (outermode != BLKmode);
3567
3568 gcc_assert (GET_MODE (op) == innermode
3569 || GET_MODE (op) == VOIDmode);
3570
3571 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3572 gcc_assert (byte < GET_MODE_SIZE (innermode));
3573
3574 if (outermode == innermode && !byte)
3575 return op;
3576
3577 if (GET_CODE (op) == CONST_INT
3578 || GET_CODE (op) == CONST_DOUBLE
3579 || GET_CODE (op) == CONST_VECTOR)
3580 return simplify_immed_subreg (outermode, op, innermode, byte);
3581
3582 /* Changing mode twice with SUBREG => just change it once,
3583 or not at all if changing back to the starting mode. */
3584 if (GET_CODE (op) == SUBREG)
3585 {
3586 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3587 int final_offset = byte + SUBREG_BYTE (op);
3588 rtx newx;
3589
3590 if (outermode == innermostmode
3591 && byte == 0 && SUBREG_BYTE (op) == 0)
3592 return SUBREG_REG (op);
3593
3594 /* The SUBREG_BYTE represents the offset, as if the value were stored
3595 in memory. The irritating exception is a paradoxical subreg, where
3596 we define SUBREG_BYTE to be 0; on big-endian machines, this
3597 value would otherwise be negative. For a moment, undo this exception. */
3598 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3599 {
3600 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3601 if (WORDS_BIG_ENDIAN)
3602 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3603 if (BYTES_BIG_ENDIAN)
3604 final_offset += difference % UNITS_PER_WORD;
3605 }
3606 if (SUBREG_BYTE (op) == 0
3607 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3608 {
3609 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3610 if (WORDS_BIG_ENDIAN)
3611 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3612 if (BYTES_BIG_ENDIAN)
3613 final_offset += difference % UNITS_PER_WORD;
3614 }
3615
3616 /* See whether resulting subreg will be paradoxical. */
3617 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3618 {
3619 /* In nonparadoxical subregs we can't handle negative offsets. */
3620 if (final_offset < 0)
3621 return NULL_RTX;
3622 /* Bail out in case resulting subreg would be incorrect. */
3623 if (final_offset % GET_MODE_SIZE (outermode)
3624 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3625 return NULL_RTX;
3626 }
3627 else
3628 {
3629 int offset = 0;
3630 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3631
3632 /* In a paradoxical subreg, see if we are still looking at the lower part.
3633 If so, our SUBREG_BYTE will be 0. */
3634 if (WORDS_BIG_ENDIAN)
3635 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3636 if (BYTES_BIG_ENDIAN)
3637 offset += difference % UNITS_PER_WORD;
3638 if (offset == final_offset)
3639 final_offset = 0;
3640 else
3641 return NULL_RTX;
3642 }
3643
3644 /* Recurse for further possible simplifications. */
3645 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
3646 final_offset);
3647 if (newx)
3648 return newx;
3649 if (validate_subreg (outermode, innermostmode,
3650 SUBREG_REG (op), final_offset))
3651 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3652 return NULL_RTX;
3653 }
3654
3655 /* SUBREG of a hard register => just change the register number
3656 and/or mode. If the hard register is not valid in that mode,
3657 suppress this simplification. If the hard register is the stack,
3658 frame, or argument pointer, leave this as a SUBREG. */
3659
3660 if (REG_P (op)
3661 && REGNO (op) < FIRST_PSEUDO_REGISTER
3662 #ifdef CANNOT_CHANGE_MODE_CLASS
3663 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3664 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3665 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3666 #endif
3667 && ((reload_completed && !frame_pointer_needed)
3668 || (REGNO (op) != FRAME_POINTER_REGNUM
3669 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3670 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3671 #endif
3672 ))
3673 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3674 && REGNO (op) != ARG_POINTER_REGNUM
3675 #endif
3676 && REGNO (op) != STACK_POINTER_REGNUM
3677 && subreg_offset_representable_p (REGNO (op), innermode,
3678 byte, outermode))
3679 {
3680 unsigned int regno = REGNO (op);
3681 unsigned int final_regno
3682 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
3683
3684 /* ??? We do allow it if the current REG is not valid for
3685 its mode. This is a kludge to work around how float/complex
3686 arguments are passed on 32-bit SPARC and should be fixed. */
3687 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3688 || ! HARD_REGNO_MODE_OK (regno, innermode))
3689 {
3690 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3691
3692 /* Propagate the original regno. We don't have any way to specify
3693 the offset inside the original regno, so do so only for the lowpart.
3694 The information is used only by alias analysis, which cannot
3695 grok partial registers anyway. */
3696
3697 if (subreg_lowpart_offset (outermode, innermode) == byte)
3698 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3699 return x;
3700 }
3701 }
3702
3703 /* If we have a SUBREG of a register that we are replacing and we are
3704 replacing it with a MEM, make a new MEM and try replacing the
3705 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3706 or if we would be widening it. */
3707
3708 if (MEM_P (op)
3709 && ! mode_dependent_address_p (XEXP (op, 0))
3710 /* Allow splitting of volatile memory references in case we don't
3711 have an instruction to move the whole thing. */
3712 && (! MEM_VOLATILE_P (op)
3713 || ! have_insn_for (SET, innermode))
3714 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3715 return adjust_address_nv (op, outermode, byte);
3716
3717 /* Handle complex values represented as CONCAT
3718 of real and imaginary part. */
3719 if (GET_CODE (op) == CONCAT)
3720 {
3721 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3722 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3723 unsigned int final_offset;
3724 rtx res;
3725
3726 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3727 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3728 if (res)
3729 return res;
3730 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
3731 return gen_rtx_SUBREG (outermode, part, final_offset);
3732 return NULL_RTX;
3733 }
3734
3735 /* Optimize SUBREG truncations of zero and sign extended values. */
3736 if ((GET_CODE (op) == ZERO_EXTEND
3737 || GET_CODE (op) == SIGN_EXTEND)
3738 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3739 {
3740 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3741
3742 /* If we're requesting the lowpart of a zero or sign extension,
3743 there are three possibilities. If the outermode is the same
3744 as the origmode, we can omit both the extension and the subreg.
3745 If the outermode is not larger than the origmode, we can apply
3746 the truncation without the extension. Finally, if the outermode
3747 is larger than the origmode, but both are integer modes, we
3748 can just extend to the appropriate mode. */
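 /* Illustrative examples: the lowpart HImode subreg of
    (zero_extend:SI (reg:HI X)) reduces to (reg:HI X); the lowpart QImode
    subreg of the same value becomes a lowpart subreg of (reg:HI X); and
    the lowpart SImode subreg of (zero_extend:DI (reg:HI X)) becomes
    (zero_extend:SI (reg:HI X)).  */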
3749 if (bitpos == 0)
3750 {
3751 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3752 if (outermode == origmode)
3753 return XEXP (op, 0);
3754 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3755 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3756 subreg_lowpart_offset (outermode,
3757 origmode));
3758 if (SCALAR_INT_MODE_P (outermode))
3759 return simplify_gen_unary (GET_CODE (op), outermode,
3760 XEXP (op, 0), origmode);
3761 }
3762
3763 /* A SUBREG resulting from a zero extension may fold to zero if
3764 it extracts higher bits than the ZERO_EXTEND's source provides. */
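/* For instance (illustrative, assuming a little-endian target),
   (subreg:SI (zero_extend:DI (reg:SI X)) 4) reads only bits that the zero
   extension filled with zeros, so it folds to (const_int 0).  */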
3765 if (GET_CODE (op) == ZERO_EXTEND
3766 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3767 return CONST0_RTX (outermode);
3768 }
3769
3770 return NULL_RTX;
3771 }
3772
3773 /* Make a SUBREG operation or equivalent if it folds. */
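/* For example (illustrative only), simplify_gen_subreg (QImode, X, SImode, 0)
   asks for the QImode piece of X at byte offset 0; the result is either a
   simplified rtx, an explicit (subreg:QI X 0), or NULL_RTX when no valid
   SUBREG can be formed.  */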
3774
3775 rtx
3776 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3777 enum machine_mode innermode, unsigned int byte)
3778 {
3779 rtx newx;
3780
3781 newx = simplify_subreg (outermode, op, innermode, byte);
3782 if (newx)
3783 return newx;
3784
3785 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode
3786 || (REG_P (op) && REGNO (op) < FIRST_PSEUDO_REGISTER))
3787 return NULL_RTX;
3788
3789 if (validate_subreg (outermode, innermode, op, byte))
3790 return gen_rtx_SUBREG (outermode, op, byte);
3791
3792 return NULL_RTX;
3793 }
3794
3795 /* Simplify X, an rtx expression.
3796
3797 Return the simplified expression or NULL if no simplifications
3798 were possible.
3799
3800 This is the preferred entry point into the simplification routines;
3801 however, we still allow passes to call the more specific routines.
3802
3803 Right now GCC has three (yes, three) major bodies of RTL simplification
3804 code that need to be unified.
3805
3806 1. fold_rtx in cse.c. This code uses various CSE specific
3807 information to aid in RTL simplification.
3808
3809 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3810 it uses combine specific information to aid in RTL
3811 simplification.
3812
3813 3. The routines in this file.
3814
3815
3816 Long term we want to only have one body of simplification code; to
3817 get to that state I recommend the following steps:
3818
3819 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3820 that do not depend on pass-specific state into these routines.
3821
3822 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3823 use this routine whenever possible.
3824
3825 3. Allow for pass dependent state to be provided to these
3826 routines and add simplifications based on the pass dependent
3827 state. Remove code from cse.c & combine.c that becomes
3828 redundant/dead.
3829
3830 It will take time, but ultimately the compiler will be easier to
3831 maintain and improve. It's totally silly that when we add a
3832 simplification it needs to be added to 4 places (3 for RTL
3833 simplification and 1 for tree simplification). */
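/* Illustrative usage, not part of the original source: callers typically
   try the simplifier and keep the original expression when it declines:

	rtx tem = simplify_rtx (x);
	if (tem)
	  x = tem;  */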
3834
3835 rtx
3836 simplify_rtx (rtx x)
3837 {
3838 enum rtx_code code = GET_CODE (x);
3839 enum machine_mode mode = GET_MODE (x);
3840
3841 switch (GET_RTX_CLASS (code))
3842 {
3843 case RTX_UNARY:
3844 return simplify_unary_operation (code, mode,
3845 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3846 case RTX_COMM_ARITH:
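/* Illustrative: (plus:SI (const_int 4) (reg:SI X)) is first put into
   canonical operand order, (plus:SI (reg:SI X) (const_int 4)), before
   being handed to the binary simplifier.  */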
3847 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3848 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3849
3850 /* Fall through.... */
3851
3852 case RTX_BIN_ARITH:
3853 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3854
3855 case RTX_TERNARY:
3856 case RTX_BITFIELD_OPS:
3857 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3858 XEXP (x, 0), XEXP (x, 1),
3859 XEXP (x, 2));
3860
3861 case RTX_COMPARE:
3862 case RTX_COMM_COMPARE:
3863 return simplify_relational_operation (code, mode,
3864 ((GET_MODE (XEXP (x, 0))
3865 != VOIDmode)
3866 ? GET_MODE (XEXP (x, 0))
3867 : GET_MODE (XEXP (x, 1))),
3868 XEXP (x, 0),
3869 XEXP (x, 1));
3870
3871 case RTX_EXTRA:
3872 if (code == SUBREG)
3873 return simplify_gen_subreg (mode, SUBREG_REG (x),
3874 GET_MODE (SUBREG_REG (x)),
3875 SUBREG_BYTE (x));
3876 break;
3877
3878 case RTX_OBJ:
3879 if (code == LO_SUM)
3880 {
3881 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3882 if (GET_CODE (XEXP (x, 0)) == HIGH
3883 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3884 return XEXP (x, 1);
3885 }
3886 break;
3887
3888 default:
3889 break;
3890 }
3891 return NULL;
3892 }