re PR rtl-optimization/18942 (Do loop is not as optimized as 3.3.2)
[gcc.git] / gcc / simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
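/* For example, HWI_SIGN_EXTEND (-5) is (HOST_WIDE_INT) -1 and
   HWI_SIGN_EXTEND (5) is (HOST_WIDE_INT) 0, i.e. the high word that a
   sign extension of the low word would produce.  */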
51
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
56 rtx, int);
57 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 unsigned int);
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 rtx, rtx);
61 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
62 enum machine_mode, rtx, rtx);
63 \f
64 /* Negate a CONST_INT rtx, truncating (because a conversion from a
65 maximally negative number can overflow). */
66 static rtx
67 neg_const_int (enum machine_mode mode, rtx i)
68 {
69 return gen_int_mode (- INTVAL (i), mode);
70 }
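/* For example, neg_const_int (QImode, GEN_INT (-128)) yields
   (const_int -128) again: the mathematical result 128 does not fit in
   QImode, so gen_int_mode truncates it back into range.  */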
71
72 /* Test whether expression X is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
74
75 bool
76 mode_signbit_p (enum machine_mode mode, rtx x)
77 {
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80
81 if (GET_MODE_CLASS (mode) != MODE_INT)
82 return false;
83
84 width = GET_MODE_BITSIZE (mode);
85 if (width == 0)
86 return false;
87
88 if (width <= HOST_BITS_PER_WIDE_INT
89 && GET_CODE (x) == CONST_INT)
90 val = INTVAL (x);
91 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
92 && GET_CODE (x) == CONST_DOUBLE
93 && CONST_DOUBLE_LOW (x) == 0)
94 {
95 val = CONST_DOUBLE_HIGH (x);
96 width -= HOST_BITS_PER_WIDE_INT;
97 }
98 else
99 return false;
100
101 if (width < HOST_BITS_PER_WIDE_INT)
102 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
103 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
104 }
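/* For example, in SImode the predicate holds for (const_int -2147483648),
   whose only set bit after masking to 32 bits is bit 31, and fails for
   (const_int 1073741824).  For modes wider than HOST_WIDE_INT the sign
   bit must appear in CONST_DOUBLE_HIGH, which the second branch above
   checks.  */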
105 \f
106 /* Make a binary operation by properly ordering the operands and
107 seeing if the expression folds. */
108
109 rtx
110 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
111 rtx op1)
112 {
113 rtx tem;
114
115 /* Put complex operands first and constants second if commutative. */
116 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
117 && swap_commutative_operands_p (op0, op1))
118 tem = op0, op0 = op1, op1 = tem;
119
120 /* If this simplifies, do it. */
121 tem = simplify_binary_operation (code, mode, op0, op1);
122 if (tem)
123 return tem;
124
125 /* Handle addition and subtraction specially. Otherwise, just form
126 the operation. */
127
128 if (code == PLUS || code == MINUS)
129 {
130 tem = simplify_plus_minus (code, mode, op0, op1, 1);
131 if (tem)
132 return tem;
133 }
134
135 return gen_rtx_fmt_ee (code, mode, op0, op1);
136 }
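/* For example, with X an SImode register, simplify_gen_binary (PLUS,
   SImode, X, const0_rtx) returns X, while simplify_gen_binary (PLUS,
   SImode, const1_rtx, X) is reordered and comes back as
   (plus:SI X (const_int 1)) with the constant second.  */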
137 \f
138 /* If X is a MEM referencing the constant pool, return the real value.
139 Otherwise return X. */
140 rtx
141 avoid_constant_pool_reference (rtx x)
142 {
143 rtx c, tmp, addr;
144 enum machine_mode cmode;
145
146 switch (GET_CODE (x))
147 {
148 case MEM:
149 break;
150
151 case FLOAT_EXTEND:
152 /* Handle float extensions of constant pool references. */
153 tmp = XEXP (x, 0);
154 c = avoid_constant_pool_reference (tmp);
155 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
156 {
157 REAL_VALUE_TYPE d;
158
159 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
160 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
161 }
162 return x;
163
164 default:
165 return x;
166 }
167
168 addr = XEXP (x, 0);
169
170 /* Call target hook to avoid the effects of -fpic etc.... */
171 addr = targetm.delegitimize_address (addr);
172
173 if (GET_CODE (addr) == LO_SUM)
174 addr = XEXP (addr, 1);
175
176 if (GET_CODE (addr) != SYMBOL_REF
177 || ! CONSTANT_POOL_ADDRESS_P (addr))
178 return x;
179
180 c = get_pool_constant (addr);
181 cmode = get_pool_mode (addr);
182
183 /* If we're accessing the constant in a different mode than it was
184 originally stored, attempt to fix that up via subreg simplifications.
185 If that fails we have no choice but to return the original memory. */
186 if (cmode != GET_MODE (x))
187 {
188 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
189 return c ? c : x;
190 }
191
192 return c;
193 }
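/* For example, if X is a DFmode (mem (symbol_ref ...)) that addresses a
   pooled DFmode constant, the pooled CONST_DOUBLE itself is returned;
   if X reads the pool entry in some other mode, the value is
   reinterpreted via simplify_subreg, and X is returned unchanged when
   that fails.  */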
194 \f
195 /* Make a unary operation by first seeing if it folds and otherwise making
196 the specified operation. */
197
198 rtx
199 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
200 enum machine_mode op_mode)
201 {
202 rtx tem;
203
204 /* If this simplifies, use it. */
205 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
206 return tem;
207
208 return gen_rtx_fmt_e (code, mode, op);
209 }
210
211 /* Likewise for ternary operations. */
212
213 rtx
214 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
215 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
216 {
217 rtx tem;
218
219 /* If this simplifies, use it. */
220 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
221 op0, op1, op2)))
222 return tem;
223
224 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
225 }
226
227 /* Likewise, for relational operations.
228 CMP_MODE specifies the mode in which the comparison is done. */
229
230 rtx
231 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
232 enum machine_mode cmp_mode, rtx op0, rtx op1)
233 {
234 rtx tem;
235
236 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
237 op0, op1)))
238 return tem;
239
240 return gen_rtx_fmt_ee (code, mode, op0, op1);
241 }
242 \f
243 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
244 resulting RTX. Return a new RTX which is as simplified as possible. */
245
246 rtx
247 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
248 {
249 enum rtx_code code = GET_CODE (x);
250 enum machine_mode mode = GET_MODE (x);
251 enum machine_mode op_mode;
252 rtx op0, op1, op2;
253
254 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
255 to build a new expression substituting recursively. If we can't do
256 anything, return our input. */
257
258 if (x == old_rtx)
259 return new_rtx;
260
261 switch (GET_RTX_CLASS (code))
262 {
263 case RTX_UNARY:
264 op0 = XEXP (x, 0);
265 op_mode = GET_MODE (op0);
266 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
267 if (op0 == XEXP (x, 0))
268 return x;
269 return simplify_gen_unary (code, mode, op0, op_mode);
270
271 case RTX_BIN_ARITH:
272 case RTX_COMM_ARITH:
273 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
274 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
275 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
276 return x;
277 return simplify_gen_binary (code, mode, op0, op1);
278
279 case RTX_COMPARE:
280 case RTX_COMM_COMPARE:
281 op0 = XEXP (x, 0);
282 op1 = XEXP (x, 1);
283 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
284 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
285 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
286 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
287 return x;
288 return simplify_gen_relational (code, mode, op_mode, op0, op1);
289
290 case RTX_TERNARY:
291 case RTX_BITFIELD_OPS:
292 op0 = XEXP (x, 0);
293 op_mode = GET_MODE (op0);
294 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
295 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
296 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
297 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
298 return x;
299 if (op_mode == VOIDmode)
300 op_mode = GET_MODE (op0);
301 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
302
303 case RTX_EXTRA:
304 /* The only case we try to handle is a SUBREG. */
305 if (code == SUBREG)
306 {
307 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
308 if (op0 == SUBREG_REG (x))
309 return x;
310 op0 = simplify_gen_subreg (GET_MODE (x), op0,
311 GET_MODE (SUBREG_REG (x)),
312 SUBREG_BYTE (x));
313 return op0 ? op0 : x;
314 }
315 break;
316
317 case RTX_OBJ:
318 if (code == MEM)
319 {
320 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
321 if (op0 == XEXP (x, 0))
322 return x;
323 return replace_equiv_address_nv (x, op0);
324 }
325 else if (code == LO_SUM)
326 {
327 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
328 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
329
330 /* (lo_sum (high x) x) -> x */
331 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
332 return op1;
333
334 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
335 return x;
336 return gen_rtx_LO_SUM (mode, op0, op1);
337 }
338 else if (code == REG)
339 {
340 if (rtx_equal_p (x, old_rtx))
341 return new_rtx;
342 }
343 break;
344
345 default:
346 break;
347 }
348 return x;
349 }
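/* For example, replacing the SImode register R with (const_int 4) in
   (plus:SI (mult:SI R R) R) rebuilds each subexpression through the
   simplify_gen_* routines above, so the whole expression folds to
   (const_int 20).  */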
350 \f
351 /* Try to simplify a unary operation CODE whose output mode is to be
352 MODE with input operand OP whose mode was originally OP_MODE.
353 Return zero if no simplification can be made. */
354 rtx
355 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
356 rtx op, enum machine_mode op_mode)
357 {
358 unsigned int width = GET_MODE_BITSIZE (mode);
359 rtx trueop = avoid_constant_pool_reference (op);
360
361 if (code == VEC_DUPLICATE)
362 {
363 gcc_assert (VECTOR_MODE_P (mode));
364 if (GET_MODE (trueop) != VOIDmode)
365 {
366 if (!VECTOR_MODE_P (GET_MODE (trueop)))
367 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop));
368 else
369 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
370 (GET_MODE (trueop)));
371 }
372 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
373 || GET_CODE (trueop) == CONST_VECTOR)
374 {
375 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
376 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
377 rtvec v = rtvec_alloc (n_elts);
378 unsigned int i;
379
380 if (GET_CODE (trueop) != CONST_VECTOR)
381 for (i = 0; i < n_elts; i++)
382 RTVEC_ELT (v, i) = trueop;
383 else
384 {
385 enum machine_mode inmode = GET_MODE (trueop);
386 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
387 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
388
389 gcc_assert (in_n_elts < n_elts);
390 gcc_assert ((n_elts % in_n_elts) == 0);
391 for (i = 0; i < n_elts; i++)
392 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
393 }
394 return gen_rtx_CONST_VECTOR (mode, v);
395 }
396 }
397 else if (GET_CODE (op) == CONST)
398 return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
399
400 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
401 {
402 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
403 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
404 enum machine_mode opmode = GET_MODE (trueop);
405 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
406 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
407 rtvec v = rtvec_alloc (n_elts);
408 unsigned int i;
409
410 gcc_assert (op_n_elts == n_elts);
411 for (i = 0; i < n_elts; i++)
412 {
413 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
414 CONST_VECTOR_ELT (trueop, i),
415 GET_MODE_INNER (opmode));
416 if (!x)
417 return 0;
418 RTVEC_ELT (v, i) = x;
419 }
420 return gen_rtx_CONST_VECTOR (mode, v);
421 }
422
423 /* The order of these tests is critical so that, for example, we don't
424 check the wrong mode (input vs. output) for a conversion operation,
425 such as FIX. At some point, this should be simplified. */
426
427 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
428 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
429 {
430 HOST_WIDE_INT hv, lv;
431 REAL_VALUE_TYPE d;
432
433 if (GET_CODE (trueop) == CONST_INT)
434 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
435 else
436 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
437
438 REAL_VALUE_FROM_INT (d, lv, hv, mode);
439 d = real_value_truncate (mode, d);
440 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
441 }
442 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
443 && (GET_CODE (trueop) == CONST_DOUBLE
444 || GET_CODE (trueop) == CONST_INT))
445 {
446 HOST_WIDE_INT hv, lv;
447 REAL_VALUE_TYPE d;
448
449 if (GET_CODE (trueop) == CONST_INT)
450 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
451 else
452 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
453
454 if (op_mode == VOIDmode)
455 {
456 /* We don't know how to interpret negative-looking numbers in
457 this case, so don't try to fold those. */
458 if (hv < 0)
459 return 0;
460 }
461 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
462 ;
463 else
464 hv = 0, lv &= GET_MODE_MASK (op_mode);
465
466 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
467 d = real_value_truncate (mode, d);
468 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
469 }
470
471 if (GET_CODE (trueop) == CONST_INT
472 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
473 {
474 HOST_WIDE_INT arg0 = INTVAL (trueop);
475 HOST_WIDE_INT val;
476
477 switch (code)
478 {
479 case NOT:
480 val = ~ arg0;
481 break;
482
483 case NEG:
484 val = - arg0;
485 break;
486
487 case ABS:
488 val = (arg0 >= 0 ? arg0 : - arg0);
489 break;
490
491 case FFS:
492 /* Don't use ffs here. Instead, get low order bit and then its
493 number. If arg0 is zero, this will return 0, as desired. */
494 arg0 &= GET_MODE_MASK (mode);
495 val = exact_log2 (arg0 & (- arg0)) + 1;
496 break;
497
498 case CLZ:
499 arg0 &= GET_MODE_MASK (mode);
500 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
501 ;
502 else
503 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
504 break;
505
506 case CTZ:
507 arg0 &= GET_MODE_MASK (mode);
508 if (arg0 == 0)
509 {
510 /* Even if the value at zero is undefined, we have to come
511 up with some replacement. Seems good enough. */
512 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
513 val = GET_MODE_BITSIZE (mode);
514 }
515 else
516 val = exact_log2 (arg0 & -arg0);
517 break;
518
519 case POPCOUNT:
520 arg0 &= GET_MODE_MASK (mode);
521 val = 0;
522 while (arg0)
523 val++, arg0 &= arg0 - 1;
524 break;
525
526 case PARITY:
527 arg0 &= GET_MODE_MASK (mode);
528 val = 0;
529 while (arg0)
530 val++, arg0 &= arg0 - 1;
531 val &= 1;
532 break;
533
534 case TRUNCATE:
535 val = arg0;
536 break;
537
538 case ZERO_EXTEND:
539 /* When zero-extending a CONST_INT, we need to know its
540 original mode. */
541 gcc_assert (op_mode != VOIDmode);
542 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
543 {
544 /* If we were really extending the mode,
545 we would have to distinguish between zero-extension
546 and sign-extension. */
547 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
548 val = arg0;
549 }
550 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
551 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
552 else
553 return 0;
554 break;
555
556 case SIGN_EXTEND:
557 if (op_mode == VOIDmode)
558 op_mode = mode;
559 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
560 {
561 /* If we were really extending the mode,
562 we would have to distinguish between zero-extension
563 and sign-extension. */
564 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
565 val = arg0;
566 }
567 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
568 {
569 val
570 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
571 if (val
572 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
573 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
574 }
575 else
576 return 0;
577 break;
578
579 case SQRT:
580 case FLOAT_EXTEND:
581 case FLOAT_TRUNCATE:
582 case SS_TRUNCATE:
583 case US_TRUNCATE:
584 return 0;
585
586 default:
587 gcc_unreachable ();
588 }
589
590 val = trunc_int_for_mode (val, mode);
591
592 return GEN_INT (val);
593 }
594
595 /* We can do some operations on integer CONST_DOUBLEs. Also allow
596 for a DImode operation on a CONST_INT. */
597 else if (GET_MODE (trueop) == VOIDmode
598 && width <= HOST_BITS_PER_WIDE_INT * 2
599 && (GET_CODE (trueop) == CONST_DOUBLE
600 || GET_CODE (trueop) == CONST_INT))
601 {
602 unsigned HOST_WIDE_INT l1, lv;
603 HOST_WIDE_INT h1, hv;
604
605 if (GET_CODE (trueop) == CONST_DOUBLE)
606 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
607 else
608 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
609
610 switch (code)
611 {
612 case NOT:
613 lv = ~ l1;
614 hv = ~ h1;
615 break;
616
617 case NEG:
618 neg_double (l1, h1, &lv, &hv);
619 break;
620
621 case ABS:
622 if (h1 < 0)
623 neg_double (l1, h1, &lv, &hv);
624 else
625 lv = l1, hv = h1;
626 break;
627
628 case FFS:
629 hv = 0;
630 if (l1 == 0)
631 {
632 if (h1 == 0)
633 lv = 0;
634 else
635 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
636 }
637 else
638 lv = exact_log2 (l1 & -l1) + 1;
639 break;
640
641 case CLZ:
642 hv = 0;
643 if (h1 != 0)
644 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
645 - HOST_BITS_PER_WIDE_INT;
646 else if (l1 != 0)
647 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
648 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
649 lv = GET_MODE_BITSIZE (mode);
650 break;
651
652 case CTZ:
653 hv = 0;
654 if (l1 != 0)
655 lv = exact_log2 (l1 & -l1);
656 else if (h1 != 0)
657 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
658 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
659 lv = GET_MODE_BITSIZE (mode);
660 break;
661
662 case POPCOUNT:
663 hv = 0;
664 lv = 0;
665 while (l1)
666 lv++, l1 &= l1 - 1;
667 while (h1)
668 lv++, h1 &= h1 - 1;
669 break;
670
671 case PARITY:
672 hv = 0;
673 lv = 0;
674 while (l1)
675 lv++, l1 &= l1 - 1;
676 while (h1)
677 lv++, h1 &= h1 - 1;
678 lv &= 1;
679 break;
680
681 case TRUNCATE:
682 /* This is just a change-of-mode, so do nothing. */
683 lv = l1, hv = h1;
684 break;
685
686 case ZERO_EXTEND:
687 gcc_assert (op_mode != VOIDmode);
688
689 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
690 return 0;
691
692 hv = 0;
693 lv = l1 & GET_MODE_MASK (op_mode);
694 break;
695
696 case SIGN_EXTEND:
697 if (op_mode == VOIDmode
698 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
699 return 0;
700 else
701 {
702 lv = l1 & GET_MODE_MASK (op_mode);
703 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
704 && (lv & ((HOST_WIDE_INT) 1
705 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
706 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
707
708 hv = HWI_SIGN_EXTEND (lv);
709 }
710 break;
711
712 case SQRT:
713 return 0;
714
715 default:
716 return 0;
717 }
718
719 return immed_double_const (lv, hv, mode);
720 }
721
722 else if (GET_CODE (trueop) == CONST_DOUBLE
723 && GET_MODE_CLASS (mode) == MODE_FLOAT)
724 {
725 REAL_VALUE_TYPE d, t;
726 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
727
728 switch (code)
729 {
730 case SQRT:
731 if (HONOR_SNANS (mode) && real_isnan (&d))
732 return 0;
733 real_sqrt (&t, mode, &d);
734 d = t;
735 break;
736 case ABS:
737 d = REAL_VALUE_ABS (d);
738 break;
739 case NEG:
740 d = REAL_VALUE_NEGATE (d);
741 break;
742 case FLOAT_TRUNCATE:
743 d = real_value_truncate (mode, d);
744 break;
745 case FLOAT_EXTEND:
746 /* All this does is change the mode. */
747 break;
748 case FIX:
749 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
750 break;
751 case NOT:
752 {
753 long tmp[4];
754 int i;
755
756 real_to_target (tmp, &d, GET_MODE (trueop));
757 for (i = 0; i < 4; i++)
758 tmp[i] = ~tmp[i];
759 real_from_target (&d, tmp, mode);
760 }
break;
761 default:
762 gcc_unreachable ();
763 }
764 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
765 }
766
767 else if (GET_CODE (trueop) == CONST_DOUBLE
768 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
769 && GET_MODE_CLASS (mode) == MODE_INT
770 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
771 {
772 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
773 operators are intentionally left unspecified (to ease implementation
774 by target backends), for consistency, this routine implements the
775 same semantics for constant folding as used by the middle-end. */
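/* Concretely, NaNs fold to zero and out-of-range values saturate: e.g.
   (fix:SI (const_double:DF 3.0e10)) folds to (const_int 2147483647), and
   (unsigned_fix:SI (const_double:DF -1.0)) folds to (const_int 0).  */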
776
777 HOST_WIDE_INT xh, xl, th, tl;
778 REAL_VALUE_TYPE x, t;
779 REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
780 switch (code)
781 {
782 case FIX:
783 if (REAL_VALUE_ISNAN (x))
784 return const0_rtx;
785
786 /* Test against the signed upper bound. */
787 if (width > HOST_BITS_PER_WIDE_INT)
788 {
789 th = ((unsigned HOST_WIDE_INT) 1
790 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
791 tl = -1;
792 }
793 else
794 {
795 th = 0;
796 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
797 }
798 real_from_integer (&t, VOIDmode, tl, th, 0);
799 if (REAL_VALUES_LESS (t, x))
800 {
801 xh = th;
802 xl = tl;
803 break;
804 }
805
806 /* Test against the signed lower bound. */
807 if (width > HOST_BITS_PER_WIDE_INT)
808 {
809 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
810 tl = 0;
811 }
812 else
813 {
814 th = -1;
815 tl = (HOST_WIDE_INT) -1 << (width - 1);
816 }
817 real_from_integer (&t, VOIDmode, tl, th, 0);
818 if (REAL_VALUES_LESS (x, t))
819 {
820 xh = th;
821 xl = tl;
822 break;
823 }
824 REAL_VALUE_TO_INT (&xl, &xh, x);
825 break;
826
827 case UNSIGNED_FIX:
828 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
829 return const0_rtx;
830
831 /* Test against the unsigned upper bound. */
832 if (width == 2*HOST_BITS_PER_WIDE_INT)
833 {
834 th = -1;
835 tl = -1;
836 }
837 else if (width >= HOST_BITS_PER_WIDE_INT)
838 {
839 th = ((unsigned HOST_WIDE_INT) 1
840 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
841 tl = -1;
842 }
843 else
844 {
845 th = 0;
846 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
847 }
848 real_from_integer (&t, VOIDmode, tl, th, 1);
849 if (REAL_VALUES_LESS (t, x))
850 {
851 xh = th;
852 xl = tl;
853 break;
854 }
855
856 REAL_VALUE_TO_INT (&xl, &xh, x);
857 break;
858
859 default:
860 gcc_unreachable ();
861 }
862 return immed_double_const (xl, xh, mode);
863 }
864
865 /* This was formerly used only for non-IEEE float.
866 eggert@twinsun.com says it is safe for IEEE also. */
867 else
868 {
869 enum rtx_code reversed;
870 rtx temp;
871
872 /* There are some simplifications we can do even if the operands
873 aren't constant. */
874 switch (code)
875 {
876 case NOT:
877 /* (not (not X)) == X. */
878 if (GET_CODE (op) == NOT)
879 return XEXP (op, 0);
880
881 /* (not (eq X Y)) == (ne X Y), etc. */
882 if (COMPARISON_P (op)
883 && (mode == BImode || STORE_FLAG_VALUE == -1)
884 && ((reversed = reversed_comparison_code (op, NULL_RTX))
885 != UNKNOWN))
886 return simplify_gen_relational (reversed, mode, VOIDmode,
887 XEXP (op, 0), XEXP (op, 1));
888
889 /* (not (plus X -1)) can become (neg X). */
890 if (GET_CODE (op) == PLUS
891 && XEXP (op, 1) == constm1_rtx)
892 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
893
894 /* Similarly, (not (neg X)) is (plus X -1). */
895 if (GET_CODE (op) == NEG)
896 return plus_constant (XEXP (op, 0), -1);
897
898 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
899 if (GET_CODE (op) == XOR
900 && GET_CODE (XEXP (op, 1)) == CONST_INT
901 && (temp = simplify_unary_operation (NOT, mode,
902 XEXP (op, 1),
903 mode)) != 0)
904 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
905
906 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
907 if (GET_CODE (op) == PLUS
908 && GET_CODE (XEXP (op, 1)) == CONST_INT
909 && mode_signbit_p (mode, XEXP (op, 1))
910 && (temp = simplify_unary_operation (NOT, mode,
911 XEXP (op, 1),
912 mode)) != 0)
913 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
914
915
916
917 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
918 operands other than 1, but that is not valid. We could do a
919 similar simplification for (not (lshiftrt C X)) where C is
920 just the sign bit, but this doesn't seem common enough to
921 bother with. */
922 if (GET_CODE (op) == ASHIFT
923 && XEXP (op, 0) == const1_rtx)
924 {
925 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
926 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
927 }
928
929 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
930 by reversing the comparison code if valid. */
931 if (STORE_FLAG_VALUE == -1
932 && COMPARISON_P (op)
933 && (reversed = reversed_comparison_code (op, NULL_RTX))
934 != UNKNOWN)
935 return simplify_gen_relational (reversed, mode, VOIDmode,
936 XEXP (op, 0), XEXP (op, 1));
937
938 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
939 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
940 so we can perform the above simplification. */
941
942 if (STORE_FLAG_VALUE == -1
943 && GET_CODE (op) == ASHIFTRT
944 && GET_CODE (XEXP (op, 1)) == CONST_INT
945 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
946 return simplify_gen_relational (GE, mode, VOIDmode,
947 XEXP (op, 0), const0_rtx);
948
949 break;
950
951 case NEG:
952 /* (neg (neg X)) == X. */
953 if (GET_CODE (op) == NEG)
954 return XEXP (op, 0);
955
956 /* (neg (plus X 1)) can become (not X). */
957 if (GET_CODE (op) == PLUS
958 && XEXP (op, 1) == const1_rtx)
959 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
960
961 /* Similarly, (neg (not X)) is (plus X 1). */
962 if (GET_CODE (op) == NOT)
963 return plus_constant (XEXP (op, 0), 1);
964
965 /* (neg (minus X Y)) can become (minus Y X). This transformation
966 isn't safe for modes with signed zeros, since if X and Y are
967 both +0, (minus Y X) is the same as (minus X Y). If the
968 rounding mode is towards +infinity (or -infinity) then the two
969 expressions will be rounded differently. */
970 if (GET_CODE (op) == MINUS
971 && !HONOR_SIGNED_ZEROS (mode)
972 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
973 return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
974 XEXP (op, 0));
975
976 if (GET_CODE (op) == PLUS
977 && !HONOR_SIGNED_ZEROS (mode)
978 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
979 {
980 /* (neg (plus A C)) is simplified to (minus -C A). */
981 if (GET_CODE (XEXP (op, 1)) == CONST_INT
982 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
983 {
984 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
985 mode);
986 if (temp)
987 return simplify_gen_binary (MINUS, mode, temp,
988 XEXP (op, 0));
989 }
990
991 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
992 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
993 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
994 }
995
996 /* (neg (mult A B)) becomes (mult (neg A) B).
997 This works even for floating-point values. */
998 if (GET_CODE (op) == MULT
999 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1000 {
1001 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1002 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
1003 }
1004
1005 /* NEG commutes with ASHIFT since it is multiplication. Only do
1006 this if we can then eliminate the NEG (e.g., if the operand
1007 is a constant). */
1008 if (GET_CODE (op) == ASHIFT)
1009 {
1010 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
1011 mode);
1012 if (temp)
1013 return simplify_gen_binary (ASHIFT, mode, temp,
1014 XEXP (op, 1));
1015 }
1016
1017 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1018 C is equal to the width of MODE minus 1. */
1019 if (GET_CODE (op) == ASHIFTRT
1020 && GET_CODE (XEXP (op, 1)) == CONST_INT
1021 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
1022 return simplify_gen_binary (LSHIFTRT, mode,
1023 XEXP (op, 0), XEXP (op, 1));
1024
1025 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1026 C is equal to the width of MODE minus 1. */
1027 if (GET_CODE (op) == LSHIFTRT
1028 && GET_CODE (XEXP (op, 1)) == CONST_INT
1029 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
1030 return simplify_gen_binary (ASHIFTRT, mode,
1031 XEXP (op, 0), XEXP (op, 1));
1032
1033 break;
1034
1035 case SIGN_EXTEND:
1036 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1037 becomes just the MINUS if its mode is MODE. This allows
1038 folding switch statements on machines using casesi (such as
1039 the VAX). */
1040 if (GET_CODE (op) == TRUNCATE
1041 && GET_MODE (XEXP (op, 0)) == mode
1042 && GET_CODE (XEXP (op, 0)) == MINUS
1043 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1044 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1045 return XEXP (op, 0);
1046
1047 /* Check for a sign extension of a subreg of a promoted
1048 variable, where the promotion is sign-extended, and the
1049 target mode is the same as the variable's promotion. */
1050 if (GET_CODE (op) == SUBREG
1051 && SUBREG_PROMOTED_VAR_P (op)
1052 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1053 && GET_MODE (XEXP (op, 0)) == mode)
1054 return XEXP (op, 0);
1055
1056 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1057 if (! POINTERS_EXTEND_UNSIGNED
1058 && mode == Pmode && GET_MODE (op) == ptr_mode
1059 && (CONSTANT_P (op)
1060 || (GET_CODE (op) == SUBREG
1061 && REG_P (SUBREG_REG (op))
1062 && REG_POINTER (SUBREG_REG (op))
1063 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1064 return convert_memory_address (Pmode, op);
1065 #endif
1066 break;
1067
1068 case ZERO_EXTEND:
1069 /* Check for a zero extension of a subreg of a promoted
1070 variable, where the promotion is zero-extended, and the
1071 target mode is the same as the variable's promotion. */
1072 if (GET_CODE (op) == SUBREG
1073 && SUBREG_PROMOTED_VAR_P (op)
1074 && SUBREG_PROMOTED_UNSIGNED_P (op)
1075 && GET_MODE (XEXP (op, 0)) == mode)
1076 return XEXP (op, 0);
1077
1078 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1079 if (POINTERS_EXTEND_UNSIGNED > 0
1080 && mode == Pmode && GET_MODE (op) == ptr_mode
1081 && (CONSTANT_P (op)
1082 || (GET_CODE (op) == SUBREG
1083 && REG_P (SUBREG_REG (op))
1084 && REG_POINTER (SUBREG_REG (op))
1085 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1086 return convert_memory_address (Pmode, op);
1087 #endif
1088 break;
1089
1090 default:
1091 break;
1092 }
1093
1094 return 0;
1095 }
1096 }
1097 \f
1098 /* Subroutine of simplify_binary_operation to simplify a commutative,
1099 associative binary operation CODE with result mode MODE, operating
1100 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1101 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1102 canonicalization is possible. */
1103
1104 static rtx
1105 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1106 rtx op0, rtx op1)
1107 {
1108 rtx tem;
1109
1110 /* Linearize the operator to the left. */
1111 if (GET_CODE (op1) == code)
1112 {
1113 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1114 if (GET_CODE (op0) == code)
1115 {
1116 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1117 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1118 }
1119
1120 /* "a op (b op c)" becomes "(b op c) op a". */
1121 if (! swap_commutative_operands_p (op1, op0))
1122 return simplify_gen_binary (code, mode, op1, op0);
1123
1124 tem = op0;
1125 op0 = op1;
1126 op1 = tem;
1127 }
1128
1129 if (GET_CODE (op0) == code)
1130 {
1131 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1132 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1133 {
1134 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1135 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1136 }
1137
1138 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1139 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1140 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1141 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1142 if (tem != 0)
1143 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1144
1145 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1146 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1147 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1148 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1149 if (tem != 0)
1150 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1151 }
1152
1153 return 0;
1154 }
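/* For example, (plus:SI (plus:SI X (const_int 4)) Y) is canonicalized
   here to (plus:SI (plus:SI X Y) (const_int 4)), keeping the constant
   outermost so it can later be combined with other constants.  */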
1155
1156 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1157 and OP1. Return 0 if no simplification is possible.
1158
1159 Don't use this for relational operations such as EQ or LT.
1160 Use simplify_relational_operation instead. */
1161 rtx
1162 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1163 rtx op0, rtx op1)
1164 {
1165 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
1166 HOST_WIDE_INT val;
1167 unsigned int width = GET_MODE_BITSIZE (mode);
1168 rtx trueop0, trueop1;
1169 rtx tem;
1170
1171 /* Relational operations don't work here. We must know the mode
1172 of the operands in order to do the comparison correctly.
1173 Assuming a full word can give incorrect results.
1174 Consider comparing 128 with -128 in QImode. */
1175 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1176 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1177
1178 /* Make sure the constant is second. */
1179 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1180 && swap_commutative_operands_p (op0, op1))
1181 {
1182 tem = op0, op0 = op1, op1 = tem;
1183 }
1184
1185 trueop0 = avoid_constant_pool_reference (op0);
1186 trueop1 = avoid_constant_pool_reference (op1);
1187
1188 if (VECTOR_MODE_P (mode)
1189 && code != VEC_CONCAT
1190 && GET_CODE (trueop0) == CONST_VECTOR
1191 && GET_CODE (trueop1) == CONST_VECTOR)
1192 {
1193 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1194 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1195 enum machine_mode op0mode = GET_MODE (trueop0);
1196 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
1197 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
1198 enum machine_mode op1mode = GET_MODE (trueop1);
1199 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
1200 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
1201 rtvec v = rtvec_alloc (n_elts);
1202 unsigned int i;
1203
1204 gcc_assert (op0_n_elts == n_elts);
1205 gcc_assert (op1_n_elts == n_elts);
1206 for (i = 0; i < n_elts; i++)
1207 {
1208 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
1209 CONST_VECTOR_ELT (trueop0, i),
1210 CONST_VECTOR_ELT (trueop1, i));
1211 if (!x)
1212 return 0;
1213 RTVEC_ELT (v, i) = x;
1214 }
1215
1216 return gen_rtx_CONST_VECTOR (mode, v);
1217 }
1218
1219 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1220 && GET_CODE (trueop0) == CONST_DOUBLE
1221 && GET_CODE (trueop1) == CONST_DOUBLE
1222 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
1223 {
1224 if (code == AND
1225 || code == IOR
1226 || code == XOR)
1227 {
1228 long tmp0[4];
1229 long tmp1[4];
1230 REAL_VALUE_TYPE r;
1231 int i;
1232
1233 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
1234 GET_MODE (op0));
1235 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
1236 GET_MODE (op1));
1237 for (i = 0; i < 4; i++)
1238 {
1239 switch (code)
1240 {
1241 case AND:
1242 tmp0[i] &= tmp1[i];
1243 break;
1244 case IOR:
1245 tmp0[i] |= tmp1[i];
1246 break;
1247 case XOR:
1248 tmp0[i] ^= tmp1[i];
1249 break;
1250 default:
1251 gcc_unreachable ();
1252 }
1253 }
1254 real_from_target (&r, tmp0, mode);
1255 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
1256 }
1257 else
1258 {
1259 REAL_VALUE_TYPE f0, f1, value;
1260
1261 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
1262 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
1263 f0 = real_value_truncate (mode, f0);
1264 f1 = real_value_truncate (mode, f1);
1265
1266 if (HONOR_SNANS (mode)
1267 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
1268 return 0;
1269
1270 if (code == DIV
1271 && REAL_VALUES_EQUAL (f1, dconst0)
1272 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1273 return 0;
1274
1275 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
1276 && flag_trapping_math
1277 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
1278 {
1279 int s0 = REAL_VALUE_NEGATIVE (f0);
1280 int s1 = REAL_VALUE_NEGATIVE (f1);
1281
1282 switch (code)
1283 {
1284 case PLUS:
1285 /* Inf + -Inf = NaN plus exception. */
1286 if (s0 != s1)
1287 return 0;
1288 break;
1289 case MINUS:
1290 /* Inf - Inf = NaN plus exception. */
1291 if (s0 == s1)
1292 return 0;
1293 break;
1294 case DIV:
1295 /* Inf / Inf = NaN plus exception. */
1296 return 0;
1297 default:
1298 break;
1299 }
1300 }
1301
1302 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
1303 && flag_trapping_math
1304 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
1305 || (REAL_VALUE_ISINF (f1)
1306 && REAL_VALUES_EQUAL (f0, dconst0))))
1307 /* Inf * 0 = NaN plus exception. */
1308 return 0;
1309
1310 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
1311
1312 value = real_value_truncate (mode, value);
1313 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
1314 }
1315 }
1316
1317 /* We can fold some multi-word operations. */
1318 if (GET_MODE_CLASS (mode) == MODE_INT
1319 && width == HOST_BITS_PER_WIDE_INT * 2
1320 && (GET_CODE (trueop0) == CONST_DOUBLE
1321 || GET_CODE (trueop0) == CONST_INT)
1322 && (GET_CODE (trueop1) == CONST_DOUBLE
1323 || GET_CODE (trueop1) == CONST_INT))
1324 {
1325 unsigned HOST_WIDE_INT l1, l2, lv, lt;
1326 HOST_WIDE_INT h1, h2, hv, ht;
1327
1328 if (GET_CODE (trueop0) == CONST_DOUBLE)
1329 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
1330 else
1331 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
1332
1333 if (GET_CODE (trueop1) == CONST_DOUBLE)
1334 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
1335 else
1336 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
1337
1338 switch (code)
1339 {
1340 case MINUS:
1341 /* A - B == A + (-B). */
1342 neg_double (l2, h2, &lv, &hv);
1343 l2 = lv, h2 = hv;
1344
1345 /* Fall through.... */
1346
1347 case PLUS:
1348 add_double (l1, h1, l2, h2, &lv, &hv);
1349 break;
1350
1351 case MULT:
1352 mul_double (l1, h1, l2, h2, &lv, &hv);
1353 break;
1354
1355 case DIV:
1356 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
1357 &lv, &hv, &lt, &ht))
1358 return 0;
1359 break;
1360
1361 case MOD:
1362 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
1363 &lt, &ht, &lv, &hv))
1364 return 0;
1365 break;
1366
1367 case UDIV:
1368 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
1369 &lv, &hv, &lt, &ht))
1370 return 0;
1371 break;
1372
1373 case UMOD:
1374 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
1375 &lt, &ht, &lv, &hv))
1376 return 0;
1377 break;
1378
1379 case AND:
1380 lv = l1 & l2, hv = h1 & h2;
1381 break;
1382
1383 case IOR:
1384 lv = l1 | l2, hv = h1 | h2;
1385 break;
1386
1387 case XOR:
1388 lv = l1 ^ l2, hv = h1 ^ h2;
1389 break;
1390
1391 case SMIN:
1392 if (h1 < h2
1393 || (h1 == h2
1394 && ((unsigned HOST_WIDE_INT) l1
1395 < (unsigned HOST_WIDE_INT) l2)))
1396 lv = l1, hv = h1;
1397 else
1398 lv = l2, hv = h2;
1399 break;
1400
1401 case SMAX:
1402 if (h1 > h2
1403 || (h1 == h2
1404 && ((unsigned HOST_WIDE_INT) l1
1405 > (unsigned HOST_WIDE_INT) l2)))
1406 lv = l1, hv = h1;
1407 else
1408 lv = l2, hv = h2;
1409 break;
1410
1411 case UMIN:
1412 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1413 || (h1 == h2
1414 && ((unsigned HOST_WIDE_INT) l1
1415 < (unsigned HOST_WIDE_INT) l2)))
1416 lv = l1, hv = h1;
1417 else
1418 lv = l2, hv = h2;
1419 break;
1420
1421 case UMAX:
1422 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1423 || (h1 == h2
1424 && ((unsigned HOST_WIDE_INT) l1
1425 > (unsigned HOST_WIDE_INT) l2)))
1426 lv = l1, hv = h1;
1427 else
1428 lv = l2, hv = h2;
1429 break;
1430
1431 case LSHIFTRT: case ASHIFTRT:
1432 case ASHIFT:
1433 case ROTATE: case ROTATERT:
1434 if (SHIFT_COUNT_TRUNCATED)
1435 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1436
1437 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1438 return 0;
1439
1440 if (code == LSHIFTRT || code == ASHIFTRT)
1441 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1442 code == ASHIFTRT);
1443 else if (code == ASHIFT)
1444 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1445 else if (code == ROTATE)
1446 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1447 else /* code == ROTATERT */
1448 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1449 break;
1450
1451 default:
1452 return 0;
1453 }
1454
1455 return immed_double_const (lv, hv, mode);
1456 }
1457
1458 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1459 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1460 {
1461 /* Even if we can't compute a constant result,
1462 there are some cases worth simplifying. */
1463
1464 switch (code)
1465 {
1466 case PLUS:
1467 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1468 when x is NaN, infinite, or finite and nonzero. They aren't
1469 when x is -0 and the rounding mode is not towards -infinity,
1470 since (-0) + 0 is then 0. */
1471 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1472 return op0;
1473
1474 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1475 transformations are safe even for IEEE. */
1476 if (GET_CODE (op0) == NEG)
1477 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1478 else if (GET_CODE (op1) == NEG)
1479 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1480
1481 /* (~a) + 1 -> -a */
1482 if (INTEGRAL_MODE_P (mode)
1483 && GET_CODE (op0) == NOT
1484 && trueop1 == const1_rtx)
1485 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1486
1487 /* Handle both-operands-constant cases. We can only add
1488 CONST_INTs to constants since the sum of relocatable symbols
1489 can't be handled by most assemblers. Don't add CONST_INT
1490 to CONST_INT since overflow won't be computed properly if wider
1491 than HOST_BITS_PER_WIDE_INT. */
1492
1493 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1494 && GET_CODE (op1) == CONST_INT)
1495 return plus_constant (op0, INTVAL (op1));
1496 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1497 && GET_CODE (op0) == CONST_INT)
1498 return plus_constant (op1, INTVAL (op0));
1499
1500 /* See if this is something like X * C - X or vice versa or
1501 if the multiplication is written as a shift. If so, we can
1502 distribute and make a new multiply, shift, or maybe just
1503 have X (if C is 2 in the example above). But don't make
1504 something more expensive than we had before. */
1505
1506 if (! FLOAT_MODE_P (mode))
1507 {
1508 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1509 rtx lhs = op0, rhs = op1;
1510
1511 if (GET_CODE (lhs) == NEG)
1512 coeff0 = -1, lhs = XEXP (lhs, 0);
1513 else if (GET_CODE (lhs) == MULT
1514 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1515 {
1516 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1517 }
1518 else if (GET_CODE (lhs) == ASHIFT
1519 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1520 && INTVAL (XEXP (lhs, 1)) >= 0
1521 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1522 {
1523 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1524 lhs = XEXP (lhs, 0);
1525 }
1526
1527 if (GET_CODE (rhs) == NEG)
1528 coeff1 = -1, rhs = XEXP (rhs, 0);
1529 else if (GET_CODE (rhs) == MULT
1530 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1531 {
1532 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1533 }
1534 else if (GET_CODE (rhs) == ASHIFT
1535 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1536 && INTVAL (XEXP (rhs, 1)) >= 0
1537 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1538 {
1539 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1540 rhs = XEXP (rhs, 0);
1541 }
1542
1543 if (rtx_equal_p (lhs, rhs))
1544 {
1545 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1546 tem = simplify_gen_binary (MULT, mode, lhs,
1547 GEN_INT (coeff0 + coeff1));
1548 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1549 ? tem : 0;
1550 }
1551 }
1552
1553 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1554 if ((GET_CODE (op1) == CONST_INT
1555 || GET_CODE (op1) == CONST_DOUBLE)
1556 && GET_CODE (op0) == XOR
1557 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1558 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1559 && mode_signbit_p (mode, op1))
1560 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1561 simplify_gen_binary (XOR, mode, op1,
1562 XEXP (op0, 1)));
1563
1564 /* If one of the operands is a PLUS or a MINUS, see if we can
1565 simplify this by the associative law.
1566 Don't use the associative law for floating point.
1567 The inaccuracy makes it nonassociative,
1568 and subtle programs can break if operations are associated. */
1569
1570 if (INTEGRAL_MODE_P (mode)
1571 && (plus_minus_operand_p (op0)
1572 || plus_minus_operand_p (op1))
1573 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1574 return tem;
1575
1576 /* Reassociate floating point addition only when the user
1577 specifies unsafe math optimizations. */
1578 if (FLOAT_MODE_P (mode)
1579 && flag_unsafe_math_optimizations)
1580 {
1581 tem = simplify_associative_operation (code, mode, op0, op1);
1582 if (tem)
1583 return tem;
1584 }
1585 break;
1586
1587 case COMPARE:
1588 #ifdef HAVE_cc0
1589 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1590 using cc0, in which case we want to leave it as a COMPARE
1591 so we can distinguish it from a register-register-copy.
1592
1593 In IEEE floating point, x-0 is not the same as x. */
1594
1595 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1596 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1597 && trueop1 == CONST0_RTX (mode))
1598 return op0;
1599 #endif
1600
1601 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1602 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1603 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1604 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1605 {
1606 rtx xop00 = XEXP (op0, 0);
1607 rtx xop10 = XEXP (op1, 0);
1608
1609 #ifdef HAVE_cc0
1610 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1611 #else
1612 if (REG_P (xop00) && REG_P (xop10)
1613 && GET_MODE (xop00) == GET_MODE (xop10)
1614 && REGNO (xop00) == REGNO (xop10)
1615 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1616 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1617 #endif
1618 return xop00;
1619 }
1620 break;
1621
1622 case MINUS:
1623 /* We can't assume x-x is 0 even with non-IEEE floating point,
1624 but since it is zero except in very strange circumstances, we
1625 will treat it as zero with -funsafe-math-optimizations. */
1626 if (rtx_equal_p (trueop0, trueop1)
1627 && ! side_effects_p (op0)
1628 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1629 return CONST0_RTX (mode);
1630
1631 /* Change subtraction from zero into negation. (0 - x) is the
1632 same as -x when x is NaN, infinite, or finite and nonzero.
1633 But if the mode has signed zeros, and does not round towards
1634 -infinity, then 0 - 0 is 0, not -0. */
1635 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1636 return simplify_gen_unary (NEG, mode, op1, mode);
1637
1638 /* (-1 - a) is ~a. */
1639 if (trueop0 == constm1_rtx)
1640 return simplify_gen_unary (NOT, mode, op1, mode);
1641
1642 /* Subtracting 0 has no effect unless the mode has signed zeros
1643 and supports rounding towards -infinity. In such a case,
1644 0 - 0 is -0. */
1645 if (!(HONOR_SIGNED_ZEROS (mode)
1646 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1647 && trueop1 == CONST0_RTX (mode))
1648 return op0;
1649
1650 /* See if this is something like X * C - X or vice versa or
1651 if the multiplication is written as a shift. If so, we can
1652 distribute and make a new multiply, shift, or maybe just
1653 have X (if C is 2 in the example above). But don't make
1654 something more expensive than we had before. */
1655
1656 if (! FLOAT_MODE_P (mode))
1657 {
1658 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1659 rtx lhs = op0, rhs = op1;
1660
1661 if (GET_CODE (lhs) == NEG)
1662 coeff0 = -1, lhs = XEXP (lhs, 0);
1663 else if (GET_CODE (lhs) == MULT
1664 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1665 {
1666 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1667 }
1668 else if (GET_CODE (lhs) == ASHIFT
1669 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1670 && INTVAL (XEXP (lhs, 1)) >= 0
1671 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1672 {
1673 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1674 lhs = XEXP (lhs, 0);
1675 }
1676
1677 if (GET_CODE (rhs) == NEG)
1678 coeff1 = - 1, rhs = XEXP (rhs, 0);
1679 else if (GET_CODE (rhs) == MULT
1680 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1681 {
1682 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1683 }
1684 else if (GET_CODE (rhs) == ASHIFT
1685 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1686 && INTVAL (XEXP (rhs, 1)) >= 0
1687 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1688 {
1689 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1690 rhs = XEXP (rhs, 0);
1691 }
1692
1693 if (rtx_equal_p (lhs, rhs))
1694 {
1695 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1696 tem = simplify_gen_binary (MULT, mode, lhs,
1697 GEN_INT (coeff0 - coeff1));
1698 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1699 ? tem : 0;
1700 }
1701 }
1702
1703 /* (a - (-b)) -> (a + b). True even for IEEE. */
1704 if (GET_CODE (op1) == NEG)
1705 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1706
1707 /* (-x - c) may be simplified as (-c - x). */
1708 if (GET_CODE (op0) == NEG
1709 && (GET_CODE (op1) == CONST_INT
1710 || GET_CODE (op1) == CONST_DOUBLE))
1711 {
1712 tem = simplify_unary_operation (NEG, mode, op1, mode);
1713 if (tem)
1714 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1715 }
1716
1717 /* If one of the operands is a PLUS or a MINUS, see if we can
1718 simplify this by the associative law.
1719 Don't use the associative law for floating point.
1720 The inaccuracy makes it nonassociative,
1721 and subtle programs can break if operations are associated. */
1722
1723 if (INTEGRAL_MODE_P (mode)
1724 && (plus_minus_operand_p (op0)
1725 || plus_minus_operand_p (op1))
1726 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1727 return tem;
1728
1729 /* Don't let a relocatable value get a negative coeff. */
1730 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1731 return simplify_gen_binary (PLUS, mode,
1732 op0,
1733 neg_const_int (mode, op1));
1734
1735 /* (x - (x & y)) -> (x & ~y) */
1736 if (GET_CODE (op1) == AND)
1737 {
1738 if (rtx_equal_p (op0, XEXP (op1, 0)))
1739 {
1740 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1741 GET_MODE (XEXP (op1, 1)));
1742 return simplify_gen_binary (AND, mode, op0, tem);
1743 }
1744 if (rtx_equal_p (op0, XEXP (op1, 1)))
1745 {
1746 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1747 GET_MODE (XEXP (op1, 0)));
1748 return simplify_gen_binary (AND, mode, op0, tem);
1749 }
1750 }
1751 break;
1752
1753 case MULT:
1754 if (trueop1 == constm1_rtx)
1755 return simplify_gen_unary (NEG, mode, op0, mode);
1756
1757 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1758 x is NaN, since x * 0 is then also NaN. Nor is it valid
1759 when the mode has signed zeros, since multiplying a negative
1760 number by 0 will give -0, not 0. */
1761 if (!HONOR_NANS (mode)
1762 && !HONOR_SIGNED_ZEROS (mode)
1763 && trueop1 == CONST0_RTX (mode)
1764 && ! side_effects_p (op0))
1765 return op1;
1766
1767 /* In IEEE floating point, x*1 is not equivalent to x for
1768 signalling NaNs. */
1769 if (!HONOR_SNANS (mode)
1770 && trueop1 == CONST1_RTX (mode))
1771 return op0;
1772
1773 /* Convert multiply by a constant power of two into a shift. */
1775 if (GET_CODE (trueop1) == CONST_INT
1776 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1777 /* If the mode is larger than the host word size, and the
1778 uppermost bit is set, then this isn't a power of two due
1779 to implicit sign extension. */
1780 && (width <= HOST_BITS_PER_WIDE_INT
1781 || val != HOST_BITS_PER_WIDE_INT - 1))
1782 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1783
1784 /* x*2 is x+x and x*(-1) is -x */
1785 if (GET_CODE (trueop1) == CONST_DOUBLE
1786 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1787 && GET_MODE (op0) == mode)
1788 {
1789 REAL_VALUE_TYPE d;
1790 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1791
1792 if (REAL_VALUES_EQUAL (d, dconst2))
1793 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1794
1795 if (REAL_VALUES_EQUAL (d, dconstm1))
1796 return simplify_gen_unary (NEG, mode, op0, mode);
1797 }
1798
1799 /* Reassociate multiplication, but for floating point MULTs
1800 only when the user specifies unsafe math optimizations. */
1801 if (! FLOAT_MODE_P (mode)
1802 || flag_unsafe_math_optimizations)
1803 {
1804 tem = simplify_associative_operation (code, mode, op0, op1);
1805 if (tem)
1806 return tem;
1807 }
1808 break;
1809
1810 case IOR:
1811 if (trueop1 == const0_rtx)
1812 return op0;
1813 if (GET_CODE (trueop1) == CONST_INT
1814 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1815 == GET_MODE_MASK (mode)))
1816 return op1;
1817 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1818 return op0;
1819 /* A | (~A) -> -1 */
1820 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1821 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1822 && ! side_effects_p (op0)
1823 && GET_MODE_CLASS (mode) != MODE_CC)
1824 return constm1_rtx;
1825 tem = simplify_associative_operation (code, mode, op0, op1);
1826 if (tem)
1827 return tem;
1828 break;
1829
1830 case XOR:
1831 if (trueop1 == const0_rtx)
1832 return op0;
1833 if (GET_CODE (trueop1) == CONST_INT
1834 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1835 == GET_MODE_MASK (mode)))
1836 return simplify_gen_unary (NOT, mode, op0, mode);
1837 if (trueop0 == trueop1
1838 && ! side_effects_p (op0)
1839 && GET_MODE_CLASS (mode) != MODE_CC)
1840 return const0_rtx;
1841
1842 /* Canonicalize XOR of the most significant bit to PLUS. */
1843 if ((GET_CODE (op1) == CONST_INT
1844 || GET_CODE (op1) == CONST_DOUBLE)
1845 && mode_signbit_p (mode, op1))
1846 return simplify_gen_binary (PLUS, mode, op0, op1);
1847 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
1848 if ((GET_CODE (op1) == CONST_INT
1849 || GET_CODE (op1) == CONST_DOUBLE)
1850 && GET_CODE (op0) == PLUS
1851 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1852 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1853 && mode_signbit_p (mode, XEXP (op0, 1)))
1854 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1855 simplify_gen_binary (XOR, mode, op1,
1856 XEXP (op0, 1)));
1857
1858 tem = simplify_associative_operation (code, mode, op0, op1);
1859 if (tem)
1860 return tem;
1861 break;
1862
1863 case AND:
1864 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1865 return const0_rtx;
1866 /* If we are turning off bits already known off in OP0, we need
1867 not do an AND. */
1868 if (GET_CODE (trueop1) == CONST_INT
1869 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1870 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
1871 return op0;
1872 if (trueop0 == trueop1 && ! side_effects_p (op0)
1873 && GET_MODE_CLASS (mode) != MODE_CC)
1874 return op0;
1875 /* A & (~A) -> 0 */
1876 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1877 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1878 && ! side_effects_p (op0)
1879 && GET_MODE_CLASS (mode) != MODE_CC)
1880 return const0_rtx;
1881 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
1882 ((A & N) + B) & M -> (A + B) & M
1883 Similarly if (N & M) == 0,
1884 ((A | N) + B) & M -> (A + B) & M
1885 and for - instead of + and/or ^ instead of |. */
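/* For instance, with M == 0xff: ((A & 0x1ff) + B) & 0xff becomes
   (A + B) & 0xff, since the low eight bits of A survive either way,
   and ((A | 0x100) + B) & 0xff becomes (A + B) & 0xff, since bit 8
   can only influence result bits above the mask.  */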
1886 if (GET_CODE (trueop1) == CONST_INT
1887 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1888 && ~INTVAL (trueop1)
1889 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
1890 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
1891 {
1892 rtx pmop[2];
1893 int which;
1894
1895 pmop[0] = XEXP (op0, 0);
1896 pmop[1] = XEXP (op0, 1);
1897
1898 for (which = 0; which < 2; which++)
1899 {
1900 tem = pmop[which];
1901 switch (GET_CODE (tem))
1902 {
1903 case AND:
1904 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1905 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
1906 == INTVAL (trueop1))
1907 pmop[which] = XEXP (tem, 0);
1908 break;
1909 case IOR:
1910 case XOR:
1911 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1912 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
1913 pmop[which] = XEXP (tem, 0);
1914 break;
1915 default:
1916 break;
1917 }
1918 }
1919
1920 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
1921 {
1922 tem = simplify_gen_binary (GET_CODE (op0), mode,
1923 pmop[0], pmop[1]);
1924 return simplify_gen_binary (code, mode, tem, op1);
1925 }
1926 }
1927 tem = simplify_associative_operation (code, mode, op0, op1);
1928 if (tem)
1929 return tem;
1930 break;
1931
1932 case UDIV:
1933 /* 0/x is 0 (or x&0 if x has side-effects). */
1934 if (trueop0 == const0_rtx)
1935 return side_effects_p (op1)
1936 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
1937 : const0_rtx;
1938 /* x/1 is x. */
1939 if (trueop1 == const1_rtx)
1940 {
1941 /* Handle narrowing UDIV. */
1942 rtx x = gen_lowpart_common (mode, op0);
1943 if (x)
1944 return x;
1945 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1946 return gen_lowpart_SUBREG (mode, op0);
1947 return op0;
1948 }
1949 /* Convert divide by power of two into shift. */
1950 if (GET_CODE (trueop1) == CONST_INT
1951 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1952 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
1953 break;
1954
1955 case DIV:
1956 /* Handle floating point and integers separately. */
1957 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1958 {
1959 /* Maybe change 0.0 / x to 0.0. This transformation isn't
1960 safe for modes with NaNs, since 0.0 / 0.0 will then be
1961 NaN rather than 0.0. Nor is it safe for modes with signed
1962 zeros, since dividing 0 by a negative number gives -0.0. */
1963 if (trueop0 == CONST0_RTX (mode)
1964 && !HONOR_NANS (mode)
1965 && !HONOR_SIGNED_ZEROS (mode)
1966 && ! side_effects_p (op1))
1967 return op0;
1968 /* x/1.0 is x. */
1969 if (trueop1 == CONST1_RTX (mode)
1970 && !HONOR_SNANS (mode))
1971 return op0;
1972
1973 if (GET_CODE (trueop1) == CONST_DOUBLE
1974 && trueop1 != CONST0_RTX (mode))
1975 {
1976 REAL_VALUE_TYPE d;
1977 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1978
1979 /* x/-1.0 is -x. */
1980 if (REAL_VALUES_EQUAL (d, dconstm1)
1981 && !HONOR_SNANS (mode))
1982 return simplify_gen_unary (NEG, mode, op0, mode);
1983
1984 /* Change FP division by a constant into multiplication.
1985 Only do this with -funsafe-math-optimizations. */
1986 if (flag_unsafe_math_optimizations
1987 && !REAL_VALUES_EQUAL (d, dconst0))
1988 {
1989 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
1990 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1991 return simplify_gen_binary (MULT, mode, op0, tem);
1992 }
1993 }
1994 }
1995 else
1996 {
1997 /* 0/x is 0 (or x&0 if x has side-effects). */
1998 if (trueop0 == const0_rtx)
1999 return side_effects_p (op1)
2000 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2001 : const0_rtx;
2002 /* x/1 is x. */
2003 if (trueop1 == const1_rtx)
2004 {
2005 /* Handle narrowing DIV. */
2006 rtx x = gen_lowpart_common (mode, op0);
2007 if (x)
2008 return x;
2009 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2010 return gen_lowpart_SUBREG (mode, op0);
2011 return op0;
2012 }
2013 /* x/-1 is -x. */
2014 if (trueop1 == constm1_rtx)
2015 {
2016 rtx x = gen_lowpart_common (mode, op0);
2017 if (!x)
2018 x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2019 ? gen_lowpart_SUBREG (mode, op0) : op0;
2020 return simplify_gen_unary (NEG, mode, x, mode);
2021 }
2022 }
2023 break;
2024
2025 case UMOD:
2026 /* 0%x is 0 (or x&0 if x has side-effects). */
2027 if (trueop0 == const0_rtx)
2028 return side_effects_p (op1)
2029 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2030 : const0_rtx;
2031 /* x%1 is 0 (or x&0 if x has side-effects). */
2032 if (trueop1 == const1_rtx)
2033 return side_effects_p (op0)
2034 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2035 : const0_rtx;
2036 /* Implement modulus by power of two as AND. */
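/* For example, (umod x 8) becomes (and x 7).  */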
2037 if (GET_CODE (trueop1) == CONST_INT
2038 && exact_log2 (INTVAL (trueop1)) > 0)
2039 return simplify_gen_binary (AND, mode, op0,
2040 GEN_INT (INTVAL (op1) - 1));
2041 break;
2042
2043 case MOD:
2044 /* 0%x is 0 (or x&0 if x has side-effects). */
2045 if (trueop0 == const0_rtx)
2046 return side_effects_p (op1)
2047 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2048 : const0_rtx;
2049 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2050 if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
2051 return side_effects_p (op0)
2052 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2053 : const0_rtx;
2054 break;
2055
2056 case ROTATERT:
2057 case ROTATE:
2058 case ASHIFTRT:
2059 /* Rotating ~0 always results in ~0. */
2060 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2061 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2062 && ! side_effects_p (op1))
2063 return op0;
2064
2065 /* Fall through.... */
2066
2067 case ASHIFT:
2068 case LSHIFTRT:
2069 if (trueop1 == const0_rtx)
2070 return op0;
2071 if (trueop0 == const0_rtx && ! side_effects_p (op1))
2072 return op0;
2073 break;
2074
2075 case SMIN:
2076 if (width <= HOST_BITS_PER_WIDE_INT
2077 && GET_CODE (trueop1) == CONST_INT
2078 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2079 && ! side_effects_p (op0))
2080 return op1;
2081 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2082 return op0;
2083 tem = simplify_associative_operation (code, mode, op0, op1);
2084 if (tem)
2085 return tem;
2086 break;
2087
2088 case SMAX:
2089 if (width <= HOST_BITS_PER_WIDE_INT
2090 && GET_CODE (trueop1) == CONST_INT
2091 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2092 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2093 && ! side_effects_p (op0))
2094 return op1;
2095 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2096 return op0;
2097 tem = simplify_associative_operation (code, mode, op0, op1);
2098 if (tem)
2099 return tem;
2100 break;
2101
2102 case UMIN:
2103 if (trueop1 == const0_rtx && ! side_effects_p (op0))
2104 return op1;
2105 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2106 return op0;
2107 tem = simplify_associative_operation (code, mode, op0, op1);
2108 if (tem)
2109 return tem;
2110 break;
2111
2112 case UMAX:
2113 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2114 return op1;
2115 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2116 return op0;
2117 tem = simplify_associative_operation (code, mode, op0, op1);
2118 if (tem)
2119 return tem;
2120 break;
2121
2122 case SS_PLUS:
2123 case US_PLUS:
2124 case SS_MINUS:
2125 case US_MINUS:
2126 /* ??? There are simplifications that can be done. */
2127 return 0;
2128
2129 case VEC_SELECT:
2130 if (!VECTOR_MODE_P (mode))
2131 {
2132 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2133 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2134 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2135 gcc_assert (XVECLEN (trueop1, 0) == 1);
2136 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2137
2138 if (GET_CODE (trueop0) == CONST_VECTOR)
2139 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2140 (trueop1, 0, 0)));
2141 }
2142 else
2143 {
2144 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2145 gcc_assert (GET_MODE_INNER (mode)
2146 == GET_MODE_INNER (GET_MODE (trueop0)));
2147 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2148
2149 if (GET_CODE (trueop0) == CONST_VECTOR)
2150 {
2151 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2152 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2153 rtvec v = rtvec_alloc (n_elts);
2154 unsigned int i;
2155
2156 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2157 for (i = 0; i < n_elts; i++)
2158 {
2159 rtx x = XVECEXP (trueop1, 0, i);
2160
2161 gcc_assert (GET_CODE (x) == CONST_INT);
2162 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2163 INTVAL (x));
2164 }
2165
2166 return gen_rtx_CONST_VECTOR (mode, v);
2167 }
2168 }
2169 return 0;
2170 case VEC_CONCAT:
2171 {
2172 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2173 ? GET_MODE (trueop0)
2174 : GET_MODE_INNER (mode));
2175 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2176 ? GET_MODE (trueop1)
2177 : GET_MODE_INNER (mode));
2178
2179 gcc_assert (VECTOR_MODE_P (mode));
2180 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2181 == GET_MODE_SIZE (mode));
2182
2183 if (VECTOR_MODE_P (op0_mode))
2184 gcc_assert (GET_MODE_INNER (mode)
2185 == GET_MODE_INNER (op0_mode));
2186 else
2187 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2188
2189 if (VECTOR_MODE_P (op1_mode))
2190 gcc_assert (GET_MODE_INNER (mode)
2191 == GET_MODE_INNER (op1_mode));
2192 else
2193 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2194
2195 if ((GET_CODE (trueop0) == CONST_VECTOR
2196 || GET_CODE (trueop0) == CONST_INT
2197 || GET_CODE (trueop0) == CONST_DOUBLE)
2198 && (GET_CODE (trueop1) == CONST_VECTOR
2199 || GET_CODE (trueop1) == CONST_INT
2200 || GET_CODE (trueop1) == CONST_DOUBLE))
2201 {
2202 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2203 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2204 rtvec v = rtvec_alloc (n_elts);
2205 unsigned int i;
2206 unsigned in_n_elts = 1;
2207
2208 if (VECTOR_MODE_P (op0_mode))
2209 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2210 for (i = 0; i < n_elts; i++)
2211 {
2212 if (i < in_n_elts)
2213 {
2214 if (!VECTOR_MODE_P (op0_mode))
2215 RTVEC_ELT (v, i) = trueop0;
2216 else
2217 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2218 }
2219 else
2220 {
2221 if (!VECTOR_MODE_P (op1_mode))
2222 RTVEC_ELT (v, i) = trueop1;
2223 else
2224 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2225 i - in_n_elts);
2226 }
2227 }
2228
2229 return gen_rtx_CONST_VECTOR (mode, v);
2230 }
2231 }
2232 return 0;
2233
2234 default:
2235 gcc_unreachable ();
2236 }
2237
2238 return 0;
2239 }
2240
2241 /* Get the integer argument values in two forms:
2242 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
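/* For example, in QImode (width == 8) a constant with all bits set gives
   ARG0 == 255 but ARG0S == -1; the zero-extended forms feed the unsigned
   operations (UDIV, UMOD, UMIN, UMAX) and the sign-extended forms feed
   the signed ones (DIV, MOD, SMIN, SMAX).  */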
2243
2244 arg0 = INTVAL (trueop0);
2245 arg1 = INTVAL (trueop1);
2246
2247 if (width < HOST_BITS_PER_WIDE_INT)
2248 {
2249 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2250 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2251
2252 arg0s = arg0;
2253 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2254 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2255
2256 arg1s = arg1;
2257 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2258 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2259 }
2260 else
2261 {
2262 arg0s = arg0;
2263 arg1s = arg1;
2264 }
2265
2266 /* Compute the value of the arithmetic. */
2267
2268 switch (code)
2269 {
2270 case PLUS:
2271 val = arg0s + arg1s;
2272 break;
2273
2274 case MINUS:
2275 val = arg0s - arg1s;
2276 break;
2277
2278 case MULT:
2279 val = arg0s * arg1s;
2280 break;
2281
2282 case DIV:
2283 if (arg1s == 0
2284 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2285 && arg1s == -1))
2286 return 0;
2287 val = arg0s / arg1s;
2288 break;
2289
2290 case MOD:
2291 if (arg1s == 0
2292 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2293 && arg1s == -1))
2294 return 0;
2295 val = arg0s % arg1s;
2296 break;
2297
2298 case UDIV:
2299 if (arg1 == 0
2300 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2301 && arg1s == -1))
2302 return 0;
2303 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2304 break;
2305
2306 case UMOD:
2307 if (arg1 == 0
2308 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2309 && arg1s == -1))
2310 return 0;
2311 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2312 break;
2313
2314 case AND:
2315 val = arg0 & arg1;
2316 break;
2317
2318 case IOR:
2319 val = arg0 | arg1;
2320 break;
2321
2322 case XOR:
2323 val = arg0 ^ arg1;
2324 break;
2325
2326 case LSHIFTRT:
2327 case ASHIFT:
2328 case ASHIFTRT:
2329 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure the
2330 value is in range. We can't return any old value for out-of-range
2331 arguments because either the middle-end (via shift_truncation_mask)
2332 or the back-end might be relying on target-specific knowledge.
2333 Nor can we rely on shift_truncation_mask, since the shift might
2334 not be part of an ashlM3, lshrM3 or ashrM3 instruction. */
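/* E.g. for a 32-bit mode a shift count of 33 is reduced to 1 when
   SHIFT_COUNT_TRUNCATED is nonzero; otherwise the out-of-range count
   makes us return 0 and leave the expression alone.  */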
2335 if (SHIFT_COUNT_TRUNCATED)
2336 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2337 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2338 return 0;
2339
2340 val = (code == ASHIFT
2341 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2342 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2343
2344 /* Sign-extend the result for arithmetic right shifts. */
2345 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2346 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
2347 break;
2348
2349 case ROTATERT:
2350 if (arg1 < 0)
2351 return 0;
2352
2353 arg1 %= width;
2354 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2355 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2356 break;
2357
2358 case ROTATE:
2359 if (arg1 < 0)
2360 return 0;
2361
2362 arg1 %= width;
2363 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2364 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2365 break;
2366
2367 case COMPARE:
2368 /* Do nothing here. */
2369 return 0;
2370
2371 case SMIN:
2372 val = arg0s <= arg1s ? arg0s : arg1s;
2373 break;
2374
2375 case UMIN:
2376 val = ((unsigned HOST_WIDE_INT) arg0
2377 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2378 break;
2379
2380 case SMAX:
2381 val = arg0s > arg1s ? arg0s : arg1s;
2382 break;
2383
2384 case UMAX:
2385 val = ((unsigned HOST_WIDE_INT) arg0
2386 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2387 break;
2388
2389 case SS_PLUS:
2390 case US_PLUS:
2391 case SS_MINUS:
2392 case US_MINUS:
2393 /* ??? There are simplifications that can be done. */
2394 return 0;
2395
2396 default:
2397 gcc_unreachable ();
2398 }
2399
2400 val = trunc_int_for_mode (val, mode);
2401
2402 return GEN_INT (val);
2403 }
2404 \f
2405 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2406 PLUS or MINUS.
2407
2408 Rather than test for specific cases, we do this by a brute-force method
2409 and do all possible simplifications until no more changes occur. Then
2410 we rebuild the operation.
2411
2412 If FORCE is true, then always generate the rtx. This is used to
2413 canonicalize stuff emitted from simplify_gen_binary. Note that this
2414 can still fail if the rtx is too complex. It won't fail just because
2415 the result is not 'simpler' than the input, however. */
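/* As an integer-mode illustration: (plus (plus x 1) (minus y x)) is
   flattened into the operand list { x, +1, y, -x }; the pairwise pass
   below cancels x against -x, and the result is rebuilt as
   (plus y (const_int 1)).  */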
2416
2417 struct simplify_plus_minus_op_data
2418 {
2419 rtx op;
2420 int neg;
2421 };
2422
2423 static int
2424 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2425 {
2426 const struct simplify_plus_minus_op_data *d1 = p1;
2427 const struct simplify_plus_minus_op_data *d2 = p2;
2428
2429 return (commutative_operand_precedence (d2->op)
2430 - commutative_operand_precedence (d1->op));
2431 }
2432
2433 static rtx
2434 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2435 rtx op1, int force)
2436 {
2437 struct simplify_plus_minus_op_data ops[8];
2438 rtx result, tem;
2439 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2440 int first, changed;
2441 int i, j;
2442
2443 memset (ops, 0, sizeof ops);
2444
2445 /* Set up the two operands and then expand them until nothing has been
2446 changed. If we run out of room in our array, give up; this should
2447 almost never happen. */
2448
2449 ops[0].op = op0;
2450 ops[0].neg = 0;
2451 ops[1].op = op1;
2452 ops[1].neg = (code == MINUS);
2453
2454 do
2455 {
2456 changed = 0;
2457
2458 for (i = 0; i < n_ops; i++)
2459 {
2460 rtx this_op = ops[i].op;
2461 int this_neg = ops[i].neg;
2462 enum rtx_code this_code = GET_CODE (this_op);
2463
2464 switch (this_code)
2465 {
2466 case PLUS:
2467 case MINUS:
2468 if (n_ops == 7)
2469 return NULL_RTX;
2470
2471 ops[n_ops].op = XEXP (this_op, 1);
2472 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2473 n_ops++;
2474
2475 ops[i].op = XEXP (this_op, 0);
2476 input_ops++;
2477 changed = 1;
2478 break;
2479
2480 case NEG:
2481 ops[i].op = XEXP (this_op, 0);
2482 ops[i].neg = ! this_neg;
2483 changed = 1;
2484 break;
2485
2486 case CONST:
2487 if (n_ops < 7
2488 && GET_CODE (XEXP (this_op, 0)) == PLUS
2489 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2490 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2491 {
2492 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2493 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2494 ops[n_ops].neg = this_neg;
2495 n_ops++;
2496 input_consts++;
2497 changed = 1;
2498 }
2499 break;
2500
2501 case NOT:
2502 /* ~a -> (-a - 1) */
2503 if (n_ops != 7)
2504 {
2505 ops[n_ops].op = constm1_rtx;
2506 ops[n_ops++].neg = this_neg;
2507 ops[i].op = XEXP (this_op, 0);
2508 ops[i].neg = !this_neg;
2509 changed = 1;
2510 }
2511 break;
2512
2513 case CONST_INT:
2514 if (this_neg)
2515 {
2516 ops[i].op = neg_const_int (mode, this_op);
2517 ops[i].neg = 0;
2518 changed = 1;
2519 }
2520 break;
2521
2522 default:
2523 break;
2524 }
2525 }
2526 }
2527 while (changed);
2528
2529 /* If we only have two operands, we can't do anything. */
2530 if (n_ops <= 2 && !force)
2531 return NULL_RTX;
2532
2533 /* Count the number of CONSTs we didn't split above. */
2534 for (i = 0; i < n_ops; i++)
2535 if (GET_CODE (ops[i].op) == CONST)
2536 input_consts++;
2537
2538 /* Now simplify each pair of operands until nothing changes. The first
2539 time through just simplify constants against each other. */
2540
2541 first = 1;
2542 do
2543 {
2544 changed = first;
2545
2546 for (i = 0; i < n_ops - 1; i++)
2547 for (j = i + 1; j < n_ops; j++)
2548 {
2549 rtx lhs = ops[i].op, rhs = ops[j].op;
2550 int lneg = ops[i].neg, rneg = ops[j].neg;
2551
2552 if (lhs != 0 && rhs != 0
2553 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2554 {
2555 enum rtx_code ncode = PLUS;
2556
2557 if (lneg != rneg)
2558 {
2559 ncode = MINUS;
2560 if (lneg)
2561 tem = lhs, lhs = rhs, rhs = tem;
2562 }
2563 else if (swap_commutative_operands_p (lhs, rhs))
2564 tem = lhs, lhs = rhs, rhs = tem;
2565
2566 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2567
2568 /* Reject "simplifications" that just wrap the two
2569 arguments in a CONST. Failure to do so can result
2570 in infinite recursion with simplify_binary_operation
2571 when it calls us to simplify CONST operations. */
2572 if (tem
2573 && ! (GET_CODE (tem) == CONST
2574 && GET_CODE (XEXP (tem, 0)) == ncode
2575 && XEXP (XEXP (tem, 0), 0) == lhs
2576 && XEXP (XEXP (tem, 0), 1) == rhs)
2577 /* Don't allow -x + -1 -> ~x simplifications in the
2578 first pass. This allows us the chance to combine
2579 the -1 with other constants. */
2580 && ! (first
2581 && GET_CODE (tem) == NOT
2582 && XEXP (tem, 0) == rhs))
2583 {
2584 lneg &= rneg;
2585 if (GET_CODE (tem) == NEG)
2586 tem = XEXP (tem, 0), lneg = !lneg;
2587 if (GET_CODE (tem) == CONST_INT && lneg)
2588 tem = neg_const_int (mode, tem), lneg = 0;
2589
2590 ops[i].op = tem;
2591 ops[i].neg = lneg;
2592 ops[j].op = NULL_RTX;
2593 changed = 1;
2594 }
2595 }
2596 }
2597
2598 first = 0;
2599 }
2600 while (changed);
2601
2602 /* Pack all the operands to the lower-numbered entries. */
2603 for (i = 0, j = 0; j < n_ops; j++)
2604 if (ops[j].op)
2605 ops[i++] = ops[j];
2606 n_ops = i;
2607
2608 /* Sort the operations based on swap_commutative_operands_p. */
2609 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2610
2611 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2612 if (n_ops == 2
2613 && GET_CODE (ops[1].op) == CONST_INT
2614 && CONSTANT_P (ops[0].op)
2615 && ops[0].neg)
2616 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2617
2618 /* We suppressed creation of trivial CONST expressions in the
2619 combination loop to avoid recursion. Create one manually now.
2620 The combination loop should have ensured that there is exactly
2621 one CONST_INT, and the sort will have ensured that it is last
2622 in the array and that any other constant will be next-to-last. */
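/* For example, a tail of { (symbol_ref s), (const_int 12) } is folded
   below into the single operand
   (const (plus (symbol_ref s) (const_int 12))).  */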
2623
2624 if (n_ops > 1
2625 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2626 && CONSTANT_P (ops[n_ops - 2].op))
2627 {
2628 rtx value = ops[n_ops - 1].op;
2629 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2630 value = neg_const_int (mode, value);
2631 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2632 n_ops--;
2633 }
2634
2635 /* Count the number of CONSTs that we generated. */
2636 n_consts = 0;
2637 for (i = 0; i < n_ops; i++)
2638 if (GET_CODE (ops[i].op) == CONST)
2639 n_consts++;
2640
2641 /* Give up if we didn't reduce the number of operands we had. Make
2642 sure we count a CONST as two operands. If we have the same
2643 number of operands, but have made more CONSTs than before, this
2644 is also an improvement, so accept it. */
2645 if (!force
2646 && (n_ops + n_consts > input_ops
2647 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2648 return NULL_RTX;
2649
2650 /* Put a non-negated operand first, if possible. */
2651
2652 for (i = 0; i < n_ops && ops[i].neg; i++)
2653 continue;
2654 if (i == n_ops)
2655 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2656 else if (i != 0)
2657 {
2658 tem = ops[0].op;
2659 ops[0] = ops[i];
2660 ops[i].op = tem;
2661 ops[i].neg = 1;
2662 }
2663
2664 /* Now make the result by performing the requested operations. */
2665 result = ops[0].op;
2666 for (i = 1; i < n_ops; i++)
2667 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2668 mode, result, ops[i].op);
2669
2670 return result;
2671 }
2672
2673 /* Check whether an operand is suitable for calling simplify_plus_minus. */
2674 static bool
2675 plus_minus_operand_p (rtx x)
2676 {
2677 return GET_CODE (x) == PLUS
2678 || GET_CODE (x) == MINUS
2679 || (GET_CODE (x) == CONST
2680 && GET_CODE (XEXP (x, 0)) == PLUS
2681 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
2682 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
2683 }
2684
2685 /* Like simplify_binary_operation except used for relational operators.
2686 MODE is the mode of the result. If MODE is VOIDmode, the operands
2687 must not both be VOIDmode.
2688
2689 CMP_MODE specifies the mode in which the comparison is done, so it is
2690 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2691 the operands or, if both are VOIDmode, the operands are compared in
2692 "infinite precision". */
2693 rtx
2694 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2695 enum machine_mode cmp_mode, rtx op0, rtx op1)
2696 {
2697 rtx tem, trueop0, trueop1;
2698
2699 if (cmp_mode == VOIDmode)
2700 cmp_mode = GET_MODE (op0);
2701 if (cmp_mode == VOIDmode)
2702 cmp_mode = GET_MODE (op1);
2703
2704 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2705 if (tem)
2706 {
2707 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2708 {
2709 if (tem == const0_rtx)
2710 return CONST0_RTX (mode);
2711 #ifdef FLOAT_STORE_FLAG_VALUE
2712 {
2713 REAL_VALUE_TYPE val;
2714 val = FLOAT_STORE_FLAG_VALUE (mode);
2715 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2716 }
2717 #else
2718 return NULL_RTX;
2719 #endif
2720 }
2721 if (VECTOR_MODE_P (mode))
2722 {
2723 if (tem == const0_rtx)
2724 return CONST0_RTX (mode);
2725 #ifdef VECTOR_STORE_FLAG_VALUE
2726 {
2727 int i, units;
2728 rtvec v;
2729
2730 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2731 if (val == NULL_RTX)
2732 return NULL_RTX;
2733 if (val == const1_rtx)
2734 return CONST1_RTX (mode);
2735
2736 units = GET_MODE_NUNITS (mode);
2737 v = rtvec_alloc (units);
2738 for (i = 0; i < units; i++)
2739 RTVEC_ELT (v, i) = val;
2740 return gen_rtx_raw_CONST_VECTOR (mode, v);
2741 }
2742 #else
2743 return NULL_RTX;
2744 #endif
2745 }
2746
2747 return tem;
2748 }
2749
2750 /* For the following tests, ensure const0_rtx is op1. */
2751 if (swap_commutative_operands_p (op0, op1)
2752 || (op0 == const0_rtx && op1 != const0_rtx))
2753 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2754
2755 /* If op0 is a compare, extract the comparison arguments from it. */
2756 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2757 return simplify_relational_operation (code, mode, VOIDmode,
2758 XEXP (op0, 0), XEXP (op0, 1));
2759
2760 if (mode == VOIDmode
2761 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2762 || CC0_P (op0))
2763 return NULL_RTX;
2764
2765 trueop0 = avoid_constant_pool_reference (op0);
2766 trueop1 = avoid_constant_pool_reference (op1);
2767 return simplify_relational_operation_1 (code, mode, cmp_mode,
2768 trueop0, trueop1);
2769 }
2770
2771 /* This part of simplify_relational_operation is only used when CMP_MODE
2772 is not in class MODE_CC (i.e. it is a real comparison).
2773
2774 MODE is the mode of the result, while CMP_MODE specifies the mode in
2775 which the comparison is done, so it is the mode of the operands. */
2776
2777 static rtx
2778 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2779 enum machine_mode cmp_mode, rtx op0, rtx op1)
2780 {
2781 enum rtx_code op0code = GET_CODE (op0);
2782
2783 if (GET_CODE (op1) == CONST_INT)
2784 {
2785 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2786 {
2787 /* If op0 is a comparison, extract the comparison arguments from it. */
2788 if (code == NE)
2789 {
2790 if (GET_MODE (op0) == cmp_mode)
2791 return simplify_rtx (op0);
2792 else
2793 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2794 XEXP (op0, 0), XEXP (op0, 1));
2795 }
2796 else if (code == EQ)
2797 {
2798 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2799 if (new_code != UNKNOWN)
2800 return simplify_gen_relational (new_code, mode, VOIDmode,
2801 XEXP (op0, 0), XEXP (op0, 1));
2802 }
2803 }
2804 }
2805
2806 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
2807 if ((code == EQ || code == NE)
2808 && (op0code == PLUS || op0code == MINUS)
2809 && CONSTANT_P (op1)
2810 && CONSTANT_P (XEXP (op0, 1)))
2811 {
2812 rtx x = XEXP (op0, 0);
2813 rtx c = XEXP (op0, 1);
2814
2815 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
2816 cmp_mode, op1, c);
2817 return simplify_gen_relational (code, mode, cmp_mode, x, c);
2818 }
2819
2820 return NULL_RTX;
2821 }
2822
2823 /* Check if the given comparison (done in the given MODE) is actually a
2824 tautology or a contradiction.
2825 If no simplification is possible, this function returns zero.
2826 Otherwise, it returns either const_true_rtx or const0_rtx. */
2827
2828 rtx
2829 simplify_const_relational_operation (enum rtx_code code,
2830 enum machine_mode mode,
2831 rtx op0, rtx op1)
2832 {
2833 int equal, op0lt, op0ltu, op1lt, op1ltu;
2834 rtx tem;
2835 rtx trueop0;
2836 rtx trueop1;
2837
2838 gcc_assert (mode != VOIDmode
2839 || (GET_MODE (op0) == VOIDmode
2840 && GET_MODE (op1) == VOIDmode));
2841
2842 /* If op0 is a compare, extract the comparison arguments from it. */
2843 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2844 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2845
2846 /* We can't simplify MODE_CC values since we don't know what the
2847 actual comparison is. */
2848 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2849 return 0;
2850
2851 /* Make sure the constant is second. */
2852 if (swap_commutative_operands_p (op0, op1))
2853 {
2854 tem = op0, op0 = op1, op1 = tem;
2855 code = swap_condition (code);
2856 }
2857
2858 trueop0 = avoid_constant_pool_reference (op0);
2859 trueop1 = avoid_constant_pool_reference (op1);
2860
2861 /* For integer comparisons of A and B maybe we can simplify A - B and can
2862 then simplify a comparison of that with zero. If A and B are both either
2863 a register or a CONST_INT, this can't help; testing for these cases will
2864 prevent infinite recursion here and speed things up.
2865
2866 If CODE is an unsigned comparison, then we can never do this optimization,
2867 because it gives an incorrect result if the subtraction wraps around zero.
2868 ANSI C defines unsigned operations such that they never overflow, and
2869 thus such cases cannot be ignored; but we cannot do it even for
2870 signed comparisons for languages such as Java, so test flag_wrapv. */
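/* For example, (gt (plus (reg X) (const_int 4)) (reg X)) folds the
   subtraction to (const_int 4) and then (gt 4 0) to const_true_rtx;
   the transformation is skipped for GTU and the other unsigned codes,
   and when flag_wrapv is set.  */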
2871
2872 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2873 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
2874 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
2875 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2876 /* We cannot do this for == or != if tem is a nonzero address. */
2877 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2878 && code != GTU && code != GEU && code != LTU && code != LEU)
2879 return simplify_const_relational_operation (signed_condition (code),
2880 mode, tem, const0_rtx);
2881
2882 if (flag_unsafe_math_optimizations && code == ORDERED)
2883 return const_true_rtx;
2884
2885 if (flag_unsafe_math_optimizations && code == UNORDERED)
2886 return const0_rtx;
2887
2888 /* For modes without NaNs, if the two operands are equal, we know the
2889 result except if they have side-effects. */
2890 if (! HONOR_NANS (GET_MODE (trueop0))
2891 && rtx_equal_p (trueop0, trueop1)
2892 && ! side_effects_p (trueop0))
2893 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2894
2895 /* If the operands are floating-point constants, see if we can fold
2896 the result. */
2897 else if (GET_CODE (trueop0) == CONST_DOUBLE
2898 && GET_CODE (trueop1) == CONST_DOUBLE
2899 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2900 {
2901 REAL_VALUE_TYPE d0, d1;
2902
2903 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2904 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2905
2906 /* Comparisons are unordered iff at least one of the values is NaN. */
2907 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2908 switch (code)
2909 {
2910 case UNEQ:
2911 case UNLT:
2912 case UNGT:
2913 case UNLE:
2914 case UNGE:
2915 case NE:
2916 case UNORDERED:
2917 return const_true_rtx;
2918 case EQ:
2919 case LT:
2920 case GT:
2921 case LE:
2922 case GE:
2923 case LTGT:
2924 case ORDERED:
2925 return const0_rtx;
2926 default:
2927 return 0;
2928 }
2929
2930 equal = REAL_VALUES_EQUAL (d0, d1);
2931 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2932 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2933 }
2934
2935 /* Otherwise, see if the operands are both integers. */
2936 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2937 && (GET_CODE (trueop0) == CONST_DOUBLE
2938 || GET_CODE (trueop0) == CONST_INT)
2939 && (GET_CODE (trueop1) == CONST_DOUBLE
2940 || GET_CODE (trueop1) == CONST_INT))
2941 {
2942 int width = GET_MODE_BITSIZE (mode);
2943 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2944 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2945
2946 /* Get the two words comprising each integer constant. */
2947 if (GET_CODE (trueop0) == CONST_DOUBLE)
2948 {
2949 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2950 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2951 }
2952 else
2953 {
2954 l0u = l0s = INTVAL (trueop0);
2955 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2956 }
2957
2958 if (GET_CODE (trueop1) == CONST_DOUBLE)
2959 {
2960 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2961 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2962 }
2963 else
2964 {
2965 l1u = l1s = INTVAL (trueop1);
2966 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2967 }
2968
2969 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2970 we have to sign or zero-extend the values. */
2971 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2972 {
2973 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2974 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2975
2976 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2977 l0s |= ((HOST_WIDE_INT) (-1) << width);
2978
2979 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2980 l1s |= ((HOST_WIDE_INT) (-1) << width);
2981 }
2982 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2983 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2984
2985 equal = (h0u == h1u && l0u == l1u);
2986 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2987 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2988 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2989 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2990 }
2991
2992 /* Otherwise, there are some code-specific tests we can make. */
2993 else
2994 {
2995 /* Optimize comparisons with upper and lower bounds. */
2996 if (SCALAR_INT_MODE_P (mode)
2997 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2998 {
2999 rtx mmin, mmax;
3000 int sign;
3001
3002 if (code == GEU
3003 || code == LEU
3004 || code == GTU
3005 || code == LTU)
3006 sign = 0;
3007 else
3008 sign = 1;
3009
3010 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3011
3012 tem = NULL_RTX;
3013 switch (code)
3014 {
3015 case GEU:
3016 case GE:
3017 /* x >= min is always true. */
3018 if (rtx_equal_p (trueop1, mmin))
3019 tem = const_true_rtx;
3020 else
3021 break;
3022
3023 case LEU:
3024 case LE:
3025 /* x <= max is always true. */
3026 if (rtx_equal_p (trueop1, mmax))
3027 tem = const_true_rtx;
3028 break;
3029
3030 case GTU:
3031 case GT:
3032 /* x > max is always false. */
3033 if (rtx_equal_p (trueop1, mmax))
3034 tem = const0_rtx;
3035 break;
3036
3037 case LTU:
3038 case LT:
3039 /* x < min is always false. */
3040 if (rtx_equal_p (trueop1, mmin))
3041 tem = const0_rtx;
3042 break;
3043
3044 default:
3045 break;
3046 }
3047 if (tem == const0_rtx
3048 || tem == const_true_rtx)
3049 return tem;
3050 }
3051
3052 switch (code)
3053 {
3054 case EQ:
3055 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3056 return const0_rtx;
3057 break;
3058
3059 case NE:
3060 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3061 return const_true_rtx;
3062 break;
3063
3064 case LT:
3065 /* Optimize abs(x) < 0.0. */
3066 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
3067 {
3068 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3069 : trueop0;
3070 if (GET_CODE (tem) == ABS)
3071 return const0_rtx;
3072 }
3073 break;
3074
3075 case GE:
3076 /* Optimize abs(x) >= 0.0. */
3077 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
3078 {
3079 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3080 : trueop0;
3081 if (GET_CODE (tem) == ABS)
3082 return const_true_rtx;
3083 }
3084 break;
3085
3086 case UNGE:
3087 /* Optimize ! (abs(x) < 0.0). */
3088 if (trueop1 == CONST0_RTX (mode))
3089 {
3090 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3091 : trueop0;
3092 if (GET_CODE (tem) == ABS)
3093 return const_true_rtx;
3094 }
3095 break;
3096
3097 default:
3098 break;
3099 }
3100
3101 return 0;
3102 }
3103
3104 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3105 as appropriate. */
3106 switch (code)
3107 {
3108 case EQ:
3109 case UNEQ:
3110 return equal ? const_true_rtx : const0_rtx;
3111 case NE:
3112 case LTGT:
3113 return ! equal ? const_true_rtx : const0_rtx;
3114 case LT:
3115 case UNLT:
3116 return op0lt ? const_true_rtx : const0_rtx;
3117 case GT:
3118 case UNGT:
3119 return op1lt ? const_true_rtx : const0_rtx;
3120 case LTU:
3121 return op0ltu ? const_true_rtx : const0_rtx;
3122 case GTU:
3123 return op1ltu ? const_true_rtx : const0_rtx;
3124 case LE:
3125 case UNLE:
3126 return equal || op0lt ? const_true_rtx : const0_rtx;
3127 case GE:
3128 case UNGE:
3129 return equal || op1lt ? const_true_rtx : const0_rtx;
3130 case LEU:
3131 return equal || op0ltu ? const_true_rtx : const0_rtx;
3132 case GEU:
3133 return equal || op1ltu ? const_true_rtx : const0_rtx;
3134 case ORDERED:
3135 return const_true_rtx;
3136 case UNORDERED:
3137 return const0_rtx;
3138 default:
3139 gcc_unreachable ();
3140 }
3141 }
3142 \f
3143 /* Simplify CODE, an operation with result mode MODE and three operands,
3144 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3145 a constant. Return 0 if no simplification is possible. */
3146
3147 rtx
3148 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3149 enum machine_mode op0_mode, rtx op0, rtx op1,
3150 rtx op2)
3151 {
3152 unsigned int width = GET_MODE_BITSIZE (mode);
3153
3154 /* VOIDmode means "infinite" precision. */
3155 if (width == 0)
3156 width = HOST_BITS_PER_WIDE_INT;
3157
3158 switch (code)
3159 {
3160 case SIGN_EXTRACT:
3161 case ZERO_EXTRACT:
3162 if (GET_CODE (op0) == CONST_INT
3163 && GET_CODE (op1) == CONST_INT
3164 && GET_CODE (op2) == CONST_INT
3165 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3166 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3167 {
3168 /* Extracting a bit-field from a constant */
3169 HOST_WIDE_INT val = INTVAL (op0);
3170
3171 if (BITS_BIG_ENDIAN)
3172 val >>= (GET_MODE_BITSIZE (op0_mode)
3173 - INTVAL (op2) - INTVAL (op1));
3174 else
3175 val >>= INTVAL (op2);
3176
3177 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3178 {
3179 /* First zero-extend. */
3180 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3181 /* If desired, propagate sign bit. */
3182 if (code == SIGN_EXTRACT
3183 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3184 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3185 }
3186
3187 /* Clear the bits that don't belong in our mode,
3188 unless they and our sign bit are all one.
3189 So we get either a reasonable negative value or a reasonable
3190 unsigned value for this mode. */
3191 if (width < HOST_BITS_PER_WIDE_INT
3192 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3193 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3194 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3195
3196 return gen_int_mode (val, mode);
3197 }
3198 break;
3199
3200 case IF_THEN_ELSE:
3201 if (GET_CODE (op0) == CONST_INT)
3202 return op0 != const0_rtx ? op1 : op2;
3203
3204 /* Convert c ? a : a into "a". */
3205 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3206 return op1;
3207
3208 /* Convert a != b ? a : b into "a". */
3209 if (GET_CODE (op0) == NE
3210 && ! side_effects_p (op0)
3211 && ! HONOR_NANS (mode)
3212 && ! HONOR_SIGNED_ZEROS (mode)
3213 && ((rtx_equal_p (XEXP (op0, 0), op1)
3214 && rtx_equal_p (XEXP (op0, 1), op2))
3215 || (rtx_equal_p (XEXP (op0, 0), op2)
3216 && rtx_equal_p (XEXP (op0, 1), op1))))
3217 return op1;
3218
3219 /* Convert a == b ? a : b into "b". */
3220 if (GET_CODE (op0) == EQ
3221 && ! side_effects_p (op0)
3222 && ! HONOR_NANS (mode)
3223 && ! HONOR_SIGNED_ZEROS (mode)
3224 && ((rtx_equal_p (XEXP (op0, 0), op1)
3225 && rtx_equal_p (XEXP (op0, 1), op2))
3226 || (rtx_equal_p (XEXP (op0, 0), op2)
3227 && rtx_equal_p (XEXP (op0, 1), op1))))
3228 return op2;
3229
3230 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3231 {
3232 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3233 ? GET_MODE (XEXP (op0, 1))
3234 : GET_MODE (XEXP (op0, 0)));
3235 rtx temp;
3236
3237 /* Look for happy constants in op1 and op2. */
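/* E.g. with STORE_FLAG_VALUE == 1,
   (if_then_else (lt x y) (const_int 1) (const_int 0)) collapses to the
   comparison (lt x y) itself, and swapping the two constants yields the
   reversed comparison instead.  */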
3238 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3239 {
3240 HOST_WIDE_INT t = INTVAL (op1);
3241 HOST_WIDE_INT f = INTVAL (op2);
3242
3243 if (t == STORE_FLAG_VALUE && f == 0)
3244 code = GET_CODE (op0);
3245 else if (t == 0 && f == STORE_FLAG_VALUE)
3246 {
3247 enum rtx_code tmp;
3248 tmp = reversed_comparison_code (op0, NULL_RTX);
3249 if (tmp == UNKNOWN)
3250 break;
3251 code = tmp;
3252 }
3253 else
3254 break;
3255
3256 return simplify_gen_relational (code, mode, cmp_mode,
3257 XEXP (op0, 0), XEXP (op0, 1));
3258 }
3259
3260 if (cmp_mode == VOIDmode)
3261 cmp_mode = op0_mode;
3262 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3263 cmp_mode, XEXP (op0, 0),
3264 XEXP (op0, 1));
3265
3266 /* See if any simplifications were possible. */
3267 if (temp)
3268 {
3269 if (GET_CODE (temp) == CONST_INT)
3270 return temp == const0_rtx ? op2 : op1;
3271 else if (temp)
3272 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3273 }
3274 }
3275 break;
3276
3277 case VEC_MERGE:
3278 gcc_assert (GET_MODE (op0) == mode);
3279 gcc_assert (GET_MODE (op1) == mode);
3280 gcc_assert (VECTOR_MODE_P (mode));
3281 op2 = avoid_constant_pool_reference (op2);
3282 if (GET_CODE (op2) == CONST_INT)
3283 {
3284 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3285 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3286 int mask = (1 << n_elts) - 1;
3287
3288 if (!(INTVAL (op2) & mask))
3289 return op1;
3290 if ((INTVAL (op2) & mask) == mask)
3291 return op0;
3292
3293 op0 = avoid_constant_pool_reference (op0);
3294 op1 = avoid_constant_pool_reference (op1);
3295 if (GET_CODE (op0) == CONST_VECTOR
3296 && GET_CODE (op1) == CONST_VECTOR)
3297 {
3298 rtvec v = rtvec_alloc (n_elts);
3299 unsigned int i;
3300
3301 for (i = 0; i < n_elts; i++)
3302 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3303 ? CONST_VECTOR_ELT (op0, i)
3304 : CONST_VECTOR_ELT (op1, i));
3305 return gen_rtx_CONST_VECTOR (mode, v);
3306 }
3307 }
3308 break;
3309
3310 default:
3311 gcc_unreachable ();
3312 }
3313
3314 return 0;
3315 }
3316
3317 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3318 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3319
3320 Works by unpacking OP into a collection of 8-bit values
3321 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3322 and then repacking them again for OUTERMODE. */
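/* For instance, taking the HImode lowpart (BYTE 0) of the SImode constant
   0x12345678 on a little-endian target unpacks the value into the bytes
   { 0x78, 0x56, 0x34, 0x12 }, selects the two bytes starting at byte 0,
   and repacks them as (const_int 0x5678).  */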
3323
3324 static rtx
3325 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3326 enum machine_mode innermode, unsigned int byte)
3327 {
3328 /* We support up to 512-bit values (for V8DFmode). */
3329 enum {
3330 max_bitsize = 512,
3331 value_bit = 8,
3332 value_mask = (1 << value_bit) - 1
3333 };
3334 unsigned char value[max_bitsize / value_bit];
3335 int value_start;
3336 int i;
3337 int elem;
3338
3339 int num_elem;
3340 rtx * elems;
3341 int elem_bitsize;
3342 rtx result_s;
3343 rtvec result_v = NULL;
3344 enum mode_class outer_class;
3345 enum machine_mode outer_submode;
3346
3347 /* Some ports misuse CCmode. */
3348 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3349 return op;
3350
3351 /* We have no way to represent a complex constant at the rtl level. */
3352 if (COMPLEX_MODE_P (outermode))
3353 return NULL_RTX;
3354
3355 /* Unpack the value. */
3356
3357 if (GET_CODE (op) == CONST_VECTOR)
3358 {
3359 num_elem = CONST_VECTOR_NUNITS (op);
3360 elems = &CONST_VECTOR_ELT (op, 0);
3361 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3362 }
3363 else
3364 {
3365 num_elem = 1;
3366 elems = &op;
3367 elem_bitsize = max_bitsize;
3368 }
3369 /* If this asserts, it is too complicated; reducing value_bit may help. */
3370 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3371 /* I don't know how to handle endianness of sub-units. */
3372 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3373
3374 for (elem = 0; elem < num_elem; elem++)
3375 {
3376 unsigned char * vp;
3377 rtx el = elems[elem];
3378
3379 /* Vectors are kept in target memory order. (This is probably
3380 a mistake.) */
3381 {
3382 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3383 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3384 / BITS_PER_UNIT);
3385 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3386 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3387 unsigned bytele = (subword_byte % UNITS_PER_WORD
3388 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3389 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3390 }
3391
3392 switch (GET_CODE (el))
3393 {
3394 case CONST_INT:
3395 for (i = 0;
3396 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3397 i += value_bit)
3398 *vp++ = INTVAL (el) >> i;
3399 /* CONST_INTs are always logically sign-extended. */
3400 for (; i < elem_bitsize; i += value_bit)
3401 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3402 break;
3403
3404 case CONST_DOUBLE:
3405 if (GET_MODE (el) == VOIDmode)
3406 {
3407 /* If this triggers, someone should have generated a
3408 CONST_INT instead. */
3409 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3410
3411 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3412 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3413 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3414 {
3415 *vp++
3416 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3417 i += value_bit;
3418 }
3419 /* It shouldn't matter what's done here, so fill it with
3420 zero. */
3421 for (; i < max_bitsize; i += value_bit)
3422 *vp++ = 0;
3423 }
3424 else
3425 {
3426 long tmp[max_bitsize / 32];
3427 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3428
3429 gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
3430 gcc_assert (bitsize <= elem_bitsize);
3431 gcc_assert (bitsize % value_bit == 0);
3432
3433 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3434 GET_MODE (el));
3435
3436 /* real_to_target produces its result in words affected by
3437 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3438 and use WORDS_BIG_ENDIAN instead; see the documentation
3439 of SUBREG in rtl.texi. */
3440 for (i = 0; i < bitsize; i += value_bit)
3441 {
3442 int ibase;
3443 if (WORDS_BIG_ENDIAN)
3444 ibase = bitsize - 1 - i;
3445 else
3446 ibase = i;
3447 *vp++ = tmp[ibase / 32] >> i % 32;
3448 }
3449
3450 /* It shouldn't matter what's done here, so fill it with
3451 zero. */
3452 for (; i < elem_bitsize; i += value_bit)
3453 *vp++ = 0;
3454 }
3455 break;
3456
3457 default:
3458 gcc_unreachable ();
3459 }
3460 }
3461
3462 /* Now, pick the right byte to start with. */
3463 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3464 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3465 will already have offset 0. */
3466 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3467 {
3468 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3469 - byte);
3470 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3471 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3472 byte = (subword_byte % UNITS_PER_WORD
3473 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3474 }
3475
3476 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3477 so if it's become negative it will instead be very large.) */
3478 gcc_assert (byte < GET_MODE_SIZE (innermode));
3479
3480 /* Convert from bytes to chunks of size value_bit. */
3481 value_start = byte * (BITS_PER_UNIT / value_bit);
3482
3483 /* Re-pack the value. */
3484
3485 if (VECTOR_MODE_P (outermode))
3486 {
3487 num_elem = GET_MODE_NUNITS (outermode);
3488 result_v = rtvec_alloc (num_elem);
3489 elems = &RTVEC_ELT (result_v, 0);
3490 outer_submode = GET_MODE_INNER (outermode);
3491 }
3492 else
3493 {
3494 num_elem = 1;
3495 elems = &result_s;
3496 outer_submode = outermode;
3497 }
3498
3499 outer_class = GET_MODE_CLASS (outer_submode);
3500 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3501
3502 gcc_assert (elem_bitsize % value_bit == 0);
3503 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3504
3505 for (elem = 0; elem < num_elem; elem++)
3506 {
3507 unsigned char *vp;
3508
3509 /* Vectors are stored in target memory order. (This is probably
3510 a mistake.) */
3511 {
3512 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3513 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3514 / BITS_PER_UNIT);
3515 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3516 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3517 unsigned bytele = (subword_byte % UNITS_PER_WORD
3518 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3519 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3520 }
3521
3522 switch (outer_class)
3523 {
3524 case MODE_INT:
3525 case MODE_PARTIAL_INT:
3526 {
3527 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3528
3529 for (i = 0;
3530 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3531 i += value_bit)
3532 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3533 for (; i < elem_bitsize; i += value_bit)
3534 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3535 << (i - HOST_BITS_PER_WIDE_INT));
3536
3537 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3538 know why. */
3539 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3540 elems[elem] = gen_int_mode (lo, outer_submode);
3541 else
3542 elems[elem] = immed_double_const (lo, hi, outer_submode);
3543 }
3544 break;
3545
3546 case MODE_FLOAT:
3547 {
3548 REAL_VALUE_TYPE r;
3549 long tmp[max_bitsize / 32];
3550
3551 /* real_from_target wants its input in words affected by
3552 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3553 and use WORDS_BIG_ENDIAN instead; see the documentation
3554 of SUBREG in rtl.texi. */
3555 for (i = 0; i < max_bitsize / 32; i++)
3556 tmp[i] = 0;
3557 for (i = 0; i < elem_bitsize; i += value_bit)
3558 {
3559 int ibase;
3560 if (WORDS_BIG_ENDIAN)
3561 ibase = elem_bitsize - 1 - i;
3562 else
3563 ibase = i;
3564 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3565 }
3566
3567 real_from_target (&r, tmp, outer_submode);
3568 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3569 }
3570 break;
3571
3572 default:
3573 gcc_unreachable ();
3574 }
3575 }
3576 if (VECTOR_MODE_P (outermode))
3577 return gen_rtx_CONST_VECTOR (outermode, result_v);
3578 else
3579 return result_s;
3580 }
3581
3582 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3583 Return 0 if no simplifications are possible. */
3584 rtx
3585 simplify_subreg (enum machine_mode outermode, rtx op,
3586 enum machine_mode innermode, unsigned int byte)
3587 {
3588 /* Little bit of sanity checking. */
3589 gcc_assert (innermode != VOIDmode);
3590 gcc_assert (outermode != VOIDmode);
3591 gcc_assert (innermode != BLKmode);
3592 gcc_assert (outermode != BLKmode);
3593
3594 gcc_assert (GET_MODE (op) == innermode
3595 || GET_MODE (op) == VOIDmode);
3596
3597 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3598 gcc_assert (byte < GET_MODE_SIZE (innermode));
3599
3600 if (outermode == innermode && !byte)
3601 return op;
3602
3603 if (GET_CODE (op) == CONST_INT
3604 || GET_CODE (op) == CONST_DOUBLE
3605 || GET_CODE (op) == CONST_VECTOR)
3606 return simplify_immed_subreg (outermode, op, innermode, byte);
3607
3608 /* Changing mode twice with SUBREG => just change it once,
3609 or not at all if changing back to op's starting mode. */
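/* E.g. (subreg:QI (subreg:HI (reg:SI x) 0) 0) becomes
   (subreg:QI (reg:SI x) 0), while (subreg:SI (subreg:HI (reg:SI x) 0) 0)
   collapses back to (reg:SI x).  */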
3610 if (GET_CODE (op) == SUBREG)
3611 {
3612 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3613 int final_offset = byte + SUBREG_BYTE (op);
3614 rtx newx;
3615
3616 if (outermode == innermostmode
3617 && byte == 0 && SUBREG_BYTE (op) == 0)
3618 return SUBREG_REG (op);
3619
3620 /* The SUBREG_BYTE represents the offset, as if the value were stored
3621 in memory. An irritating exception is the paradoxical subreg, where
3622 we define SUBREG_BYTE to be 0; on big-endian machines this value
3623 would otherwise be negative. For a moment, undo this exception. */
3624 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3625 {
3626 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3627 if (WORDS_BIG_ENDIAN)
3628 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3629 if (BYTES_BIG_ENDIAN)
3630 final_offset += difference % UNITS_PER_WORD;
3631 }
3632 if (SUBREG_BYTE (op) == 0
3633 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3634 {
3635 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3636 if (WORDS_BIG_ENDIAN)
3637 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3638 if (BYTES_BIG_ENDIAN)
3639 final_offset += difference % UNITS_PER_WORD;
3640 }
3641
3642 /* See whether resulting subreg will be paradoxical. */
3643 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3644 {
3645 /* In nonparadoxical subregs we can't handle negative offsets. */
3646 if (final_offset < 0)
3647 return NULL_RTX;
3648 /* Bail out in case resulting subreg would be incorrect. */
3649 if (final_offset % GET_MODE_SIZE (outermode)
3650 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3651 return NULL_RTX;
3652 }
3653 else
3654 {
3655 int offset = 0;
3656 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3657
3658 /* In a paradoxical subreg, see if we are still looking at the lower
3659 part. If so, our SUBREG_BYTE will be 0. */
3660 if (WORDS_BIG_ENDIAN)
3661 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3662 if (BYTES_BIG_ENDIAN)
3663 offset += difference % UNITS_PER_WORD;
3664 if (offset == final_offset)
3665 final_offset = 0;
3666 else
3667 return NULL_RTX;
3668 }
3669
3670 /* Recurse for further possible simplifications. */
3671 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
3672 final_offset);
3673 if (newx)
3674 return newx;
3675 if (validate_subreg (outermode, innermostmode,
3676 SUBREG_REG (op), final_offset))
3677 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3678 return NULL_RTX;
3679 }
3680
3681 /* SUBREG of a hard register => just change the register number
3682 and/or mode. If the hard register is not valid in that mode,
3683 suppress this simplification. If the hard register is the stack,
3684 frame, or argument pointer, leave this as a SUBREG. */
3685
3686 if (REG_P (op)
3687 && REGNO (op) < FIRST_PSEUDO_REGISTER
3688 #ifdef CANNOT_CHANGE_MODE_CLASS
3689 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3690 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3691 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3692 #endif
3693 && ((reload_completed && !frame_pointer_needed)
3694 || (REGNO (op) != FRAME_POINTER_REGNUM
3695 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3696 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3697 #endif
3698 ))
3699 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3700 && REGNO (op) != ARG_POINTER_REGNUM
3701 #endif
3702 && REGNO (op) != STACK_POINTER_REGNUM
3703 && subreg_offset_representable_p (REGNO (op), innermode,
3704 byte, outermode))
3705 {
3706 unsigned int regno = REGNO (op);
3707 unsigned int final_regno
3708 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
3709
3710 /* ??? We do allow it if the current REG is not valid for
3711 its mode. This is a kludge to work around how float/complex
3712 arguments are passed on 32-bit SPARC and should be fixed. */
3713 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3714 || ! HARD_REGNO_MODE_OK (regno, innermode))
3715 {
3716 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3717
3718 /* Propagate the original regno. We don't have any way to specify
3719 the offset inside the original regno, so do so only for the lowpart.
3720 The information is used only by alias analysis, which cannot
3721 grok partial registers anyway. */
3722
3723 if (subreg_lowpart_offset (outermode, innermode) == byte)
3724 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3725 return x;
3726 }
3727 }
3728
3729 /* If we have a SUBREG of a register that we are replacing and we are
3730 replacing it with a MEM, make a new MEM and try replacing the
3731 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3732 or if we would be widening it. */
3733
3734 if (MEM_P (op)
3735 && ! mode_dependent_address_p (XEXP (op, 0))
3736 /* Allow splitting of volatile memory references in case we don't
3737 have instruction to move the whole thing. */
3738 && (! MEM_VOLATILE_P (op)
3739 || ! have_insn_for (SET, innermode))
3740 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3741 return adjust_address_nv (op, outermode, byte);
3742
3743 /* Handle complex values represented as CONCAT
3744 of real and imaginary part. */
3745 if (GET_CODE (op) == CONCAT)
3746 {
3747 unsigned int inner_size, final_offset;
3748 rtx part, res;
3749
3750 inner_size = GET_MODE_UNIT_SIZE (innermode);
3751 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
3752 final_offset = byte % inner_size;
3753 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
3754 return NULL_RTX;
3755
3756 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3757 if (res)
3758 return res;
3759 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
3760 return gen_rtx_SUBREG (outermode, part, final_offset);
3761 return NULL_RTX;
3762 }
3763
3764 /* Optimize SUBREG truncations of zero and sign extended values. */
3765 if ((GET_CODE (op) == ZERO_EXTEND
3766 || GET_CODE (op) == SIGN_EXTEND)
3767 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3768 {
3769 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3770
3771 /* If we're requesting the lowpart of a zero or sign extension,
3772 there are three possibilities. If the outermode is the same
3773 as the origmode, we can omit both the extension and the subreg.
3774 If the outermode is not larger than the origmode, we can apply
3775 the truncation without the extension. Finally, if the outermode
3776 is larger than the origmode, but both are integer modes, we
3777 can just extend to the appropriate mode. */
3778 if (bitpos == 0)
3779 {
3780 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3781 if (outermode == origmode)
3782 return XEXP (op, 0);
3783 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3784 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3785 subreg_lowpart_offset (outermode,
3786 origmode));
3787 if (SCALAR_INT_MODE_P (outermode))
3788 return simplify_gen_unary (GET_CODE (op), outermode,
3789 XEXP (op, 0), origmode);
3790 }
3791
3792 /* A SUBREG resulting from a zero extension may fold to zero if
3793 it extracts bits that lie above the ZERO_EXTEND's source bits. */
3794 if (GET_CODE (op) == ZERO_EXTEND
3795 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3796 return CONST0_RTX (outermode);
3797 }
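  /* For illustration (hypothetical pseudo X, assuming little-endian
     byte numbering so that byte 0 is the lowpart):
       (subreg:HI (zero_extend:SI (reg:HI X)) 0) -> (reg:HI X)
       (subreg:QI (zero_extend:SI (reg:HI X)) 0) -> (subreg:QI (reg:HI X) 0)
       (subreg:SI (zero_extend:DI (reg:HI X)) 0) -> (zero_extend:SI (reg:HI X))
       (subreg:SI (zero_extend:DI (reg:SI X)) 4) -> (const_int 0).  */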
3798
3799 return NULL_RTX;
3800 }
3801
3802 /* Make a SUBREG operation or equivalent if it folds. */
3803
3804 rtx
3805 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3806 enum machine_mode innermode, unsigned int byte)
3807 {
3808 rtx newx;
3809
3810 newx = simplify_subreg (outermode, op, innermode, byte);
3811 if (newx)
3812 return newx;
3813
3814 if (GET_CODE (op) == SUBREG
3815 || GET_CODE (op) == CONCAT
3816 || GET_MODE (op) == VOIDmode)
3817 return NULL_RTX;
3818
3819 if (validate_subreg (outermode, innermode, op, byte))
3820 return gen_rtx_SUBREG (outermode, op, byte);
3821
3822 return NULL_RTX;
3823 }
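/* For illustration (hypothetical rtx OP): a caller wanting the low SImode
   word of a DImode value OP might write
     simplify_gen_subreg (SImode, op, DImode,
                          subreg_lowpart_offset (SImode, DImode));
   and receive a simplified rtx, a fresh SUBREG, or NULL_RTX when no
   valid SUBREG can be formed.  */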
3824
3825 /* Simplify X, an rtx expression.
3826
3827 Return the simplified expression or NULL if no simplifications
3828 were possible.
3829
3830 This is the preferred entry point into the simplification routines;
3831 however, we still allow passes to call the more specific routines.
3832
3833 Right now GCC has three (yes, three) major bodies of RTL simplification
3834 code that need to be unified.
3835
3836 1. fold_rtx in cse.c. This code uses various CSE specific
3837 information to aid in RTL simplification.
3838
3839 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3840 it uses combine specific information to aid in RTL
3841 simplification.
3842
3843 3. The routines in this file.
3844
3845
3846 Long term we want to only have one body of simplification code; to
3847 get to that state I recommend the following steps:
3848
3849 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3850 that do not depend on pass-specific state into these routines.
3851
3852 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3853 use this routine whenever possible.
3854
3855 3. Allow for pass dependent state to be provided to these
3856 routines and add simplifications based on the pass dependent
3857 state. Remove code from cse.c & combine.c that becomes
3858 redundant/dead.
3859
3860 It will take time, but ultimately the compiler will be easier to
3861 maintain and improve. It's totally silly that when we add a
3862 simplification it needs to be added in 4 places (3 for RTL
3863 simplification and 1 for tree simplification). */
3864
3865 rtx
3866 simplify_rtx (rtx x)
3867 {
3868 enum rtx_code code = GET_CODE (x);
3869 enum machine_mode mode = GET_MODE (x);
3870
3871 switch (GET_RTX_CLASS (code))
3872 {
3873 case RTX_UNARY:
3874 return simplify_unary_operation (code, mode,
3875 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3876 case RTX_COMM_ARITH:
3877 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3878 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3879
3880 /* Fall through.... */
3881
3882 case RTX_BIN_ARITH:
3883 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3884
3885 case RTX_TERNARY:
3886 case RTX_BITFIELD_OPS:
3887 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3888 XEXP (x, 0), XEXP (x, 1),
3889 XEXP (x, 2));
3890
3891 case RTX_COMPARE:
3892 case RTX_COMM_COMPARE:
3893 return simplify_relational_operation (code, mode,
3894 ((GET_MODE (XEXP (x, 0))
3895 != VOIDmode)
3896 ? GET_MODE (XEXP (x, 0))
3897 : GET_MODE (XEXP (x, 1))),
3898 XEXP (x, 0),
3899 XEXP (x, 1));
3900
3901 case RTX_EXTRA:
3902 if (code == SUBREG)
3903 return simplify_gen_subreg (mode, SUBREG_REG (x),
3904 GET_MODE (SUBREG_REG (x)),
3905 SUBREG_BYTE (x));
3906 break;
3907
3908 case RTX_OBJ:
3909 if (code == LO_SUM)
3910 {
3911 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3912 if (GET_CODE (XEXP (x, 0)) == HIGH
3913 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3914 return XEXP (x, 1);
3915 }
3916 break;
3917
3918 default:
3919 break;
3920 }
3921 return NULL;
3922 }
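/* For illustration: handed (plus:SI (const_int 2) (const_int 3)),
   simplify_rtx dispatches to simplify_binary_operation, which folds the
   constants and returns (const_int 5); an expression it cannot improve
   yields NULL.  */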