gcc/simplify-rtx.c
/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
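
/* For example, HWI_SIGN_EXTEND (-5) is (HOST_WIDE_INT) -1 and
   HWI_SIGN_EXTEND (5) is (HOST_WIDE_INT) 0, so pairing a low word with
   HWI_SIGN_EXTEND of itself gives the (low, high) pair a signed wide
   int would have.  */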
51
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool mode_signbit_p (enum machine_mode, rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
56 rtx, int);
57 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 unsigned int);
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 rtx, rtx);
61 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
62 enum machine_mode, rtx, rtx);
63 \f
64 /* Negate a CONST_INT rtx, truncating (because a conversion from a
65 maximally negative number can overflow). */
66 static rtx
67 neg_const_int (enum machine_mode mode, rtx i)
68 {
69 return gen_int_mode (- INTVAL (i), mode);
70 }
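
/* For instance, neg_const_int (QImode, GEN_INT (-128)) yields
   (const_int -128) again: the negation overflows QImode, and
   gen_int_mode truncates the result back into the mode.  */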

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

static bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
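
/* For example, in SImode the sign bit constant is 0x80000000, so
   mode_signbit_p (SImode, x) is true exactly when the low 32 bits of
   the CONST_INT x are 0x80000000.  */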
\f
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
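
/* Thus simplify_gen_binary (PLUS, SImode, const1_rtx, reg) first swaps
   the operands, then tries to fold, and only builds the canonical
   (plus:SI reg (const_int 1)) if no simplification applies.  */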
\f
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
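
/* For example, a (mem:DF (symbol_ref ...)) that addresses a constant
   pool entry holding 2.5 comes back as the (const_double:DF 2.5)
   itself, so callers can fold arithmetic on it as if the constant
   appeared inline.  */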
\f
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
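
/* So simplify_gen_relational (EQ, SImode, SImode, const0_rtx,
   const0_rtx) returns whatever simplify_relational_operation folds the
   constant comparison to, rather than building an (eq ...) rtx.  */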
\f
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (REG_P (old_rtx) && REGNO (x) == REGNO (old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
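
/* As an illustration, replacing (reg 100) with (const_int 3) in
   (plus:SI (reg 100) (const_int 4)) does not merely substitute: the
   rebuilt expression folds to (const_int 7).  */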
\f
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (trueop) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (trueop)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (trueop)));
        }
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
          || GET_CODE (trueop) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (trueop) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = trueop;
          else
            {
              enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (trueop, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
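          /* Clear the lowest set bit on each iteration (Kernighan's
             method); the number of iterations is the population count.  */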
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          gcc_unreachable ();
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (trueop));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            /* Without this break the NOT case would fall through into
               gcc_unreachable and abort.  */
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2 * HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }
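
  /* For example, (fix:SI (const_double:DF 3.7)) folds to (const_int 3),
     a NaN operand folds to (const_int 0), and out-of-range values
     saturate to SImode's bounds, matching the middle-end semantics
     described above.  */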

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (COMPARISON_P (op)
              && (mode == BImode || STORE_FLAG_VALUE == -1)
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (plus X -1)) can become (neg X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == constm1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

          /* Similarly, (not (neg X)) is (plus X -1).  */
          if (GET_CODE (op) == NEG)
            return plus_constant (XEXP (op, 0), -1);

          /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == XOR
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == PLUS
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && mode_signbit_p (mode, XEXP (op, 1))
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
             operands other than 1, but that is not valid.  We could do a
             similar simplification for (not (lshiftrt C X)) where C is
             just the sign bit, but this doesn't seem common enough to
             bother with.  */
          if (GET_CODE (op) == ASHIFT
              && XEXP (op, 0) == const1_rtx)
            {
              temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
              return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
            }

          /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
             by reversing the comparison code if valid.  */
          if (STORE_FLAG_VALUE == -1
              && COMPARISON_P (op)
              && (reversed = reversed_comparison_code (op, NULL_RTX))
                 != UNKNOWN)
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (ashiftrt foo C)) where C is the number of bits in FOO
             minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
             so we can perform the above simplification.  */

          if (STORE_FLAG_VALUE == -1
              && GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_relational (GE, mode, VOIDmode,
                                            XEXP (op, 0), const0_rtx);

          break;

        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);

          /* (neg (plus X 1)) can become (not X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == const1_rtx)
            return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

          /* Similarly, (neg (not X)) is (plus X 1).  */
          if (GET_CODE (op) == NOT)
            return plus_constant (XEXP (op, 0), 1);

          /* (neg (minus X Y)) can become (minus Y X).  This transformation
             isn't safe for modes with signed zeros, since if X and Y are
             both +0, (minus Y X) is the same as (minus X Y).  If the
             rounding mode is towards +infinity (or -infinity) then the two
             expressions will be rounded differently.  */
          if (GET_CODE (op) == MINUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
                                        XEXP (op, 0));

          if (GET_CODE (op) == PLUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              /* (neg (plus A C)) is simplified to (minus -C A).  */
              if (GET_CODE (XEXP (op, 1)) == CONST_INT
                  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
                {
                  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
                                                   mode);
                  if (temp)
                    return simplify_gen_binary (MINUS, mode, temp,
                                                XEXP (op, 0));
                }

              /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
            }

          /* (neg (mult A B)) becomes (mult (neg A) B).
             This works even for floating-point values.  */
          if (GET_CODE (op) == MULT
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
            }

          /* NEG commutes with ASHIFT since it is multiplication.  Only do
             this if we can then eliminate the NEG (e.g., if the operand
             is a constant).  */
          if (GET_CODE (op) == ASHIFT)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
                                               mode);
              if (temp)
                return simplify_gen_binary (ASHIFT, mode, temp,
                                            XEXP (op, 1));
            }

          /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (LSHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));

          /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == LSHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));

          break;

        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

          /* Check for a sign extension of a subreg of a promoted
             variable, where the promotion is sign-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && ! SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && REG_P (SUBREG_REG (op))
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        case ZERO_EXTEND:
          /* Check for a zero extension of a subreg of a promoted
             variable, where the promotion is zero-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && REG_P (SUBREG_REG (op))
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        default:
          break;
        }

      return 0;
    }
}
\f
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
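
/* For example, (plus (plus x (const_int 1)) (const_int 2)) reaches the
   "(a op b) op c" case above: the two constants fold, and the result
   is (plus x (const_int 3)).  */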

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (trueop0, i),
                                             CONST_VECTOR_ELT (trueop1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              switch (code)
                {
                case AND:
                  tmp0[i] &= tmp1[i];
                  break;
                case IOR:
                  tmp0[i] |= tmp1[i];
                  break;
                case XOR:
                  tmp0[i] ^= tmp1[i];
                  break;
                default:
                  gcc_unreachable ();
                }
            }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
          f0 = real_value_truncate (mode, f0);
          f1 = real_value_truncate (mode, f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
            {
              int s0 = REAL_VALUE_NEGATIVE (f0);
              int s1 = REAL_VALUE_NEGATIVE (f1);

              switch (code)
                {
                case PLUS:
                  /* Inf + -Inf = NaN plus exception.  */
                  if (s0 != s1)
                    return 0;
                  break;
                case MINUS:
                  /* Inf - Inf = NaN plus exception.  */
                  if (s0 == s1)
                    return 0;
                  break;
                case DIV:
                  /* Inf / Inf = NaN plus exception.  */
                  return 0;
                default:
                  break;
                }
            }

          if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
                  || (REAL_VALUE_ISINF (f1)
                      && REAL_VALUES_EQUAL (f0, dconst0))))
            /* Inf * 0 = NaN plus exception.  */
            return 0;

          REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

          value = real_value_truncate (mode, value);
          return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
        }
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case MOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case UDIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case UMOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
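
  /* For instance, on a host with a 32-bit HOST_WIDE_INT, a DImode PLUS
     of two constant operands is folded above by add_double and returned
     via immed_double_const; the division cases give up (return 0) when
     div_and_round_double reports overflow.  */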
1456
1457 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1458 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1459 {
1460 /* Even if we can't compute a constant result,
1461 there are some cases worth simplifying. */
1462
1463 switch (code)
1464 {
1465 case PLUS:
1466 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1467 when x is NaN, infinite, or finite and nonzero. They aren't
1468 when x is -0 and the rounding mode is not towards -infinity,
1469 since (-0) + 0 is then 0. */
1470 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1471 return op0;
1472
1473 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1474 transformations are safe even for IEEE. */
1475 if (GET_CODE (op0) == NEG)
1476 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1477 else if (GET_CODE (op1) == NEG)
1478 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1479
1480 /* (~a) + 1 -> -a */
1481 if (INTEGRAL_MODE_P (mode)
1482 && GET_CODE (op0) == NOT
1483 && trueop1 == const1_rtx)
1484 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1485
1486 /* Handle both-operands-constant cases. We can only add
1487 CONST_INTs to constants since the sum of relocatable symbols
1488 can't be handled by most assemblers. Don't add CONST_INT
1489 to CONST_INT since overflow won't be computed properly if wider
1490 than HOST_BITS_PER_WIDE_INT. */
1491
1492 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1493 && GET_CODE (op1) == CONST_INT)
1494 return plus_constant (op0, INTVAL (op1));
1495 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1496 && GET_CODE (op0) == CONST_INT)
1497 return plus_constant (op1, INTVAL (op0));
1498
1499 /* See if this is something like X * C - X or vice versa or
1500 if the multiplication is written as a shift. If so, we can
1501 distribute and make a new multiply, shift, or maybe just
1502 have X (if C is 2 in the example above). But don't make
1503 something more expensive than we had before. */
1504
1505 if (! FLOAT_MODE_P (mode))
1506 {
1507 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1508 rtx lhs = op0, rhs = op1;
1509
1510 if (GET_CODE (lhs) == NEG)
1511 coeff0 = -1, lhs = XEXP (lhs, 0);
1512 else if (GET_CODE (lhs) == MULT
1513 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1514 {
1515 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1516 }
1517 else if (GET_CODE (lhs) == ASHIFT
1518 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1519 && INTVAL (XEXP (lhs, 1)) >= 0
1520 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1521 {
1522 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1523 lhs = XEXP (lhs, 0);
1524 }
1525
1526 if (GET_CODE (rhs) == NEG)
1527 coeff1 = -1, rhs = XEXP (rhs, 0);
1528 else if (GET_CODE (rhs) == MULT
1529 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1530 {
1531 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1532 }
1533 else if (GET_CODE (rhs) == ASHIFT
1534 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1535 && INTVAL (XEXP (rhs, 1)) >= 0
1536 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1537 {
1538 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1539 rhs = XEXP (rhs, 0);
1540 }
1541
1542 if (rtx_equal_p (lhs, rhs))
1543 {
1544 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1545 tem = simplify_gen_binary (MULT, mode, lhs,
1546 GEN_INT (coeff0 + coeff1));
1547 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1548 ? tem : 0;
1549 }
1550 }
1551
1552 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1553 if ((GET_CODE (op1) == CONST_INT
1554 || GET_CODE (op1) == CONST_DOUBLE)
1555 && GET_CODE (op0) == XOR
1556 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1557 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1558 && mode_signbit_p (mode, op1))
1559 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1560 simplify_gen_binary (XOR, mode, op1,
1561 XEXP (op0, 1)));
1562
1563 /* If one of the operands is a PLUS or a MINUS, see if we can
1564 simplify this by the associative law.
1565 Don't use the associative law for floating point.
1566 The inaccuracy makes it nonassociative,
1567 and subtle programs can break if operations are associated. */
1568
1569 if (INTEGRAL_MODE_P (mode)
1570 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1571 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1572 || (GET_CODE (op0) == CONST
1573 && GET_CODE (XEXP (op0, 0)) == PLUS)
1574 || (GET_CODE (op1) == CONST
1575 && GET_CODE (XEXP (op1, 0)) == PLUS))
1576 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1577 return tem;
1578
1579 /* Reassociate floating point addition only when the user
1580 specifies unsafe math optimizations. */
1581 if (FLOAT_MODE_P (mode)
1582 && flag_unsafe_math_optimizations)
1583 {
1584 tem = simplify_associative_operation (code, mode, op0, op1);
1585 if (tem)
1586 return tem;
1587 }
1588 break;
1589
1590 case COMPARE:
1591 #ifdef HAVE_cc0
1592 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1593 using cc0, in which case we want to leave it as a COMPARE
1594 so we can distinguish it from a register-register-copy.
1595
1596 In IEEE floating point, x-0 is not the same as x. */
1597
1598 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1599 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1600 && trueop1 == CONST0_RTX (mode))
1601 return op0;
1602 #endif
1603
1604 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1605 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1606 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1607 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1608 {
1609 rtx xop00 = XEXP (op0, 0);
1610 rtx xop10 = XEXP (op1, 0);
1611
1612 #ifdef HAVE_cc0
1613 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1614 #else
1615 if (REG_P (xop00) && REG_P (xop10)
1616 && GET_MODE (xop00) == GET_MODE (xop10)
1617 && REGNO (xop00) == REGNO (xop10)
1618 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1619 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1620 #endif
1621 return xop00;
1622 }
1623 break;
1624
1625 case MINUS:
1626 /* We can't assume x-x is 0 even with non-IEEE floating point,
1627 but since it is zero except in very strange circumstances, we
1628 will treat it as zero with -funsafe-math-optimizations. */
1629 if (rtx_equal_p (trueop0, trueop1)
1630 && ! side_effects_p (op0)
1631 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1632 return CONST0_RTX (mode);
1633
1634 /* Change subtraction from zero into negation. (0 - x) is the
1635 same as -x when x is NaN, infinite, or finite and nonzero.
1636 But if the mode has signed zeros, and does not round towards
1637 -infinity, then 0 - 0 is 0, not -0. */
1638 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1639 return simplify_gen_unary (NEG, mode, op1, mode);
1640
1641 /* (-1 - a) is ~a. */
1642 if (trueop0 == constm1_rtx)
1643 return simplify_gen_unary (NOT, mode, op1, mode);
1644
1645 /* Subtracting 0 has no effect unless the mode has signed zeros
1646 and supports rounding towards -infinity. In such a case,
1647 0 - 0 is -0. */
1648 if (!(HONOR_SIGNED_ZEROS (mode)
1649 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1650 && trueop1 == CONST0_RTX (mode))
1651 return op0;
1652
1653 /* See if this is something like X * C - X or vice versa or
1654 if the multiplication is written as a shift. If so, we can
1655 distribute and make a new multiply, shift, or maybe just
1656 have X (if C is 2 in the example above). But don't make
1657 something more expensive than we had before. */
1658
1659 if (! FLOAT_MODE_P (mode))
1660 {
1661 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1662 rtx lhs = op0, rhs = op1;
1663
1664 if (GET_CODE (lhs) == NEG)
1665 coeff0 = -1, lhs = XEXP (lhs, 0);
1666 else if (GET_CODE (lhs) == MULT
1667 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1668 {
1669 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1670 }
1671 else if (GET_CODE (lhs) == ASHIFT
1672 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1673 && INTVAL (XEXP (lhs, 1)) >= 0
1674 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1675 {
1676 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1677 lhs = XEXP (lhs, 0);
1678 }
1679
1680 if (GET_CODE (rhs) == NEG)
1681 coeff1 = - 1, rhs = XEXP (rhs, 0);
1682 else if (GET_CODE (rhs) == MULT
1683 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1684 {
1685 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1686 }
1687 else if (GET_CODE (rhs) == ASHIFT
1688 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1689 && INTVAL (XEXP (rhs, 1)) >= 0
1690 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1691 {
1692 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1693 rhs = XEXP (rhs, 0);
1694 }
1695
1696 if (rtx_equal_p (lhs, rhs))
1697 {
1698 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1699 tem = simplify_gen_binary (MULT, mode, lhs,
1700 GEN_INT (coeff0 - coeff1));
1701 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1702 ? tem : 0;
1703 }
1704 }
1705
1706 /* (a - (-b)) -> (a + b). True even for IEEE. */
1707 if (GET_CODE (op1) == NEG)
1708 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1709
1710 /* (-x - c) may be simplified as (-c - x). */
1711 if (GET_CODE (op0) == NEG
1712 && (GET_CODE (op1) == CONST_INT
1713 || GET_CODE (op1) == CONST_DOUBLE))
1714 {
1715 tem = simplify_unary_operation (NEG, mode, op1, mode);
1716 if (tem)
1717 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1718 }
1719
1720 /* If one of the operands is a PLUS or a MINUS, see if we can
1721 simplify this by the associative law.
1722 Don't use the associative law for floating point.
1723 The inaccuracy makes it nonassociative,
1724 and subtle programs can break if operations are associated. */
1725
1726 if (INTEGRAL_MODE_P (mode)
1727 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1728 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1729 || (GET_CODE (op0) == CONST
1730 && GET_CODE (XEXP (op0, 0)) == PLUS)
1731 || (GET_CODE (op1) == CONST
1732 && GET_CODE (XEXP (op1, 0)) == PLUS))
1733 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1734 return tem;
1735
1736 /* Don't let a relocatable value get a negative coeff. */
1737 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1738 return simplify_gen_binary (PLUS, mode,
1739 op0,
1740 neg_const_int (mode, op1));
1741
1742 /* (x - (x & y)) -> (x & ~y) */
1743 if (GET_CODE (op1) == AND)
1744 {
1745 if (rtx_equal_p (op0, XEXP (op1, 0)))
1746 {
1747 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1748 GET_MODE (XEXP (op1, 1)));
1749 return simplify_gen_binary (AND, mode, op0, tem);
1750 }
1751 if (rtx_equal_p (op0, XEXP (op1, 1)))
1752 {
1753 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1754 GET_MODE (XEXP (op1, 0)));
1755 return simplify_gen_binary (AND, mode, op0, tem);
1756 }
1757 }
1758 break;
1759
1760 case MULT:
1761 if (trueop1 == constm1_rtx)
1762 return simplify_gen_unary (NEG, mode, op0, mode);
1763
1764 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1765 x is NaN, since x * 0 is then also NaN. Nor is it valid
1766 when the mode has signed zeros, since multiplying a negative
1767 number by 0 will give -0, not 0. */
1768 if (!HONOR_NANS (mode)
1769 && !HONOR_SIGNED_ZEROS (mode)
1770 && trueop1 == CONST0_RTX (mode)
1771 && ! side_effects_p (op0))
1772 return op1;
1773
1774 /* In IEEE floating point, x*1 is not equivalent to x for
1775 signalling NaNs. */
1776 if (!HONOR_SNANS (mode)
1777 && trueop1 == CONST1_RTX (mode))
1778 return op0;
1779
1780 /* Convert multiply by constant power of two into shift unless
1781 we are still generating RTL. This test is a kludge. */
1782 if (GET_CODE (trueop1) == CONST_INT
1783 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1784 /* If the mode is larger than the host word size, and the
1785 uppermost bit is set, then this isn't a power of two due
1786 to implicit sign extension. */
1787 && (width <= HOST_BITS_PER_WIDE_INT
1788 || val != HOST_BITS_PER_WIDE_INT - 1))
1789 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1790
1791 /* x*2 is x+x and x*(-1) is -x */
1792 if (GET_CODE (trueop1) == CONST_DOUBLE
1793 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1794 && GET_MODE (op0) == mode)
1795 {
1796 REAL_VALUE_TYPE d;
1797 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1798
1799 if (REAL_VALUES_EQUAL (d, dconst2))
1800 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1801
1802 if (REAL_VALUES_EQUAL (d, dconstm1))
1803 return simplify_gen_unary (NEG, mode, op0, mode);
1804 }
1805
1806 /* Reassociate multiplication, but for floating point MULTs
1807 only when the user specifies unsafe math optimizations. */
1808 if (! FLOAT_MODE_P (mode)
1809 || flag_unsafe_math_optimizations)
1810 {
1811 tem = simplify_associative_operation (code, mode, op0, op1);
1812 if (tem)
1813 return tem;
1814 }
1815 break;
1816
1817 case IOR:
1818 if (trueop1 == const0_rtx)
1819 return op0;
1820 if (GET_CODE (trueop1) == CONST_INT
1821 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1822 == GET_MODE_MASK (mode)))
1823 return op1;
1824 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1825 return op0;
1826 /* A | (~A) -> -1 */
1827 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1828 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1829 && ! side_effects_p (op0)
1830 && GET_MODE_CLASS (mode) != MODE_CC)
1831 return constm1_rtx;
1832 tem = simplify_associative_operation (code, mode, op0, op1);
1833 if (tem)
1834 return tem;
1835 break;
1836
1837 case XOR:
1838 if (trueop1 == const0_rtx)
1839 return op0;
1840 if (GET_CODE (trueop1) == CONST_INT
1841 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1842 == GET_MODE_MASK (mode)))
1843 return simplify_gen_unary (NOT, mode, op0, mode);
1844 if (trueop0 == trueop1
1845 && ! side_effects_p (op0)
1846 && GET_MODE_CLASS (mode) != MODE_CC)
1847 return const0_rtx;
1848
1849 /* Canonicalize XOR of the most significant bit to PLUS. */
1850 if ((GET_CODE (op1) == CONST_INT
1851 || GET_CODE (op1) == CONST_DOUBLE)
1852 && mode_signbit_p (mode, op1))
1853 return simplify_gen_binary (PLUS, mode, op0, op1);
1854 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
1855 if ((GET_CODE (op1) == CONST_INT
1856 || GET_CODE (op1) == CONST_DOUBLE)
1857 && GET_CODE (op0) == PLUS
1858 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1859 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1860 && mode_signbit_p (mode, XEXP (op0, 1)))
1861 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1862 simplify_gen_binary (XOR, mode, op1,
1863 XEXP (op0, 1)));
1864
1865 tem = simplify_associative_operation (code, mode, op0, op1);
1866 if (tem)
1867 return tem;
1868 break;
1869
1870 case AND:
1871 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1872 return const0_rtx;
1873 /* If we are turning off bits already known off in OP0, we need
1874 not do an AND. */
1875 if (GET_CODE (trueop1) == CONST_INT
1876 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1877 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
1878 return op0;
1879 if (trueop0 == trueop1 && ! side_effects_p (op0)
1880 && GET_MODE_CLASS (mode) != MODE_CC)
1881 return op0;
1882 /* A & (~A) -> 0 */
1883 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1884 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1885 && ! side_effects_p (op0)
1886 && GET_MODE_CLASS (mode) != MODE_CC)
1887 return const0_rtx;
1888 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
1889 ((A & N) + B) & M -> (A + B) & M
1890 Similarly if (N & M) == 0,
1891 ((A | N) + B) & M -> (A + B) & M
1892 and for - instead of + and/or ^ instead of |. */
1893 if (GET_CODE (trueop1) == CONST_INT
1894 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1895 && ~INTVAL (trueop1)
1896 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
1897 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
1898 {
1899 rtx pmop[2];
1900 int which;
1901
1902 pmop[0] = XEXP (op0, 0);
1903 pmop[1] = XEXP (op0, 1);
1904
1905 for (which = 0; which < 2; which++)
1906 {
1907 tem = pmop[which];
1908 switch (GET_CODE (tem))
1909 {
1910 case AND:
1911 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1912 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
1913 == INTVAL (trueop1))
1914 pmop[which] = XEXP (tem, 0);
1915 break;
1916 case IOR:
1917 case XOR:
1918 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1919 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
1920 pmop[which] = XEXP (tem, 0);
1921 break;
1922 default:
1923 break;
1924 }
1925 }
1926
1927 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
1928 {
1929 tem = simplify_gen_binary (GET_CODE (op0), mode,
1930 pmop[0], pmop[1]);
1931 return simplify_gen_binary (code, mode, tem, op1);
1932 }
1933 }
1934 tem = simplify_associative_operation (code, mode, op0, op1);
1935 if (tem)
1936 return tem;
1937 break;
1938
1939 case UDIV:
1940 /* 0/x is 0 (or x&0 if x has side-effects). */
1941 if (trueop0 == const0_rtx)
1942 return side_effects_p (op1)
1943 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
1944 : const0_rtx;
1945 /* x/1 is x. */
1946 if (trueop1 == const1_rtx)
1947 {
1948 /* Handle narrowing UDIV. */
1949 rtx x = gen_lowpart_common (mode, op0);
1950 if (x)
1951 return x;
1952 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1953 return gen_lowpart_SUBREG (mode, op0);
1954 return op0;
1955 }
1956 /* Convert divide by power of two into shift. */
1957 if (GET_CODE (trueop1) == CONST_INT
1958 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1959 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
1960 break;
1961
1962 case DIV:
1963 /* Handle floating point and integers separately. */
1964 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1965 {
1966 /* Maybe change 0.0 / x to 0.0. This transformation isn't
1967 safe for modes with NaNs, since 0.0 / 0.0 will then be
1968 NaN rather than 0.0. Nor is it safe for modes with signed
1969 	     zeros, since dividing 0 by a negative number gives -0.0.  */
1970 if (trueop0 == CONST0_RTX (mode)
1971 && !HONOR_NANS (mode)
1972 && !HONOR_SIGNED_ZEROS (mode)
1973 && ! side_effects_p (op1))
1974 return op0;
1975 /* x/1.0 is x. */
1976 if (trueop1 == CONST1_RTX (mode)
1977 && !HONOR_SNANS (mode))
1978 return op0;
1979
1980 if (GET_CODE (trueop1) == CONST_DOUBLE
1981 && trueop1 != CONST0_RTX (mode))
1982 {
1983 REAL_VALUE_TYPE d;
1984 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1985
1986 /* x/-1.0 is -x. */
1987 if (REAL_VALUES_EQUAL (d, dconstm1)
1988 && !HONOR_SNANS (mode))
1989 return simplify_gen_unary (NEG, mode, op0, mode);
1990
1991 /* Change FP division by a constant into multiplication.
1992 Only do this with -funsafe-math-optimizations. */
1993 if (flag_unsafe_math_optimizations
1994 && !REAL_VALUES_EQUAL (d, dconst0))
1995 {
1996 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
1997 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1998 return simplify_gen_binary (MULT, mode, op0, tem);
1999 }
2000 }
2001 }
2002 else
2003 {
2004 /* 0/x is 0 (or x&0 if x has side-effects). */
2005 if (trueop0 == const0_rtx)
2006 return side_effects_p (op1)
2007 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2008 : const0_rtx;
2009 /* x/1 is x. */
2010 if (trueop1 == const1_rtx)
2011 {
2012 /* Handle narrowing DIV. */
2013 rtx x = gen_lowpart_common (mode, op0);
2014 if (x)
2015 return x;
2016 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2017 return gen_lowpart_SUBREG (mode, op0);
2018 return op0;
2019 }
2020 /* x/-1 is -x. */
2021 if (trueop1 == constm1_rtx)
2022 {
2023 rtx x = gen_lowpart_common (mode, op0);
2024 if (!x)
2025 x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2026 ? gen_lowpart_SUBREG (mode, op0) : op0;
2027 return simplify_gen_unary (NEG, mode, x, mode);
2028 }
2029 }
2030 break;
2031
2032 case UMOD:
2033 /* 0%x is 0 (or x&0 if x has side-effects). */
2034 if (trueop0 == const0_rtx)
2035 return side_effects_p (op1)
2036 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2037 : const0_rtx;
2038 	  /* x%1 is 0 (or x&0 if x has side-effects).  */
2039 if (trueop1 == const1_rtx)
2040 return side_effects_p (op0)
2041 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2042 : const0_rtx;
2043 /* Implement modulus by power of two as AND. */
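          /* For example, (umod X (const_int 16)) becomes
             (and X (const_int 15)).  */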
2044 if (GET_CODE (trueop1) == CONST_INT
2045 && exact_log2 (INTVAL (trueop1)) > 0)
2046 return simplify_gen_binary (AND, mode, op0,
2047 GEN_INT (INTVAL (op1) - 1));
2048 break;
2049
2050 case MOD:
2051 /* 0%x is 0 (or x&0 if x has side-effects). */
2052 if (trueop0 == const0_rtx)
2053 return side_effects_p (op1)
2054 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2055 : const0_rtx;
2056 	  /* x%1 and x%-1 are 0 (or x&0 if x has side-effects).  */
2057 if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
2058 return side_effects_p (op0)
2059 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2060 : const0_rtx;
2061 break;
2062
2063 case ROTATERT:
2064 case ROTATE:
2065 case ASHIFTRT:
2066 	  /* Rotating or arithmetically right-shifting ~0 always results in ~0.  */
2067 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2068 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2069 && ! side_effects_p (op1))
2070 return op0;
2071
2072 /* Fall through.... */
2073
2074 case ASHIFT:
2075 case LSHIFTRT:
2076 if (trueop1 == const0_rtx)
2077 return op0;
2078 if (trueop0 == const0_rtx && ! side_effects_p (op1))
2079 return op0;
2080 break;
2081
2082 case SMIN:
2083 if (width <= HOST_BITS_PER_WIDE_INT
2084 && GET_CODE (trueop1) == CONST_INT
2085 	      && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2086 && ! side_effects_p (op0))
2087 return op1;
2088 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2089 return op0;
2090 tem = simplify_associative_operation (code, mode, op0, op1);
2091 if (tem)
2092 return tem;
2093 break;
2094
2095 case SMAX:
2096 if (width <= HOST_BITS_PER_WIDE_INT
2097 && GET_CODE (trueop1) == CONST_INT
2098 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2099 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2100 && ! side_effects_p (op0))
2101 return op1;
2102 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2103 return op0;
2104 tem = simplify_associative_operation (code, mode, op0, op1);
2105 if (tem)
2106 return tem;
2107 break;
2108
2109 case UMIN:
2110 if (trueop1 == const0_rtx && ! side_effects_p (op0))
2111 return op1;
2112 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2113 return op0;
2114 tem = simplify_associative_operation (code, mode, op0, op1);
2115 if (tem)
2116 return tem;
2117 break;
2118
2119 case UMAX:
2120 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2121 return op1;
2122 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2123 return op0;
2124 tem = simplify_associative_operation (code, mode, op0, op1);
2125 if (tem)
2126 return tem;
2127 break;
2128
2129 case SS_PLUS:
2130 case US_PLUS:
2131 case SS_MINUS:
2132 case US_MINUS:
2133 /* ??? There are simplifications that can be done. */
2134 return 0;
2135
2136 case VEC_SELECT:
2137 if (!VECTOR_MODE_P (mode))
2138 {
2139 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2140 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2141 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2142 gcc_assert (XVECLEN (trueop1, 0) == 1);
2143 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2144
2145 if (GET_CODE (trueop0) == CONST_VECTOR)
2146 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2147 (trueop1, 0, 0)));
2148 }
2149 else
2150 {
2151 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2152 gcc_assert (GET_MODE_INNER (mode)
2153 == GET_MODE_INNER (GET_MODE (trueop0)));
2154 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2155
2156 if (GET_CODE (trueop0) == CONST_VECTOR)
2157 {
2158 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2159 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2160 rtvec v = rtvec_alloc (n_elts);
2161 unsigned int i;
2162
2163 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2164 for (i = 0; i < n_elts; i++)
2165 {
2166 rtx x = XVECEXP (trueop1, 0, i);
2167
2168 gcc_assert (GET_CODE (x) == CONST_INT);
2169 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2170 INTVAL (x));
2171 }
2172
2173 return gen_rtx_CONST_VECTOR (mode, v);
2174 }
2175 }
2176 return 0;
2177 case VEC_CONCAT:
2178 {
2179 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2180 ? GET_MODE (trueop0)
2181 : GET_MODE_INNER (mode));
2182 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2183 ? GET_MODE (trueop1)
2184 : GET_MODE_INNER (mode));
2185
2186 gcc_assert (VECTOR_MODE_P (mode));
2187 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2188 == GET_MODE_SIZE (mode));
2189
2190 if (VECTOR_MODE_P (op0_mode))
2191 gcc_assert (GET_MODE_INNER (mode)
2192 == GET_MODE_INNER (op0_mode));
2193 else
2194 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2195
2196 if (VECTOR_MODE_P (op1_mode))
2197 gcc_assert (GET_MODE_INNER (mode)
2198 == GET_MODE_INNER (op1_mode));
2199 else
2200 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2201
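            /* A concatenation of constants folds to a CONST_VECTOR below,
               e.g. (vec_concat:V2SI (const_int 1) (const_int 2)) becomes
               (const_vector:V2SI [(const_int 1) (const_int 2)]).  */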
2202 if ((GET_CODE (trueop0) == CONST_VECTOR
2203 || GET_CODE (trueop0) == CONST_INT
2204 || GET_CODE (trueop0) == CONST_DOUBLE)
2205 && (GET_CODE (trueop1) == CONST_VECTOR
2206 || GET_CODE (trueop1) == CONST_INT
2207 || GET_CODE (trueop1) == CONST_DOUBLE))
2208 {
2209 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2210 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2211 rtvec v = rtvec_alloc (n_elts);
2212 unsigned int i;
2213 unsigned in_n_elts = 1;
2214
2215 if (VECTOR_MODE_P (op0_mode))
2216 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2217 for (i = 0; i < n_elts; i++)
2218 {
2219 if (i < in_n_elts)
2220 {
2221 if (!VECTOR_MODE_P (op0_mode))
2222 RTVEC_ELT (v, i) = trueop0;
2223 else
2224 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2225 }
2226 else
2227 {
2228 if (!VECTOR_MODE_P (op1_mode))
2229 RTVEC_ELT (v, i) = trueop1;
2230 else
2231 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2232 i - in_n_elts);
2233 }
2234 }
2235
2236 return gen_rtx_CONST_VECTOR (mode, v);
2237 }
2238 }
2239 return 0;
2240
2241 default:
2242 gcc_unreachable ();
2243 }
2244
2245 return 0;
2246 }
2247
2248 /* Get the integer argument values in two forms:
2249 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2250
2251 arg0 = INTVAL (trueop0);
2252 arg1 = INTVAL (trueop1);
2253
2254 if (width < HOST_BITS_PER_WIDE_INT)
2255 {
2256 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2257 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2258
2259 arg0s = arg0;
2260 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2261 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2262
2263 arg1s = arg1;
2264 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2265 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2266 }
2267 else
2268 {
2269 arg0s = arg0;
2270 arg1s = arg1;
2271 }
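      /* For example, with WIDTH == 8, an ARG0 of 0xff is left as 0xff,
         while ARG0S becomes -1.  */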
2272
2273 /* Compute the value of the arithmetic. */
2274
2275 switch (code)
2276 {
2277 case PLUS:
2278 val = arg0s + arg1s;
2279 break;
2280
2281 case MINUS:
2282 val = arg0s - arg1s;
2283 break;
2284
2285 case MULT:
2286 val = arg0s * arg1s;
2287 break;
2288
2289 case DIV:
2290 if (arg1s == 0
2291 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2292 && arg1s == -1))
2293 return 0;
2294 val = arg0s / arg1s;
2295 break;
2296
2297 case MOD:
2298 if (arg1s == 0
2299 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2300 && arg1s == -1))
2301 return 0;
2302 val = arg0s % arg1s;
2303 break;
2304
2305 case UDIV:
2306 if (arg1 == 0
2307 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2308 && arg1s == -1))
2309 return 0;
2310 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2311 break;
2312
2313 case UMOD:
2314 if (arg1 == 0
2315 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2316 && arg1s == -1))
2317 return 0;
2318 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2319 break;
2320
2321 case AND:
2322 val = arg0 & arg1;
2323 break;
2324
2325 case IOR:
2326 val = arg0 | arg1;
2327 break;
2328
2329 case XOR:
2330 val = arg0 ^ arg1;
2331 break;
2332
2333 case LSHIFTRT:
2334 case ASHIFT:
2335 case ASHIFTRT:
2336 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure the
2337 value is in range. We can't return any old value for out-of-range
2338 arguments because either the middle-end (via shift_truncation_mask)
2339 or the back-end might be relying on target-specific knowledge.
2340 Nor can we rely on shift_truncation_mask, since the shift might
2341 not be part of an ashlM3, lshrM3 or ashrM3 instruction. */
2342 if (SHIFT_COUNT_TRUNCATED)
2343 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2344 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2345 return 0;
2346
2347 val = (code == ASHIFT
2348 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2349 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2350
2351 /* Sign-extend the result for arithmetic right shifts. */
2352 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2353 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
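      /* For example, with WIDTH == 8 and ARG0 == 0xfe (-2), the logical
         shift by 1 gives 0x7f, and the OR above restores the sign bits,
         yielding -1.  */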
2354 break;
2355
2356 case ROTATERT:
2357 if (arg1 < 0)
2358 return 0;
2359
2360 arg1 %= width;
2361 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2362 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2363 break;
2364
2365 case ROTATE:
2366 if (arg1 < 0)
2367 return 0;
2368
2369 arg1 %= width;
2370 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2371 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
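      /* For example, with WIDTH == 8, rotating ARG0 == 0x81 left by 1
         gives 0x103 here; trunc_int_for_mode below reduces it to 0x03.  */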
2372 break;
2373
2374 case COMPARE:
2375 /* Do nothing here. */
2376 return 0;
2377
2378 case SMIN:
2379 val = arg0s <= arg1s ? arg0s : arg1s;
2380 break;
2381
2382 case UMIN:
2383 val = ((unsigned HOST_WIDE_INT) arg0
2384 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2385 break;
2386
2387 case SMAX:
2388 val = arg0s > arg1s ? arg0s : arg1s;
2389 break;
2390
2391 case UMAX:
2392 val = ((unsigned HOST_WIDE_INT) arg0
2393 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2394 break;
2395
2396 case SS_PLUS:
2397 case US_PLUS:
2398 case SS_MINUS:
2399 case US_MINUS:
2400 /* ??? There are simplifications that can be done. */
2401 return 0;
2402
2403 default:
2404 gcc_unreachable ();
2405 }
2406
2407 val = trunc_int_for_mode (val, mode);
2408
2409 return GEN_INT (val);
2410 }
2411 \f
2412 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2413 PLUS or MINUS.
2414
2415    Rather than testing for specific cases, we do this by a brute-force method
2416 and do all possible simplifications until no more changes occur. Then
2417 we rebuild the operation.
2418
2419 If FORCE is true, then always generate the rtx. This is used to
2420 canonicalize stuff emitted from simplify_gen_binary. Note that this
2421 can still fail if the rtx is too complex. It won't fail just because
2422 the result is not 'simpler' than the input, however. */
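     /* For example, (minus (plus A B) A) expands to the operand list
        {+A, +B, -A}; the pairwise pass cancels +A against -A, and the
        result is rebuilt as just B.  */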
2423
2424 struct simplify_plus_minus_op_data
2425 {
2426 rtx op;
2427 int neg;
2428 };
2429
2430 static int
2431 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2432 {
2433 const struct simplify_plus_minus_op_data *d1 = p1;
2434 const struct simplify_plus_minus_op_data *d2 = p2;
2435
2436 return (commutative_operand_precedence (d2->op)
2437 - commutative_operand_precedence (d1->op));
2438 }
2439
2440 static rtx
2441 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2442 rtx op1, int force)
2443 {
2444 struct simplify_plus_minus_op_data ops[8];
2445 rtx result, tem;
2446 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2447 int first, changed;
2448 int i, j;
2449
2450 memset (ops, 0, sizeof ops);
2451
2452 /* Set up the two operands and then expand them until nothing has been
2453 changed. If we run out of room in our array, give up; this should
2454 almost never happen. */
2455
2456 ops[0].op = op0;
2457 ops[0].neg = 0;
2458 ops[1].op = op1;
2459 ops[1].neg = (code == MINUS);
2460
2461 do
2462 {
2463 changed = 0;
2464
2465 for (i = 0; i < n_ops; i++)
2466 {
2467 rtx this_op = ops[i].op;
2468 int this_neg = ops[i].neg;
2469 enum rtx_code this_code = GET_CODE (this_op);
2470
2471 switch (this_code)
2472 {
2473 case PLUS:
2474 case MINUS:
2475 if (n_ops == 7)
2476 return NULL_RTX;
2477
2478 ops[n_ops].op = XEXP (this_op, 1);
2479 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2480 n_ops++;
2481
2482 ops[i].op = XEXP (this_op, 0);
2483 input_ops++;
2484 changed = 1;
2485 break;
2486
2487 case NEG:
2488 ops[i].op = XEXP (this_op, 0);
2489 ops[i].neg = ! this_neg;
2490 changed = 1;
2491 break;
2492
2493 case CONST:
2494 if (n_ops < 7
2495 && GET_CODE (XEXP (this_op, 0)) == PLUS
2496 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2497 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2498 {
2499 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2500 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2501 ops[n_ops].neg = this_neg;
2502 n_ops++;
2503 input_consts++;
2504 changed = 1;
2505 }
2506 break;
2507
2508 case NOT:
2509 /* ~a -> (-a - 1) */
2510 if (n_ops != 7)
2511 {
2512 ops[n_ops].op = constm1_rtx;
2513 ops[n_ops++].neg = this_neg;
2514 ops[i].op = XEXP (this_op, 0);
2515 ops[i].neg = !this_neg;
2516 changed = 1;
2517 }
2518 break;
2519
2520 case CONST_INT:
2521 if (this_neg)
2522 {
2523 ops[i].op = neg_const_int (mode, this_op);
2524 ops[i].neg = 0;
2525 changed = 1;
2526 }
2527 break;
2528
2529 default:
2530 break;
2531 }
2532 }
2533 }
2534 while (changed);
2535
2536 /* If we only have two operands, we can't do anything. */
2537 if (n_ops <= 2 && !force)
2538 return NULL_RTX;
2539
2540 /* Count the number of CONSTs we didn't split above. */
2541 for (i = 0; i < n_ops; i++)
2542 if (GET_CODE (ops[i].op) == CONST)
2543 input_consts++;
2544
2545 /* Now simplify each pair of operands until nothing changes. The first
2546 time through just simplify constants against each other. */
2547
2548 first = 1;
2549 do
2550 {
2551 changed = first;
2552
2553 for (i = 0; i < n_ops - 1; i++)
2554 for (j = i + 1; j < n_ops; j++)
2555 {
2556 rtx lhs = ops[i].op, rhs = ops[j].op;
2557 int lneg = ops[i].neg, rneg = ops[j].neg;
2558
2559 if (lhs != 0 && rhs != 0
2560 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2561 {
2562 enum rtx_code ncode = PLUS;
2563
2564 if (lneg != rneg)
2565 {
2566 ncode = MINUS;
2567 if (lneg)
2568 tem = lhs, lhs = rhs, rhs = tem;
2569 }
2570 else if (swap_commutative_operands_p (lhs, rhs))
2571 tem = lhs, lhs = rhs, rhs = tem;
2572
2573 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2574
2575 /* Reject "simplifications" that just wrap the two
2576 arguments in a CONST. Failure to do so can result
2577 in infinite recursion with simplify_binary_operation
2578 when it calls us to simplify CONST operations. */
2579 if (tem
2580 && ! (GET_CODE (tem) == CONST
2581 && GET_CODE (XEXP (tem, 0)) == ncode
2582 && XEXP (XEXP (tem, 0), 0) == lhs
2583 && XEXP (XEXP (tem, 0), 1) == rhs)
2584 /* Don't allow -x + -1 -> ~x simplifications in the
2585 first pass. This allows us the chance to combine
2586 the -1 with other constants. */
2587 && ! (first
2588 && GET_CODE (tem) == NOT
2589 && XEXP (tem, 0) == rhs))
2590 {
2591 lneg &= rneg;
2592 if (GET_CODE (tem) == NEG)
2593 tem = XEXP (tem, 0), lneg = !lneg;
2594 if (GET_CODE (tem) == CONST_INT && lneg)
2595 tem = neg_const_int (mode, tem), lneg = 0;
2596
2597 ops[i].op = tem;
2598 ops[i].neg = lneg;
2599 ops[j].op = NULL_RTX;
2600 changed = 1;
2601 }
2602 }
2603 }
2604
2605 first = 0;
2606 }
2607 while (changed);
2608
2609 /* Pack all the operands to the lower-numbered entries. */
2610 for (i = 0, j = 0; j < n_ops; j++)
2611 if (ops[j].op)
2612 ops[i++] = ops[j];
2613 n_ops = i;
2614
2615 /* Sort the operations based on swap_commutative_operands_p. */
2616 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2617
2618 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2619 if (n_ops == 2
2620 && GET_CODE (ops[1].op) == CONST_INT
2621 && CONSTANT_P (ops[0].op)
2622 && ops[0].neg)
2623 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2624
2625 /* We suppressed creation of trivial CONST expressions in the
2626 combination loop to avoid recursion. Create one manually now.
2627 The combination loop should have ensured that there is exactly
2628 one CONST_INT, and the sort will have ensured that it is last
2629 in the array and that any other constant will be next-to-last. */
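  /* For example, a trailing SYMBOL_REF and CONST_INT pair is folded by
     plus_constant below into (const (plus (symbol_ref ...) (const_int ...))).  */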
2630
2631 if (n_ops > 1
2632 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2633 && CONSTANT_P (ops[n_ops - 2].op))
2634 {
2635 rtx value = ops[n_ops - 1].op;
2636 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2637 value = neg_const_int (mode, value);
2638 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2639 n_ops--;
2640 }
2641
2642 /* Count the number of CONSTs that we generated. */
2643 n_consts = 0;
2644 for (i = 0; i < n_ops; i++)
2645 if (GET_CODE (ops[i].op) == CONST)
2646 n_consts++;
2647
2648 /* Give up if we didn't reduce the number of operands we had. Make
2649 sure we count a CONST as two operands. If we have the same
2650 number of operands, but have made more CONSTs than before, this
2651 is also an improvement, so accept it. */
2652 if (!force
2653 && (n_ops + n_consts > input_ops
2654 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2655 return NULL_RTX;
2656
2657 /* Put a non-negated operand first, if possible. */
2658
2659 for (i = 0; i < n_ops && ops[i].neg; i++)
2660 continue;
2661 if (i == n_ops)
2662 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2663 else if (i != 0)
2664 {
2665 tem = ops[0].op;
2666 ops[0] = ops[i];
2667 ops[i].op = tem;
2668 ops[i].neg = 1;
2669 }
2670
2671 /* Now make the result by performing the requested operations. */
2672 result = ops[0].op;
2673 for (i = 1; i < n_ops; i++)
2674 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2675 mode, result, ops[i].op);
2676
2677 return result;
2678 }
2679
2680 /* Like simplify_binary_operation except used for relational operators.
2681 MODE is the mode of the result. If MODE is VOIDmode, both operands must
2682 also be VOIDmode.
2683
2684    CMP_MODE specifies the mode in which the comparison is done, so it is
2685 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2686 the operands or, if both are VOIDmode, the operands are compared in
2687 "infinite precision". */
2688 rtx
2689 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2690 enum machine_mode cmp_mode, rtx op0, rtx op1)
2691 {
2692 rtx tem, trueop0, trueop1;
2693
2694 if (cmp_mode == VOIDmode)
2695 cmp_mode = GET_MODE (op0);
2696 if (cmp_mode == VOIDmode)
2697 cmp_mode = GET_MODE (op1);
2698
2699 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2700 if (tem)
2701 {
2702 #ifdef FLOAT_STORE_FLAG_VALUE
2703 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2704 {
2705 if (tem == const0_rtx)
2706 return CONST0_RTX (mode);
2707 else if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2708 {
2709 REAL_VALUE_TYPE val;
2710 val = FLOAT_STORE_FLAG_VALUE (mode);
2711 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2712 }
2713 }
2714 #endif
2715
2716 return tem;
2717 }
2718
2719 /* For the following tests, ensure const0_rtx is op1. */
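  /* For example, (lt (const_int 0) X) becomes (gt X (const_int 0)).  */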
2720 if (swap_commutative_operands_p (op0, op1)
2721 || (op0 == const0_rtx && op1 != const0_rtx))
2722 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2723
2724 /* If op0 is a compare, extract the comparison arguments from it. */
2725 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2726 return simplify_relational_operation (code, mode, VOIDmode,
2727 XEXP (op0, 0), XEXP (op0, 1));
2728
2729 if (mode == VOIDmode
2730 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2731 || CC0_P (op0))
2732 return NULL_RTX;
2733
2734 trueop0 = avoid_constant_pool_reference (op0);
2735 trueop1 = avoid_constant_pool_reference (op1);
2736 return simplify_relational_operation_1 (code, mode, cmp_mode,
2737 trueop0, trueop1);
2738 }
2739
2740 /* This part of simplify_relational_operation is only used when CMP_MODE
2741 is not in class MODE_CC (i.e. it is a real comparison).
2742
2743    MODE is the mode of the result, while CMP_MODE specifies the mode
2744    in which the comparison is done, so it is the mode of the operands.  */
2745 rtx
2746 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2747 enum machine_mode cmp_mode, rtx op0, rtx op1)
2748 {
2749 if (GET_CODE (op1) == CONST_INT)
2750 {
2751 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2752 {
2753 	  /* If op0 is a comparison, extract the comparison arguments from it.  */
2754 if (code == NE)
2755 {
2756 if (GET_MODE (op0) == cmp_mode)
2757 return simplify_rtx (op0);
2758 else
2759 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2760 XEXP (op0, 0), XEXP (op0, 1));
2761 }
2762 else if (code == EQ)
2763 {
2764 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2765 if (new_code != UNKNOWN)
2766 return simplify_gen_relational (new_code, mode, VOIDmode,
2767 XEXP (op0, 0), XEXP (op0, 1));
2768 }
2769 }
2770 }
2771
2772 return NULL_RTX;
2773 }
2774
2775 /* Check if the given comparison (done in the given MODE) is actually a
2776 tautology or a contradiction.
2777 If no simplification is possible, this function returns zero.
2778 Otherwise, it returns either const_true_rtx or const0_rtx. */
2779
2780 rtx
2781 simplify_const_relational_operation (enum rtx_code code,
2782 enum machine_mode mode,
2783 rtx op0, rtx op1)
2784 {
2785 int equal, op0lt, op0ltu, op1lt, op1ltu;
2786 rtx tem;
2787 rtx trueop0;
2788 rtx trueop1;
2789
2790 gcc_assert (mode != VOIDmode
2791 || (GET_MODE (op0) == VOIDmode
2792 && GET_MODE (op1) == VOIDmode));
2793
2794 /* If op0 is a compare, extract the comparison arguments from it. */
2795 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2796 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2797
2798 /* We can't simplify MODE_CC values since we don't know what the
2799 actual comparison is. */
2800 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2801 return 0;
2802
2803 /* Make sure the constant is second. */
2804 if (swap_commutative_operands_p (op0, op1))
2805 {
2806 tem = op0, op0 = op1, op1 = tem;
2807 code = swap_condition (code);
2808 }
2809
2810 trueop0 = avoid_constant_pool_reference (op0);
2811 trueop1 = avoid_constant_pool_reference (op1);
2812
2813   /* For integer comparisons of A and B, we may be able to simplify A - B and
2814      then simplify a comparison of that with zero.  If A and B are both either
2815 a register or a CONST_INT, this can't help; testing for these cases will
2816 prevent infinite recursion here and speed things up.
2817
2818 If CODE is an unsigned comparison, then we can never do this optimization,
2819 because it gives an incorrect result if the subtraction wraps around zero.
2820 ANSI C defines unsigned operations such that they never overflow, and
2821      thus such cases cannot be ignored; but we cannot do it even for
2822 signed comparisons for languages such as Java, so test flag_wrapv. */
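  /* For example, when flag_wrapv is clear, (gt (plus X (const_int 1)) X)
     has the difference (const_int 1), and the recursive comparison of
     that against zero yields const_true_rtx.  */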
2823
2824 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2825 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
2826 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
2827 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2828 /* We cannot do this for == or != if tem is a nonzero address. */
2829 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2830 && code != GTU && code != GEU && code != LTU && code != LEU)
2831 return simplify_const_relational_operation (signed_condition (code),
2832 mode, tem, const0_rtx);
2833
2834 if (flag_unsafe_math_optimizations && code == ORDERED)
2835 return const_true_rtx;
2836
2837 if (flag_unsafe_math_optimizations && code == UNORDERED)
2838 return const0_rtx;
2839
2840 /* For modes without NaNs, if the two operands are equal, we know the
2841 result except if they have side-effects. */
2842 if (! HONOR_NANS (GET_MODE (trueop0))
2843 && rtx_equal_p (trueop0, trueop1)
2844 && ! side_effects_p (trueop0))
2845 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2846
2847 /* If the operands are floating-point constants, see if we can fold
2848 the result. */
2849 else if (GET_CODE (trueop0) == CONST_DOUBLE
2850 && GET_CODE (trueop1) == CONST_DOUBLE
2851 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2852 {
2853 REAL_VALUE_TYPE d0, d1;
2854
2855 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2856 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2857
2858 /* Comparisons are unordered iff at least one of the values is NaN. */
2859 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2860 switch (code)
2861 {
2862 case UNEQ:
2863 case UNLT:
2864 case UNGT:
2865 case UNLE:
2866 case UNGE:
2867 case NE:
2868 case UNORDERED:
2869 return const_true_rtx;
2870 case EQ:
2871 case LT:
2872 case GT:
2873 case LE:
2874 case GE:
2875 case LTGT:
2876 case ORDERED:
2877 return const0_rtx;
2878 default:
2879 return 0;
2880 }
2881
2882 equal = REAL_VALUES_EQUAL (d0, d1);
2883 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2884 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2885 }
2886
2887 /* Otherwise, see if the operands are both integers. */
2888 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2889 && (GET_CODE (trueop0) == CONST_DOUBLE
2890 || GET_CODE (trueop0) == CONST_INT)
2891 && (GET_CODE (trueop1) == CONST_DOUBLE
2892 || GET_CODE (trueop1) == CONST_INT))
2893 {
2894 int width = GET_MODE_BITSIZE (mode);
2895 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2896 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2897
2898 /* Get the two words comprising each integer constant. */
2899 if (GET_CODE (trueop0) == CONST_DOUBLE)
2900 {
2901 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2902 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2903 }
2904 else
2905 {
2906 l0u = l0s = INTVAL (trueop0);
2907 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2908 }
2909
2910 if (GET_CODE (trueop1) == CONST_DOUBLE)
2911 {
2912 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2913 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2914 }
2915 else
2916 {
2917 l1u = l1s = INTVAL (trueop1);
2918 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2919 }
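      /* At this point a CONST_INT such as -1 has unpacked to all-ones in
         both the low and high words, courtesy of HWI_SIGN_EXTEND.  */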
2920
2921 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2922 we have to sign or zero-extend the values. */
2923 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2924 {
2925 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2926 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2927
2928 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2929 l0s |= ((HOST_WIDE_INT) (-1) << width);
2930
2931 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2932 l1s |= ((HOST_WIDE_INT) (-1) << width);
2933 }
2934 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2935 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2936
2937 equal = (h0u == h1u && l0u == l1u);
2938 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2939 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2940 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2941 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2942 }
2943
2944 /* Otherwise, there are some code-specific tests we can make. */
2945 else
2946 {
2947 /* Optimize comparisons with upper and lower bounds. */
2948 if (SCALAR_INT_MODE_P (mode)
2949 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2950 {
2951 rtx mmin, mmax;
2952 int sign;
2953
2954 if (code == GEU
2955 || code == LEU
2956 || code == GTU
2957 || code == LTU)
2958 sign = 0;
2959 else
2960 sign = 1;
2961
2962 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
2963
2964 tem = NULL_RTX;
2965 switch (code)
2966 {
2967 case GEU:
2968 case GE:
2969 /* x >= min is always true. */
2970 if (rtx_equal_p (trueop1, mmin))
2971 tem = const_true_rtx;
2972 else
2973 break;
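	      /* Fall through when TEM has been set: the LEU/LE test below
		 can only set it to the same value.  */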
2974
2975 case LEU:
2976 case LE:
2977 /* x <= max is always true. */
2978 if (rtx_equal_p (trueop1, mmax))
2979 tem = const_true_rtx;
2980 break;
2981
2982 case GTU:
2983 case GT:
2984 /* x > max is always false. */
2985 if (rtx_equal_p (trueop1, mmax))
2986 tem = const0_rtx;
2987 break;
2988
2989 case LTU:
2990 case LT:
2991 /* x < min is always false. */
2992 if (rtx_equal_p (trueop1, mmin))
2993 tem = const0_rtx;
2994 break;
2995
2996 default:
2997 break;
2998 }
2999 if (tem == const0_rtx
3000 || tem == const_true_rtx)
3001 return tem;
3002 }
3003
3004 switch (code)
3005 {
3006 case EQ:
3007 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3008 return const0_rtx;
3009 break;
3010
3011 case NE:
3012 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3013 return const_true_rtx;
3014 break;
3015
3016 case LT:
3017 /* Optimize abs(x) < 0.0. */
3018 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
3019 {
3020 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3021 : trueop0;
3022 if (GET_CODE (tem) == ABS)
3023 return const0_rtx;
3024 }
3025 break;
3026
3027 case GE:
3028 /* Optimize abs(x) >= 0.0. */
3029 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
3030 {
3031 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3032 : trueop0;
3033 if (GET_CODE (tem) == ABS)
3034 return const_true_rtx;
3035 }
3036 break;
3037
3038 case UNGE:
3039 /* Optimize ! (abs(x) < 0.0). */
3040 if (trueop1 == CONST0_RTX (mode))
3041 {
3042 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3043 : trueop0;
3044 if (GET_CODE (tem) == ABS)
3045 return const_true_rtx;
3046 }
3047 break;
3048
3049 default:
3050 break;
3051 }
3052
3053 return 0;
3054 }
3055
3056 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3057 as appropriate. */
3058 switch (code)
3059 {
3060 case EQ:
3061 case UNEQ:
3062 return equal ? const_true_rtx : const0_rtx;
3063 case NE:
3064 case LTGT:
3065 return ! equal ? const_true_rtx : const0_rtx;
3066 case LT:
3067 case UNLT:
3068 return op0lt ? const_true_rtx : const0_rtx;
3069 case GT:
3070 case UNGT:
3071 return op1lt ? const_true_rtx : const0_rtx;
3072 case LTU:
3073 return op0ltu ? const_true_rtx : const0_rtx;
3074 case GTU:
3075 return op1ltu ? const_true_rtx : const0_rtx;
3076 case LE:
3077 case UNLE:
3078 return equal || op0lt ? const_true_rtx : const0_rtx;
3079 case GE:
3080 case UNGE:
3081 return equal || op1lt ? const_true_rtx : const0_rtx;
3082 case LEU:
3083 return equal || op0ltu ? const_true_rtx : const0_rtx;
3084 case GEU:
3085 return equal || op1ltu ? const_true_rtx : const0_rtx;
3086 case ORDERED:
3087 return const_true_rtx;
3088 case UNORDERED:
3089 return const0_rtx;
3090 default:
3091 gcc_unreachable ();
3092 }
3093 }
3094 \f
3095 /* Simplify CODE, an operation with result mode MODE and three operands,
3096 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3097    a constant.  Return 0 if no simplification is possible.  */
3098
3099 rtx
3100 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3101 enum machine_mode op0_mode, rtx op0, rtx op1,
3102 rtx op2)
3103 {
3104 unsigned int width = GET_MODE_BITSIZE (mode);
3105
3106 /* VOIDmode means "infinite" precision. */
3107 if (width == 0)
3108 width = HOST_BITS_PER_WIDE_INT;
3109
3110 switch (code)
3111 {
3112 case SIGN_EXTRACT:
3113 case ZERO_EXTRACT:
3114 if (GET_CODE (op0) == CONST_INT
3115 && GET_CODE (op1) == CONST_INT
3116 && GET_CODE (op2) == CONST_INT
3117 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3118 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3119 {
3120 	  /* Extracting a bit-field from a constant.  */
3121 HOST_WIDE_INT val = INTVAL (op0);
3122
3123 if (BITS_BIG_ENDIAN)
3124 val >>= (GET_MODE_BITSIZE (op0_mode)
3125 - INTVAL (op2) - INTVAL (op1));
3126 else
3127 val >>= INTVAL (op2);
3128
3129 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3130 {
3131 /* First zero-extend. */
3132 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3133 /* If desired, propagate sign bit. */
3134 if (code == SIGN_EXTRACT
3135 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3136 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3137 }
3138
3139 /* Clear the bits that don't belong in our mode,
3140 unless they and our sign bit are all one.
3141 So we get either a reasonable negative value or a reasonable
3142 unsigned value for this mode. */
3143 if (width < HOST_BITS_PER_WIDE_INT
3144 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3145 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3146 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3147
3148 return GEN_INT (val);
3149 }
3150 break;
3151
3152 case IF_THEN_ELSE:
3153 if (GET_CODE (op0) == CONST_INT)
3154 return op0 != const0_rtx ? op1 : op2;
3155
3156 /* Convert c ? a : a into "a". */
3157 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3158 return op1;
3159
3160 /* Convert a != b ? a : b into "a". */
3161 if (GET_CODE (op0) == NE
3162 && ! side_effects_p (op0)
3163 && ! HONOR_NANS (mode)
3164 && ! HONOR_SIGNED_ZEROS (mode)
3165 && ((rtx_equal_p (XEXP (op0, 0), op1)
3166 && rtx_equal_p (XEXP (op0, 1), op2))
3167 || (rtx_equal_p (XEXP (op0, 0), op2)
3168 && rtx_equal_p (XEXP (op0, 1), op1))))
3169 return op1;
3170
3171 /* Convert a == b ? a : b into "b". */
3172 if (GET_CODE (op0) == EQ
3173 && ! side_effects_p (op0)
3174 && ! HONOR_NANS (mode)
3175 && ! HONOR_SIGNED_ZEROS (mode)
3176 && ((rtx_equal_p (XEXP (op0, 0), op1)
3177 && rtx_equal_p (XEXP (op0, 1), op2))
3178 || (rtx_equal_p (XEXP (op0, 0), op2)
3179 && rtx_equal_p (XEXP (op0, 1), op1))))
3180 return op2;
3181
3182 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3183 {
3184 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3185 ? GET_MODE (XEXP (op0, 1))
3186 : GET_MODE (XEXP (op0, 0)));
3187 rtx temp;
3188
3189 /* Look for happy constants in op1 and op2. */
3190 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3191 {
3192 HOST_WIDE_INT t = INTVAL (op1);
3193 HOST_WIDE_INT f = INTVAL (op2);
3194
3195 if (t == STORE_FLAG_VALUE && f == 0)
3196 code = GET_CODE (op0);
3197 else if (t == 0 && f == STORE_FLAG_VALUE)
3198 {
3199 enum rtx_code tmp;
3200 tmp = reversed_comparison_code (op0, NULL_RTX);
3201 if (tmp == UNKNOWN)
3202 break;
3203 code = tmp;
3204 }
3205 else
3206 break;
3207
3208 return simplify_gen_relational (code, mode, cmp_mode,
3209 XEXP (op0, 0), XEXP (op0, 1));
3210 }
3211
3212 if (cmp_mode == VOIDmode)
3213 cmp_mode = op0_mode;
3214 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3215 cmp_mode, XEXP (op0, 0),
3216 XEXP (op0, 1));
3217
3218 /* See if any simplifications were possible. */
3219 if (temp)
3220 {
3221 if (GET_CODE (temp) == CONST_INT)
3222 return temp == const0_rtx ? op2 : op1;
3223 else if (temp)
3224 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3225 }
3226 }
3227 break;
3228
3229 case VEC_MERGE:
3230 gcc_assert (GET_MODE (op0) == mode);
3231 gcc_assert (GET_MODE (op1) == mode);
3232 gcc_assert (VECTOR_MODE_P (mode));
3233 op2 = avoid_constant_pool_reference (op2);
3234 if (GET_CODE (op2) == CONST_INT)
3235 {
3236 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3237 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3238 int mask = (1 << n_elts) - 1;
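	  /* Each bit of OP2 selects an element: a set bit takes it from
	     OP0, a clear bit from OP1.  */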
3239
3240 if (!(INTVAL (op2) & mask))
3241 return op1;
3242 if ((INTVAL (op2) & mask) == mask)
3243 return op0;
3244
3245 op0 = avoid_constant_pool_reference (op0);
3246 op1 = avoid_constant_pool_reference (op1);
3247 if (GET_CODE (op0) == CONST_VECTOR
3248 && GET_CODE (op1) == CONST_VECTOR)
3249 {
3250 rtvec v = rtvec_alloc (n_elts);
3251 unsigned int i;
3252
3253 for (i = 0; i < n_elts; i++)
3254 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3255 ? CONST_VECTOR_ELT (op0, i)
3256 : CONST_VECTOR_ELT (op1, i));
3257 return gen_rtx_CONST_VECTOR (mode, v);
3258 }
3259 }
3260 break;
3261
3262 default:
3263 gcc_unreachable ();
3264 }
3265
3266 return 0;
3267 }
3268
3269 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3270 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3271
3272 Works by unpacking OP into a collection of 8-bit values
3273 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3274 and then repacking them again for OUTERMODE. */
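   /* For example, the lowpart QImode subreg of a HImode (const_int 0x1234)
      unpacks to the byte array {0x34, 0x12, ...}; BYTE (renumbered so that
      byte 0 is least significant) selects 0x34, which is repacked as
      (const_int 0x34).  */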
3275
3276 static rtx
3277 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3278 enum machine_mode innermode, unsigned int byte)
3279 {
3280 /* We support up to 512-bit values (for V8DFmode). */
3281 enum {
3282 max_bitsize = 512,
3283 value_bit = 8,
3284 value_mask = (1 << value_bit) - 1
3285 };
3286 unsigned char value[max_bitsize / value_bit];
3287 int value_start;
3288 int i;
3289 int elem;
3290
3291 int num_elem;
3292 rtx * elems;
3293 int elem_bitsize;
3294 rtx result_s;
3295 rtvec result_v = NULL;
3296 enum mode_class outer_class;
3297 enum machine_mode outer_submode;
3298
3299 /* Some ports misuse CCmode. */
3300 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3301 return op;
3302
3303 /* Unpack the value. */
3304
3305 if (GET_CODE (op) == CONST_VECTOR)
3306 {
3307 num_elem = CONST_VECTOR_NUNITS (op);
3308 elems = &CONST_VECTOR_ELT (op, 0);
3309 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3310 }
3311 else
3312 {
3313 num_elem = 1;
3314 elems = &op;
3315 elem_bitsize = max_bitsize;
3316 }
3317 /* If this asserts, it is too complicated; reducing value_bit may help. */
3318 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3319 /* I don't know how to handle endianness of sub-units. */
3320 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3321
3322 for (elem = 0; elem < num_elem; elem++)
3323 {
3324 unsigned char * vp;
3325 rtx el = elems[elem];
3326
3327 /* Vectors are kept in target memory order. (This is probably
3328 a mistake.) */
3329 {
3330 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3331 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3332 / BITS_PER_UNIT);
3333 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3334 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3335 unsigned bytele = (subword_byte % UNITS_PER_WORD
3336 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3337 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3338 }
3339
3340 switch (GET_CODE (el))
3341 {
3342 case CONST_INT:
3343 for (i = 0;
3344 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3345 i += value_bit)
3346 *vp++ = INTVAL (el) >> i;
3347 /* CONST_INTs are always logically sign-extended. */
3348 for (; i < elem_bitsize; i += value_bit)
3349 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3350 break;
3351
3352 case CONST_DOUBLE:
3353 if (GET_MODE (el) == VOIDmode)
3354 {
3355 /* If this triggers, someone should have generated a
3356 CONST_INT instead. */
3357 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3358
3359 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3360 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3361 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3362 {
3363 *vp++
3364 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3365 i += value_bit;
3366 }
3367 /* It shouldn't matter what's done here, so fill it with
3368 zero. */
3369 for (; i < max_bitsize; i += value_bit)
3370 *vp++ = 0;
3371 }
3372 else
3373 {
3374 long tmp[max_bitsize / 32];
3375 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3376
3377 gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
3378 gcc_assert (bitsize <= elem_bitsize);
3379 gcc_assert (bitsize % value_bit == 0);
3380
3381 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3382 GET_MODE (el));
3383
3384 /* real_to_target produces its result in words affected by
3385 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3386 and use WORDS_BIG_ENDIAN instead; see the documentation
3387 of SUBREG in rtl.texi. */
3388 for (i = 0; i < bitsize; i += value_bit)
3389 {
3390 int ibase;
3391 if (WORDS_BIG_ENDIAN)
3392 ibase = bitsize - 1 - i;
3393 else
3394 ibase = i;
3395 *vp++ = tmp[ibase / 32] >> i % 32;
3396 }
3397
3398 /* It shouldn't matter what's done here, so fill it with
3399 zero. */
3400 for (; i < elem_bitsize; i += value_bit)
3401 *vp++ = 0;
3402 }
3403 break;
3404
3405 default:
3406 gcc_unreachable ();
3407 }
3408 }
3409
3410 /* Now, pick the right byte to start with. */
3411 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3412 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3413 will already have offset 0. */
3414 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3415 {
3416 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3417 - byte);
3418 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3419 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3420 byte = (subword_byte % UNITS_PER_WORD
3421 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3422 }
3423
3424 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3425 so if it's become negative it will instead be very large.) */
3426 gcc_assert (byte < GET_MODE_SIZE (innermode));
3427
3428 /* Convert from bytes to chunks of size value_bit. */
3429 value_start = byte * (BITS_PER_UNIT / value_bit);
3430
3431 /* Re-pack the value. */
3432
3433 if (VECTOR_MODE_P (outermode))
3434 {
3435 num_elem = GET_MODE_NUNITS (outermode);
3436 result_v = rtvec_alloc (num_elem);
3437 elems = &RTVEC_ELT (result_v, 0);
3438 outer_submode = GET_MODE_INNER (outermode);
3439 }
3440 else
3441 {
3442 num_elem = 1;
3443 elems = &result_s;
3444 outer_submode = outermode;
3445 }
3446
3447 outer_class = GET_MODE_CLASS (outer_submode);
3448 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3449
3450 gcc_assert (elem_bitsize % value_bit == 0);
3451 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3452
3453 for (elem = 0; elem < num_elem; elem++)
3454 {
3455 unsigned char *vp;
3456
3457 /* Vectors are stored in target memory order. (This is probably
3458 a mistake.) */
3459 {
3460 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3461 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3462 / BITS_PER_UNIT);
3463 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3464 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3465 unsigned bytele = (subword_byte % UNITS_PER_WORD
3466 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3467 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3468 }
3469
3470 switch (outer_class)
3471 {
3472 case MODE_INT:
3473 case MODE_PARTIAL_INT:
3474 {
3475 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3476
3477 for (i = 0;
3478 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3479 i += value_bit)
3480 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3481 for (; i < elem_bitsize; i += value_bit)
3482 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3483 << (i - HOST_BITS_PER_WIDE_INT));
3484
3485 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3486 know why. */
3487 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3488 elems[elem] = gen_int_mode (lo, outer_submode);
3489 else
3490 elems[elem] = immed_double_const (lo, hi, outer_submode);
3491 }
3492 break;
3493
3494 case MODE_FLOAT:
3495 {
3496 REAL_VALUE_TYPE r;
3497 long tmp[max_bitsize / 32];
3498
3499 /* real_from_target wants its input in words affected by
3500 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3501 and use WORDS_BIG_ENDIAN instead; see the documentation
3502 of SUBREG in rtl.texi. */
3503 for (i = 0; i < max_bitsize / 32; i++)
3504 tmp[i] = 0;
3505 for (i = 0; i < elem_bitsize; i += value_bit)
3506 {
3507 int ibase;
3508 if (WORDS_BIG_ENDIAN)
3509 ibase = elem_bitsize - 1 - i;
3510 else
3511 ibase = i;
3512 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3513 }
3514
3515 real_from_target (&r, tmp, outer_submode);
3516 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3517 }
3518 break;
3519
3520 default:
3521 gcc_unreachable ();
3522 }
3523 }
3524 if (VECTOR_MODE_P (outermode))
3525 return gen_rtx_CONST_VECTOR (outermode, result_v);
3526 else
3527 return result_s;
3528 }
3529
3530 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3531 Return 0 if no simplifications are possible. */
3532 rtx
3533 simplify_subreg (enum machine_mode outermode, rtx op,
3534 enum machine_mode innermode, unsigned int byte)
3535 {
3536 /* Little bit of sanity checking. */
3537 gcc_assert (innermode != VOIDmode);
3538 gcc_assert (outermode != VOIDmode);
3539 gcc_assert (innermode != BLKmode);
3540 gcc_assert (outermode != BLKmode);
3541
3542 gcc_assert (GET_MODE (op) == innermode
3543 || GET_MODE (op) == VOIDmode);
3544
3545 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3546 gcc_assert (byte < GET_MODE_SIZE (innermode));
3547
3548 if (outermode == innermode && !byte)
3549 return op;
3550
3551 if (GET_CODE (op) == CONST_INT
3552 || GET_CODE (op) == CONST_DOUBLE
3553 || GET_CODE (op) == CONST_VECTOR)
3554 return simplify_immed_subreg (outermode, op, innermode, byte);
3555
3556 /* Changing mode twice with SUBREG => just change it once,
3557      or not at all if changing back to op's starting mode.  */
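  /* For example, (subreg:QI (subreg:HI (reg:SI R) 0) 0) collapses to
     (subreg:QI (reg:SI R) 0).  */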
3558 if (GET_CODE (op) == SUBREG)
3559 {
3560 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3561 int final_offset = byte + SUBREG_BYTE (op);
3562 rtx newx;
3563
3564 if (outermode == innermostmode
3565 && byte == 0 && SUBREG_BYTE (op) == 0)
3566 return SUBREG_REG (op);
3567
3568       /* The SUBREG_BYTE represents the offset, as if the value were stored
3569 	 in memory.  An irritating exception is the paradoxical subreg, where
3570 	 we define SUBREG_BYTE to be 0.  On big-endian machines, this
3571 	 value should be negative.  For a moment, undo this exception.  */
3572 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3573 {
3574 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3575 if (WORDS_BIG_ENDIAN)
3576 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3577 if (BYTES_BIG_ENDIAN)
3578 final_offset += difference % UNITS_PER_WORD;
3579 }
3580 if (SUBREG_BYTE (op) == 0
3581 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3582 {
3583 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3584 if (WORDS_BIG_ENDIAN)
3585 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3586 if (BYTES_BIG_ENDIAN)
3587 final_offset += difference % UNITS_PER_WORD;
3588 }
3589
3590       /* See whether the resulting subreg will be paradoxical.  */
3591 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3592 {
3593 /* In nonparadoxical subregs we can't handle negative offsets. */
3594 if (final_offset < 0)
3595 return NULL_RTX;
3596 /* Bail out in case resulting subreg would be incorrect. */
3597 if (final_offset % GET_MODE_SIZE (outermode)
3598 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3599 return NULL_RTX;
3600 }
3601 else
3602 {
3603 int offset = 0;
3604 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3605
3606 	  /* In a paradoxical subreg, see if we are still looking at the lower
3607 	     part.  If so, our SUBREG_BYTE will be 0.  */
3608 if (WORDS_BIG_ENDIAN)
3609 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3610 if (BYTES_BIG_ENDIAN)
3611 offset += difference % UNITS_PER_WORD;
3612 if (offset == final_offset)
3613 final_offset = 0;
3614 else
3615 return NULL_RTX;
3616 }
3617
3618 /* Recurse for further possible simplifications. */
3619 newx = simplify_subreg (outermode, SUBREG_REG (op),
3620 GET_MODE (SUBREG_REG (op)),
3621 final_offset);
3622 if (newx)
3623 return newx;
3624 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3625 }
3626
3627 /* SUBREG of a hard register => just change the register number
3628 and/or mode. If the hard register is not valid in that mode,
3629 suppress this simplification. If the hard register is the stack,
3630 frame, or argument pointer, leave this as a SUBREG. */
3631
3632 if (REG_P (op)
3633 && REGNO (op) < FIRST_PSEUDO_REGISTER
3634 #ifdef CANNOT_CHANGE_MODE_CLASS
3635 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3636 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3637 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3638 #endif
3639 && ((reload_completed && !frame_pointer_needed)
3640 || (REGNO (op) != FRAME_POINTER_REGNUM
3641 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3642 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3643 #endif
3644 ))
3645 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3646 && REGNO (op) != ARG_POINTER_REGNUM
3647 #endif
3648 && REGNO (op) != STACK_POINTER_REGNUM
3649 && subreg_offset_representable_p (REGNO (op), innermode,
3650 byte, outermode))
3651 {
3652 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3653 int final_regno = subreg_hard_regno (tem, 0);
3654
3655 /* ??? We do allow it if the current REG is not valid for
3656 its mode. This is a kludge to work around how float/complex
3657 arguments are passed on 32-bit SPARC and should be fixed. */
3658 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3659 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3660 {
3661 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3662
3663 /* Propagate original regno. We don't have any way to specify
3664 the offset inside original regno, so do so only for lowpart.
3665 	     The information is used only by alias analysis, which cannot
3666 	     grok partial registers anyway.  */
3667
3668 if (subreg_lowpart_offset (outermode, innermode) == byte)
3669 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3670 return x;
3671 }
3672 }
3673
3674 /* If we have a SUBREG of a register that we are replacing and we are
3675 replacing it with a MEM, make a new MEM and try replacing the
3676 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3677 or if we would be widening it. */
3678
3679 if (MEM_P (op)
3680 && ! mode_dependent_address_p (XEXP (op, 0))
3681 /* Allow splitting of volatile memory references in case we don't
3682 	     have an instruction to move the whole thing.  */
3683 && (! MEM_VOLATILE_P (op)
3684 || ! have_insn_for (SET, innermode))
3685 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3686 return adjust_address_nv (op, outermode, byte);
3687
3688 /* Handle complex values represented as CONCAT
3689 of real and imaginary part. */
3690 if (GET_CODE (op) == CONCAT)
3691 {
3692 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3693 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3694 unsigned int final_offset;
3695 rtx res;
3696
3697 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3698 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3699 if (res)
3700 return res;
3701 /* We can at least simplify it by referring directly to the
3702 relevant part. */
3703 return gen_rtx_SUBREG (outermode, part, final_offset);
3704 }
3705
3706 /* Optimize SUBREG truncations of zero and sign extended values. */
3707 if ((GET_CODE (op) == ZERO_EXTEND
3708 || GET_CODE (op) == SIGN_EXTEND)
3709 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3710 {
3711 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3712
3713 /* If we're requesting the lowpart of a zero or sign extension,
3714 there are three possibilities. If the outermode is the same
3715 as the origmode, we can omit both the extension and the subreg.
3716 If the outermode is not larger than the origmode, we can apply
3717 the truncation without the extension. Finally, if the outermode
3718 is larger than the origmode, but both are integer modes, we
3719 can just extend to the appropriate mode. */
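      /* For example, a lowpart (subreg:HI (zero_extend:SI (reg:HI R)))
	 is simply (reg:HI R), the first case below.  */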
3720 if (bitpos == 0)
3721 {
3722 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3723 if (outermode == origmode)
3724 return XEXP (op, 0);
3725 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3726 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3727 subreg_lowpart_offset (outermode,
3728 origmode));
3729 if (SCALAR_INT_MODE_P (outermode))
3730 return simplify_gen_unary (GET_CODE (op), outermode,
3731 XEXP (op, 0), origmode);
3732 }
3733
3734 /* A SUBREG resulting from a zero extension may fold to zero if
3735 	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
3736 if (GET_CODE (op) == ZERO_EXTEND
3737 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3738 return CONST0_RTX (outermode);
3739 }
3740
3741 return NULL_RTX;
3742 }

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  /* A little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
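
/* An illustrative use of simplify_gen_subreg, not from the original
   sources: to take the low SImode word of a DImode value X, a caller
   might write

     simplify_gen_subreg (SImode, x, DImode,
                          subreg_lowpart_offset (SImode, DImode));

   which returns either a folded rtx or a fresh (subreg:SI X ...), and
   NULL_RTX only in the cases rejected above.  */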

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to four places (three for RTL
   simplification and one for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));
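      /* An illustrative note, not from the original sources (register
         A is a placeholder): the comparison mode is taken from the
         first operand with a known mode, so for
         (eq (reg:SI a) (const_int 0)) the VOIDmode const_int defers
         to the register and SImode is used.  */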

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),
                                    SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }
  return NULL;
}
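
/* An illustrative calling convention for simplify_rtx, not from the
   original sources: since NULL means "no simplification found",
   callers typically keep the original expression in that case:

     rtx tem = simplify_rtx (x);
     if (tem != NULL_RTX)
       x = tem;  */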