/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
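/* For example, with a 32-bit HOST_WIDE_INT, the pair
   (0x80000000, HWI_SIGN_EXTEND (0x80000000)) represents the 64-bit
   value -2147483648, since the high half becomes all-ones.  */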

static rtx neg_const_int (enum machine_mode, rtx);
static bool mode_signbit_p (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
\f
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */
static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */
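/* For example, in SImode on a 64-bit host this is true of
   (const_int -2147483648): its low 32 bits are 0x80000000, exactly
   the most significant bit of the mode.  */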

static bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
\f
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
\f
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
\f
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
\f
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (op1, old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (REG_P (old) && REGNO (x) == REGNO (old))
            return new;
        }
      break;

    default:
      break;
    }
  return x;
}
\f
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && !VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE (trueop))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
        abort ();
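      /* Fold a vec_duplicate of a constant: for example,
         (vec_duplicate:V4SI (const_int 1)) becomes a CONST_VECTOR of
         four 1s, and duplicating a narrower CONST_VECTOR repeats its
         elements across the wider vector.  */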
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
          || GET_CODE (trueop) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (trueop) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = trueop;
          else
            {
              enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              if (in_n_elts >= n_elts || n_elts % in_n_elts)
                abort ();
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (trueop, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
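          /* Count set bits by repeatedly clearing the lowest one:
             arg0 &= arg0 - 1 removes exactly one set bit per
             iteration.  */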
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          abort ();
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            abort ();

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (trueop));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
          }
          break;
        default:
          abort ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */
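      /* Concretely, NaN is folded to zero, and finite values outside
         the representable range of MODE saturate to the signed or
         unsigned bound computed below.  */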

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          abort ();
        }
      return immed_double_const (xl, xh, mode);
    }

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (COMPARISON_P (op)
              && (mode == BImode || STORE_FLAG_VALUE == -1)
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (plus X -1)) can become (neg X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == constm1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

          /* Similarly, (not (neg X)) is (plus X -1).  */
          if (GET_CODE (op) == NEG)
            return plus_constant (XEXP (op, 0), -1);

          /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == XOR
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == PLUS
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && mode_signbit_p (mode, XEXP (op, 1))
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
             operands other than 1, but that is not valid.  We could do a
             similar simplification for (not (lshiftrt C X)) where C is
             just the sign bit, but this doesn't seem common enough to
             bother with.  */
          if (GET_CODE (op) == ASHIFT
              && XEXP (op, 0) == const1_rtx)
            {
              temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
              return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
            }

          /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
             by reversing the comparison code if valid.  */
          if (STORE_FLAG_VALUE == -1
              && COMPARISON_P (op)
              && (reversed = reversed_comparison_code (op, NULL_RTX))
                 != UNKNOWN)
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (ashiftrt foo C)) where C is the number of bits in FOO
             minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
             so we can perform the above simplification.  */

          if (STORE_FLAG_VALUE == -1
              && GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_relational (GE, mode, VOIDmode,
                                            XEXP (op, 0), const0_rtx);

          break;

        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);

          /* (neg (plus X 1)) can become (not X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == const1_rtx)
            return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

          /* Similarly, (neg (not X)) is (plus X 1).  */
          if (GET_CODE (op) == NOT)
            return plus_constant (XEXP (op, 0), 1);

          /* (neg (minus X Y)) can become (minus Y X).  This transformation
             isn't safe for modes with signed zeros, since if X and Y are
             both +0, (minus Y X) is the same as (minus X Y).  If the
             rounding mode is towards +infinity (or -infinity) then the two
             expressions will be rounded differently.  */
          if (GET_CODE (op) == MINUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
                                        XEXP (op, 0));

          if (GET_CODE (op) == PLUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              /* (neg (plus A C)) is simplified to (minus -C A).  */
              if (GET_CODE (XEXP (op, 1)) == CONST_INT
                  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
                {
                  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
                                                   mode);
                  if (temp)
                    return simplify_gen_binary (MINUS, mode, temp,
                                                XEXP (op, 0));
                }

              /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
            }

          /* (neg (mult A B)) becomes (mult (neg A) B).
             This works even for floating-point values.  */
          if (GET_CODE (op) == MULT
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
            }

          /* NEG commutes with ASHIFT since it is multiplication.  Only do
             this if we can then eliminate the NEG (e.g., if the operand
             is a constant).  */
          if (GET_CODE (op) == ASHIFT)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
                                               mode);
              if (temp)
                return simplify_gen_binary (ASHIFT, mode, temp,
                                            XEXP (op, 1));
            }

          /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (LSHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));

          /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == LSHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));

          break;

        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

          /* Check for a sign extension of a subreg of a promoted
             variable, where the promotion is sign-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && ! SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

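          /* When POINTERS_EXTEND_UNSIGNED is zero, pointers widen by
             sign extension, so sign-extending a ptr_mode constant or
             pointer-valued register subreg to Pmode is just a pointer
             conversion that convert_memory_address can fold.  */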
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        case ZERO_EXTEND:
          /* Check for a zero extension of a subreg of a promoted
             variable, where the promotion is zero-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        default:
          break;
        }

      return 0;
    }
}
\f
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */
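/* For example, (plus (plus a b) (plus c d)) is first linearized to
   (plus (plus (plus a b) c) d), after which the reassociation steps
   below can try to combine the trailing operands.  */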

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop0, trueop1;
  rtx tem;

#ifdef ENABLE_CHECKING
  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == RTX_COMPARE
      || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
    abort ();
#endif

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (trueop0, i),
                                             CONST_VECTOR_ELT (trueop1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
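          /* Fold bitwise operations on floating-point constants by
             operating on the target's byte-level image of each value,
             obtained with real_to_target.  */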
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              if (code == AND)
                tmp0[i] &= tmp1[i];
              else if (code == IOR)
                tmp0[i] |= tmp1[i];
              else if (code == XOR)
                tmp0[i] ^= tmp1[i];
              else
                abort ();
            }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
          f0 = real_value_truncate (mode, f0);
          f1 = real_value_truncate (mode, f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

          value = real_value_truncate (mode, value);
          return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
        }
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case MOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case UDIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case UMOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
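          /* On targets that define SHIFT_COUNT_TRUNCATED, the hardware
             ignores the high-order bits of the shift count, so mirror
             that here by reducing the count modulo the mode width.  */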
1404 if (SHIFT_COUNT_TRUNCATED)
1405 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1406
1407 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1408 return 0;
1409
1410 if (code == LSHIFTRT || code == ASHIFTRT)
1411 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1412 code == ASHIFTRT);
1413 else if (code == ASHIFT)
1414 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1415 else if (code == ROTATE)
1416 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1417 else /* code == ROTATERT */
1418 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1419 break;
1420
1421 default:
1422 return 0;
1423 }
1424
1425 return immed_double_const (lv, hv, mode);
1426 }
1427
1428 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1429 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1430 {
1431 /* Even if we can't compute a constant result,
1432 there are some cases worth simplifying. */
1433
1434 switch (code)
1435 {
1436 case PLUS:
1437 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1438 when x is NaN, infinite, or finite and nonzero. They aren't
1439 when x is -0 and the rounding mode is not towards -infinity,
1440 since (-0) + 0 is then 0. */
1441 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1442 return op0;
1443
1444 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1445 transformations are safe even for IEEE. */
1446 if (GET_CODE (op0) == NEG)
1447 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1448 else if (GET_CODE (op1) == NEG)
1449 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1450
1451 /* (~a) + 1 -> -a */
1452 if (INTEGRAL_MODE_P (mode)
1453 && GET_CODE (op0) == NOT
1454 && trueop1 == const1_rtx)
1455 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1456
1457 /* Handle both-operands-constant cases. We can only add
1458 CONST_INTs to constants since the sum of relocatable symbols
1459 can't be handled by most assemblers. Don't add CONST_INT
1460 to CONST_INT since overflow won't be computed properly if wider
1461 than HOST_BITS_PER_WIDE_INT. */
1462
1463 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1464 && GET_CODE (op1) == CONST_INT)
1465 return plus_constant (op0, INTVAL (op1));
1466 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1467 && GET_CODE (op0) == CONST_INT)
1468 return plus_constant (op1, INTVAL (op0));
1469
1470 /* See if this is something like X * C - X or vice versa or
1471 if the multiplication is written as a shift. If so, we can
1472 distribute and make a new multiply, shift, or maybe just
1473 have X (if C is 2 in the example above). But don't make
1474 real multiply if we didn't have one before. */
1475
1476 if (! FLOAT_MODE_P (mode))
1477 {
1478 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1479 rtx lhs = op0, rhs = op1;
1480 int had_mult = 0;
1481
1482 if (GET_CODE (lhs) == NEG)
1483 coeff0 = -1, lhs = XEXP (lhs, 0);
1484 else if (GET_CODE (lhs) == MULT
1485 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1486 {
1487 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1488 had_mult = 1;
1489 }
1490 else if (GET_CODE (lhs) == ASHIFT
1491 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1492 && INTVAL (XEXP (lhs, 1)) >= 0
1493 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1494 {
1495 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1496 lhs = XEXP (lhs, 0);
1497 }
1498
1499 if (GET_CODE (rhs) == NEG)
1500 coeff1 = -1, rhs = XEXP (rhs, 0);
1501 else if (GET_CODE (rhs) == MULT
1502 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1503 {
1504 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1505 had_mult = 1;
1506 }
1507 else if (GET_CODE (rhs) == ASHIFT
1508 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1509 && INTVAL (XEXP (rhs, 1)) >= 0
1510 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1511 {
1512 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1513 rhs = XEXP (rhs, 0);
1514 }
1515
1516 if (rtx_equal_p (lhs, rhs))
1517 {
1518 tem = simplify_gen_binary (MULT, mode, lhs,
1519 GEN_INT (coeff0 + coeff1));
1520 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1521 }
1522 }
1523
1524 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1525 if ((GET_CODE (op1) == CONST_INT
1526 || GET_CODE (op1) == CONST_DOUBLE)
1527 && GET_CODE (op0) == XOR
1528 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1529 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1530 && mode_signbit_p (mode, op1))
1531 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1532 simplify_gen_binary (XOR, mode, op1,
1533 XEXP (op0, 1)));
1534
1535 /* If one of the operands is a PLUS or a MINUS, see if we can
1536 simplify this by the associative law.
1537 Don't use the associative law for floating point.
1538 The inaccuracy makes it nonassociative,
1539 and subtle programs can break if operations are associated. */
1540
1541 if (INTEGRAL_MODE_P (mode)
1542 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1543 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1544 || (GET_CODE (op0) == CONST
1545 && GET_CODE (XEXP (op0, 0)) == PLUS)
1546 || (GET_CODE (op1) == CONST
1547 && GET_CODE (XEXP (op1, 0)) == PLUS))
1548 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1549 return tem;
1550
1551 /* Reassociate floating point addition only when the user
1552 specifies unsafe math optimizations. */
1553 if (FLOAT_MODE_P (mode)
1554 && flag_unsafe_math_optimizations)
1555 {
1556 tem = simplify_associative_operation (code, mode, op0, op1);
1557 if (tem)
1558 return tem;
1559 }
1560 break;
1561
1562 case COMPARE:
1563 #ifdef HAVE_cc0
1564 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1565 using cc0, in which case we want to leave it as a COMPARE
1566 so we can distinguish it from a register-register-copy.
1567
1568 In IEEE floating point, x-0 is not the same as x. */
1569
1570 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1571 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1572 && trueop1 == CONST0_RTX (mode))
1573 return op0;
1574 #endif
1575
1576 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1577 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1578 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1579 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1580 {
1581 rtx xop00 = XEXP (op0, 0);
1582 rtx xop10 = XEXP (op1, 0);
1583
1584 #ifdef HAVE_cc0
1585 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1586 #else
1587 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1588 && GET_MODE (xop00) == GET_MODE (xop10)
1589 && REGNO (xop00) == REGNO (xop10)
1590 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1591 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1592 #endif
1593 return xop00;
1594 }
1595 break;
1596
1597 case MINUS:
1598 /* We can't assume x-x is 0 even with non-IEEE floating point,
1599 but since it is zero except in very strange circumstances, we
1600 will treat it as zero with -funsafe-math-optimizations. */
1601 if (rtx_equal_p (trueop0, trueop1)
1602 && ! side_effects_p (op0)
1603 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1604 return CONST0_RTX (mode);
1605
1606 /* Change subtraction from zero into negation. (0 - x) is the
1607 same as -x when x is NaN, infinite, or finite and nonzero.
1608 But if the mode has signed zeros, and does not round towards
1609 -infinity, then 0 - 0 is 0, not -0. */
1610 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1611 return simplify_gen_unary (NEG, mode, op1, mode);
1612
1613 /* (-1 - a) is ~a. */
1614 if (trueop0 == constm1_rtx)
1615 return simplify_gen_unary (NOT, mode, op1, mode);
1616
1617 /* Subtracting 0 has no effect unless the mode has signed zeros
1618 and supports rounding towards -infinity. In such a case,
1619 0 - 0 is -0. */
1620 if (!(HONOR_SIGNED_ZEROS (mode)
1621 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1622 && trueop1 == CONST0_RTX (mode))
1623 return op0;
1624
1625 /* See if this is something like X * C - X or vice versa or
1626 if the multiplication is written as a shift. If so, we can
1627 distribute and make a new multiply, shift, or maybe just
1628 have X (if C is 2 in the example above). But don't make
1629 real multiply if we didn't have one before. */
1630
1631 if (! FLOAT_MODE_P (mode))
1632 {
1633 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1634 rtx lhs = op0, rhs = op1;
1635 int had_mult = 0;
1636
1637 if (GET_CODE (lhs) == NEG)
1638 coeff0 = -1, lhs = XEXP (lhs, 0);
1639 else if (GET_CODE (lhs) == MULT
1640 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1641 {
1642 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1643 had_mult = 1;
1644 }
1645 else if (GET_CODE (lhs) == ASHIFT
1646 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1647 && INTVAL (XEXP (lhs, 1)) >= 0
1648 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1649 {
1650 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1651 lhs = XEXP (lhs, 0);
1652 }
1653
1654 if (GET_CODE (rhs) == NEG)
1655 coeff1 = - 1, rhs = XEXP (rhs, 0);
1656 else if (GET_CODE (rhs) == MULT
1657 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1658 {
1659 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1660 had_mult = 1;
1661 }
1662 else if (GET_CODE (rhs) == ASHIFT
1663 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1664 && INTVAL (XEXP (rhs, 1)) >= 0
1665 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1666 {
1667 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1668 rhs = XEXP (rhs, 0);
1669 }
1670
1671 if (rtx_equal_p (lhs, rhs))
1672 {
1673 tem = simplify_gen_binary (MULT, mode, lhs,
1674 GEN_INT (coeff0 - coeff1));
1675 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1676 }
1677 }
1678
1679 /* (a - (-b)) -> (a + b). True even for IEEE. */
1680 if (GET_CODE (op1) == NEG)
1681 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1682
1683 /* (-x - c) may be simplified as (-c - x). */
1684 if (GET_CODE (op0) == NEG
1685 && (GET_CODE (op1) == CONST_INT
1686 || GET_CODE (op1) == CONST_DOUBLE))
1687 {
1688 tem = simplify_unary_operation (NEG, mode, op1, mode);
1689 if (tem)
1690 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1691 }
1692
1693 /* If one of the operands is a PLUS or a MINUS, see if we can
1694 simplify this by the associative law.
1695 Don't use the associative law for floating point.
1696 The inaccuracy makes it nonassociative,
1697 and subtle programs can break if operations are associated. */
1698
1699 if (INTEGRAL_MODE_P (mode)
1700 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1701 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1702 || (GET_CODE (op0) == CONST
1703 && GET_CODE (XEXP (op0, 0)) == PLUS)
1704 || (GET_CODE (op1) == CONST
1705 && GET_CODE (XEXP (op1, 0)) == PLUS))
1706 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1707 return tem;
1708
1709 /* Don't let a relocatable value get a negative coeff. */
1710 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1711 return simplify_gen_binary (PLUS, mode,
1712 op0,
1713 neg_const_int (mode, op1));
1714
1715 /* (x - (x & y)) -> (x & ~y) */
1716 if (GET_CODE (op1) == AND)
1717 {
1718 if (rtx_equal_p (op0, XEXP (op1, 0)))
1719 {
1720 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1721 GET_MODE (XEXP (op1, 1)));
1722 return simplify_gen_binary (AND, mode, op0, tem);
1723 }
1724 if (rtx_equal_p (op0, XEXP (op1, 1)))
1725 {
1726 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1727 GET_MODE (XEXP (op1, 0)));
1728 return simplify_gen_binary (AND, mode, op0, tem);
1729 }
1730 }
1731 break;
1732
1733 case MULT:
1734 if (trueop1 == constm1_rtx)
1735 return simplify_gen_unary (NEG, mode, op0, mode);
1736
1737 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1738 x is NaN, since x * 0 is then also NaN. Nor is it valid
1739 when the mode has signed zeros, since multiplying a negative
1740 number by 0 will give -0, not 0. */
1741 if (!HONOR_NANS (mode)
1742 && !HONOR_SIGNED_ZEROS (mode)
1743 && trueop1 == CONST0_RTX (mode)
1744 && ! side_effects_p (op0))
1745 return op1;
1746
1747 /* In IEEE floating point, x*1 is not equivalent to x for
1748 signalling NaNs. */
1749 if (!HONOR_SNANS (mode)
1750 && trueop1 == CONST1_RTX (mode))
1751 return op0;
1752
1753 /* Convert multiply by constant power of two into shift unless
1754 we are still generating RTL. This test is a kludge. */
1755 if (GET_CODE (trueop1) == CONST_INT
1756 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1757 /* If the mode is larger than the host word size, and the
1758 uppermost bit is set, then this isn't a power of two due
1759 to implicit sign extension. */
1760 && (width <= HOST_BITS_PER_WIDE_INT
1761 || val != HOST_BITS_PER_WIDE_INT - 1)
1762 && ! rtx_equal_function_value_matters)
1763 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1764
1765 /* x*2 is x+x and x*(-1) is -x */
1766 if (GET_CODE (trueop1) == CONST_DOUBLE
1767 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1768 && GET_MODE (op0) == mode)
1769 {
1770 REAL_VALUE_TYPE d;
1771 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1772
1773 if (REAL_VALUES_EQUAL (d, dconst2))
1774 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1775
1776 if (REAL_VALUES_EQUAL (d, dconstm1))
1777 return simplify_gen_unary (NEG, mode, op0, mode);
1778 }
1779
1780 /* Reassociate multiplication, but for floating point MULTs
1781 only when the user specifies unsafe math optimizations. */
1782 if (! FLOAT_MODE_P (mode)
1783 || flag_unsafe_math_optimizations)
1784 {
1785 tem = simplify_associative_operation (code, mode, op0, op1);
1786 if (tem)
1787 return tem;
1788 }
1789 break;
1790
1791 case IOR:
1792 if (trueop1 == const0_rtx)
1793 return op0;
1794 if (GET_CODE (trueop1) == CONST_INT
1795 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1796 == GET_MODE_MASK (mode)))
1797 return op1;
1798 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1799 return op0;
1800 /* A | (~A) -> -1 */
1801 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1802 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1803 && ! side_effects_p (op0)
1804 && GET_MODE_CLASS (mode) != MODE_CC)
1805 return constm1_rtx;
1806 tem = simplify_associative_operation (code, mode, op0, op1);
1807 if (tem)
1808 return tem;
1809 break;
1810
1811 case XOR:
1812 if (trueop1 == const0_rtx)
1813 return op0;
1814 if (GET_CODE (trueop1) == CONST_INT
1815 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1816 == GET_MODE_MASK (mode)))
1817 return simplify_gen_unary (NOT, mode, op0, mode);
1818 if (trueop0 == trueop1
1819 && ! side_effects_p (op0)
1820 && GET_MODE_CLASS (mode) != MODE_CC)
1821 return const0_rtx;
1822
1823 /* Canonicalize XOR of the most significant bit to PLUS. */
1824 if ((GET_CODE (op1) == CONST_INT
1825 || GET_CODE (op1) == CONST_DOUBLE)
1826 && mode_signbit_p (mode, op1))
1827 return simplify_gen_binary (PLUS, mode, op0, op1);
1828 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
1829 if ((GET_CODE (op1) == CONST_INT
1830 || GET_CODE (op1) == CONST_DOUBLE)
1831 && GET_CODE (op0) == PLUS
1832 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1833 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1834 && mode_signbit_p (mode, XEXP (op0, 1)))
1835 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1836 simplify_gen_binary (XOR, mode, op1,
1837 XEXP (op0, 1)));
1838
1839 tem = simplify_associative_operation (code, mode, op0, op1);
1840 if (tem)
1841 return tem;
1842 break;
1843
1844 case AND:
1845 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1846 return const0_rtx;
1847 if (GET_CODE (trueop1) == CONST_INT
1848 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1849 == GET_MODE_MASK (mode)))
1850 return op0;
1851 if (trueop0 == trueop1 && ! side_effects_p (op0)
1852 && GET_MODE_CLASS (mode) != MODE_CC)
1853 return op0;
1854 /* A & (~A) -> 0 */
1855 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1856 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1857 && ! side_effects_p (op0)
1858 && GET_MODE_CLASS (mode) != MODE_CC)
1859 return const0_rtx;
1860 tem = simplify_associative_operation (code, mode, op0, op1);
1861 if (tem)
1862 return tem;
1863 break;
1864
1865 case UDIV:
1866 /* 0/x is 0 (or x&0 if x has side-effects). */
1867 if (trueop0 == const0_rtx)
1868 return side_effects_p (op1)
1869 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
1870 : const0_rtx;
1871 /* x/1 is x. */
1872 if (trueop1 == const1_rtx)
1873 {
1874 /* Handle narrowing UDIV. */
1875 rtx x = gen_lowpart_common (mode, op0);
1876 if (x)
1877 return x;
1878 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1879 return gen_lowpart_SUBREG (mode, op0);
1880 return op0;
1881 }
1882 /* Convert divide by power of two into shift. */
1883 if (GET_CODE (trueop1) == CONST_INT
1884 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1885 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
1886 break;
1887
1888 case DIV:
1889 /* Handle floating point and integers separately. */
1890 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1891 {
1892 /* Maybe change 0.0 / x to 0.0. This transformation isn't
1893 safe for modes with NaNs, since 0.0 / 0.0 will then be
1894 NaN rather than 0.0. Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0.  */
1896 if (trueop0 == CONST0_RTX (mode)
1897 && !HONOR_NANS (mode)
1898 && !HONOR_SIGNED_ZEROS (mode)
1899 && ! side_effects_p (op1))
1900 return op0;
1901 /* x/1.0 is x. */
1902 if (trueop1 == CONST1_RTX (mode)
1903 && !HONOR_SNANS (mode))
1904 return op0;
1905
1906 if (GET_CODE (trueop1) == CONST_DOUBLE
1907 && trueop1 != CONST0_RTX (mode))
1908 {
1909 REAL_VALUE_TYPE d;
1910 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1911
1912 /* x/-1.0 is -x. */
1913 if (REAL_VALUES_EQUAL (d, dconstm1)
1914 && !HONOR_SNANS (mode))
1915 return simplify_gen_unary (NEG, mode, op0, mode);
1916
1917 /* Change FP division by a constant into multiplication.
1918 Only do this with -funsafe-math-optimizations. */
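	      /* For example, x/4.0 becomes x*0.25.  For divisors like
		 5.0 whose reciprocal is not exactly representable, the
		 product can differ from the quotient in the last ulp,
		 hence the guard below.  */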
1919 if (flag_unsafe_math_optimizations
1920 && !REAL_VALUES_EQUAL (d, dconst0))
1921 {
1922 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
1923 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1924 return simplify_gen_binary (MULT, mode, op0, tem);
1925 }
1926 }
1927 }
1928 else
1929 {
1930 /* 0/x is 0 (or x&0 if x has side-effects). */
1931 if (trueop0 == const0_rtx)
1932 return side_effects_p (op1)
1933 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
1934 : const0_rtx;
1935 /* x/1 is x. */
1936 if (trueop1 == const1_rtx)
1937 {
1938 /* Handle narrowing DIV. */
1939 rtx x = gen_lowpart_common (mode, op0);
1940 if (x)
1941 return x;
1942 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1943 return gen_lowpart_SUBREG (mode, op0);
1944 return op0;
1945 }
1946 /* x/-1 is -x. */
1947 if (trueop1 == constm1_rtx)
1948 {
1949 rtx x = gen_lowpart_common (mode, op0);
1950 if (!x)
1951 x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1952 ? gen_lowpart_SUBREG (mode, op0) : op0;
1953 return simplify_gen_unary (NEG, mode, x, mode);
1954 }
1955 }
1956 break;
1957
1958 case UMOD:
1959 /* 0%x is 0 (or x&0 if x has side-effects). */
1960 if (trueop0 == const0_rtx)
1961 return side_effects_p (op1)
1962 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
1963 : const0_rtx;
      /* x%1 is 0 (or x&0 if x has side-effects).  */
1965 if (trueop1 == const1_rtx)
1966 return side_effects_p (op0)
1967 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
1968 : const0_rtx;
1969 /* Implement modulus by power of two as AND. */
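      /* For example, (umod X 16) becomes (and X 15); an unsigned
	 modulus by 2**N just keeps the low N bits.  */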
1970 if (GET_CODE (trueop1) == CONST_INT
1971 && exact_log2 (INTVAL (trueop1)) > 0)
1972 return simplify_gen_binary (AND, mode, op0,
1973 GEN_INT (INTVAL (op1) - 1));
1974 break;
1975
1976 case MOD:
1977 /* 0%x is 0 (or x&0 if x has side-effects). */
1978 if (trueop0 == const0_rtx)
1979 return side_effects_p (op1)
1980 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
1981 : const0_rtx;
1982 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
1983 if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
1984 return side_effects_p (op0)
1985 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
1986 : const0_rtx;
1987 break;
1988
1989 case ROTATERT:
1990 case ROTATE:
1991 case ASHIFTRT:
1992 /* Rotating ~0 always results in ~0. */
1993 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1994 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1995 && ! side_effects_p (op1))
1996 return op0;
1997
1998 /* Fall through.... */
1999
2000 case ASHIFT:
2001 case LSHIFTRT:
2002 if (trueop1 == const0_rtx)
2003 return op0;
2004 if (trueop0 == const0_rtx && ! side_effects_p (op1))
2005 return op0;
2006 break;
2007
2008 case SMIN:
2009 if (width <= HOST_BITS_PER_WIDE_INT
2010 && GET_CODE (trueop1) == CONST_INT
	  && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2012 && ! side_effects_p (op0))
2013 return op1;
2014 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2015 return op0;
2016 tem = simplify_associative_operation (code, mode, op0, op1);
2017 if (tem)
2018 return tem;
2019 break;
2020
2021 case SMAX:
2022 if (width <= HOST_BITS_PER_WIDE_INT
2023 && GET_CODE (trueop1) == CONST_INT
2024 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2025 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2026 && ! side_effects_p (op0))
2027 return op1;
2028 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2029 return op0;
2030 tem = simplify_associative_operation (code, mode, op0, op1);
2031 if (tem)
2032 return tem;
2033 break;
2034
2035 case UMIN:
2036 if (trueop1 == const0_rtx && ! side_effects_p (op0))
2037 return op1;
2038 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2039 return op0;
2040 tem = simplify_associative_operation (code, mode, op0, op1);
2041 if (tem)
2042 return tem;
2043 break;
2044
2045 case UMAX:
2046 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2047 return op1;
2048 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2049 return op0;
2050 tem = simplify_associative_operation (code, mode, op0, op1);
2051 if (tem)
2052 return tem;
2053 break;
2054
2055 case SS_PLUS:
2056 case US_PLUS:
2057 case SS_MINUS:
2058 case US_MINUS:
2059 /* ??? There are simplifications that can be done. */
2060 return 0;
2061
2062 case VEC_SELECT:
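      /* For example, (vec_select:SI (const_vector:V4SI [A B C D])
	 (parallel [(const_int 2)])) folds to the element C.  */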
2063 if (!VECTOR_MODE_P (mode))
2064 {
2065 if (!VECTOR_MODE_P (GET_MODE (trueop0))
2066 || (mode
2067 != GET_MODE_INNER (GET_MODE (trueop0)))
2068 || GET_CODE (trueop1) != PARALLEL
2069 || XVECLEN (trueop1, 0) != 1
2070 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
2071 abort ();
2072
2073 if (GET_CODE (trueop0) == CONST_VECTOR)
2074 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
2075 }
2076 else
2077 {
2078 if (!VECTOR_MODE_P (GET_MODE (trueop0))
2079 || (GET_MODE_INNER (mode)
2080 != GET_MODE_INNER (GET_MODE (trueop0)))
2081 || GET_CODE (trueop1) != PARALLEL)
2082 abort ();
2083
2084 if (GET_CODE (trueop0) == CONST_VECTOR)
2085 {
2086 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2087 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2088 rtvec v = rtvec_alloc (n_elts);
2089 unsigned int i;
2090
2091 if (XVECLEN (trueop1, 0) != (int) n_elts)
2092 abort ();
2093 for (i = 0; i < n_elts; i++)
2094 {
2095 rtx x = XVECEXP (trueop1, 0, i);
2096
2097 if (GET_CODE (x) != CONST_INT)
2098 abort ();
2099 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
2100 }
2101
2102 return gen_rtx_CONST_VECTOR (mode, v);
2103 }
2104 }
2105 return 0;
2106 case VEC_CONCAT:
2107 {
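	/* For example, (vec_concat:V2SI (const_int 1) (const_int 2))
	   folds to (const_vector:V2SI [1 2]).  */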
2108 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2109 ? GET_MODE (trueop0)
2110 : GET_MODE_INNER (mode));
2111 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2112 ? GET_MODE (trueop1)
2113 : GET_MODE_INNER (mode));
2114
2115 if (!VECTOR_MODE_P (mode)
2116 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2117 != GET_MODE_SIZE (mode)))
2118 abort ();
2119
2120 if ((VECTOR_MODE_P (op0_mode)
2121 && (GET_MODE_INNER (mode)
2122 != GET_MODE_INNER (op0_mode)))
2123 || (!VECTOR_MODE_P (op0_mode)
2124 && GET_MODE_INNER (mode) != op0_mode))
2125 abort ();
2126
2127 if ((VECTOR_MODE_P (op1_mode)
2128 && (GET_MODE_INNER (mode)
2129 != GET_MODE_INNER (op1_mode)))
2130 || (!VECTOR_MODE_P (op1_mode)
2131 && GET_MODE_INNER (mode) != op1_mode))
2132 abort ();
2133
2134 if ((GET_CODE (trueop0) == CONST_VECTOR
2135 || GET_CODE (trueop0) == CONST_INT
2136 || GET_CODE (trueop0) == CONST_DOUBLE)
2137 && (GET_CODE (trueop1) == CONST_VECTOR
2138 || GET_CODE (trueop1) == CONST_INT
2139 || GET_CODE (trueop1) == CONST_DOUBLE))
2140 {
2141 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2142 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2143 rtvec v = rtvec_alloc (n_elts);
2144 unsigned int i;
2145 unsigned in_n_elts = 1;
2146
2147 if (VECTOR_MODE_P (op0_mode))
2148 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2149 for (i = 0; i < n_elts; i++)
2150 {
2151 if (i < in_n_elts)
2152 {
2153 if (!VECTOR_MODE_P (op0_mode))
2154 RTVEC_ELT (v, i) = trueop0;
2155 else
2156 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2157 }
2158 else
2159 {
2160 if (!VECTOR_MODE_P (op1_mode))
2161 RTVEC_ELT (v, i) = trueop1;
2162 else
2163 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2164 i - in_n_elts);
2165 }
2166 }
2167
2168 return gen_rtx_CONST_VECTOR (mode, v);
2169 }
2170 }
2171 return 0;
2172
2173 default:
2174 abort ();
2175 }
2176
2177 return 0;
2178 }
2179
2180 /* Get the integer argument values in two forms:
2181 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
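  /* For example, a QImode operand of 0xff yields ARG0 == 255 and,
     once sign-extended below, ARG0S == -1.  */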
2182
2183 arg0 = INTVAL (trueop0);
2184 arg1 = INTVAL (trueop1);
2185
2186 if (width < HOST_BITS_PER_WIDE_INT)
2187 {
2188 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2189 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2190
2191 arg0s = arg0;
2192 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2193 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2194
2195 arg1s = arg1;
2196 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2197 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2198 }
2199 else
2200 {
2201 arg0s = arg0;
2202 arg1s = arg1;
2203 }
2204
2205 /* Compute the value of the arithmetic. */
2206
2207 switch (code)
2208 {
2209 case PLUS:
2210 val = arg0s + arg1s;
2211 break;
2212
2213 case MINUS:
2214 val = arg0s - arg1s;
2215 break;
2216
2217 case MULT:
2218 val = arg0s * arg1s;
2219 break;
2220
2221 case DIV:
2222 if (arg1s == 0
2223 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2224 && arg1s == -1))
2225 return 0;
2226 val = arg0s / arg1s;
2227 break;
2228
2229 case MOD:
2230 if (arg1s == 0
2231 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2232 && arg1s == -1))
2233 return 0;
2234 val = arg0s % arg1s;
2235 break;
2236
2237 case UDIV:
2238 if (arg1 == 0
2239 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2240 && arg1s == -1))
2241 return 0;
2242 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2243 break;
2244
2245 case UMOD:
2246 if (arg1 == 0
2247 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2248 && arg1s == -1))
2249 return 0;
2250 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2251 break;
2252
2253 case AND:
2254 val = arg0 & arg1;
2255 break;
2256
2257 case IOR:
2258 val = arg0 | arg1;
2259 break;
2260
2261 case XOR:
2262 val = arg0 ^ arg1;
2263 break;
2264
2265 case LSHIFTRT:
2266 /* If shift count is undefined, don't fold it; let the machine do
2267 what it wants. But truncate it if the machine will do that. */
2268 if (arg1 < 0)
2269 return 0;
2270
2271 if (SHIFT_COUNT_TRUNCATED)
2272 arg1 %= width;
2273
2274 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2275 break;
2276
2277 case ASHIFT:
2278 if (arg1 < 0)
2279 return 0;
2280
2281 if (SHIFT_COUNT_TRUNCATED)
2282 arg1 %= width;
2283
2284 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2285 break;
2286
2287 case ASHIFTRT:
2288 if (arg1 < 0)
2289 return 0;
2290
2291 if (SHIFT_COUNT_TRUNCATED)
2292 arg1 %= width;
2293
2294 val = arg0s >> arg1;
2295
2296 /* Bootstrap compiler may not have sign extended the right shift.
	 Manually extend the sign to ensure bootstrap cc matches gcc.  */
2298 if (arg0s < 0 && arg1 > 0)
2299 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2300
2301 break;
2302
2303 case ROTATERT:
2304 if (arg1 < 0)
2305 return 0;
2306
2307 arg1 %= width;
2308 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2309 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2310 break;
2311
2312 case ROTATE:
2313 if (arg1 < 0)
2314 return 0;
2315
2316 arg1 %= width;
2317 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2318 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2319 break;
2320
2321 case COMPARE:
2322 /* Do nothing here. */
2323 return 0;
2324
2325 case SMIN:
2326 val = arg0s <= arg1s ? arg0s : arg1s;
2327 break;
2328
2329 case UMIN:
2330 val = ((unsigned HOST_WIDE_INT) arg0
2331 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2332 break;
2333
2334 case SMAX:
2335 val = arg0s > arg1s ? arg0s : arg1s;
2336 break;
2337
2338 case UMAX:
2339 val = ((unsigned HOST_WIDE_INT) arg0
2340 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2341 break;
2342
2343 case SS_PLUS:
2344 case US_PLUS:
2345 case SS_MINUS:
2346 case US_MINUS:
2347 /* ??? There are simplifications that can be done. */
2348 return 0;
2349
2350 default:
2351 abort ();
2352 }
2353
2354 val = trunc_int_for_mode (val, mode);
2355
2356 return GEN_INT (val);
2357 }
2358 \f
2359 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2360 PLUS or MINUS.
2361
   Rather than testing for specific cases, we do this by a brute-force method
2363 and do all possible simplifications until no more changes occur. Then
2364 we rebuild the operation.
2365
2366 If FORCE is true, then always generate the rtx. This is used to
2367 canonicalize stuff emitted from simplify_gen_binary. Note that this
2368 can still fail if the rtx is too complex. It won't fail just because
2369 the result is not 'simpler' than the input, however. */
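/* For example, (minus (plus A B) A) is flattened into the operand
   list {+A, +B, -A}; the pairwise simplification loop cancels A
   against -A, and the result is rebuilt as just B.  */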
2370
2371 struct simplify_plus_minus_op_data
2372 {
2373 rtx op;
2374 int neg;
2375 };
2376
2377 static int
2378 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2379 {
2380 const struct simplify_plus_minus_op_data *d1 = p1;
2381 const struct simplify_plus_minus_op_data *d2 = p2;
2382
2383 return (commutative_operand_precedence (d2->op)
2384 - commutative_operand_precedence (d1->op));
2385 }
2386
2387 static rtx
2388 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2389 rtx op1, int force)
2390 {
2391 struct simplify_plus_minus_op_data ops[8];
2392 rtx result, tem;
2393 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2394 int first, changed;
2395 int i, j;
2396
2397 memset (ops, 0, sizeof ops);
2398
2399 /* Set up the two operands and then expand them until nothing has been
2400 changed. If we run out of room in our array, give up; this should
2401 almost never happen. */
2402
2403 ops[0].op = op0;
2404 ops[0].neg = 0;
2405 ops[1].op = op1;
2406 ops[1].neg = (code == MINUS);
2407
2408 do
2409 {
2410 changed = 0;
2411
2412 for (i = 0; i < n_ops; i++)
2413 {
2414 rtx this_op = ops[i].op;
2415 int this_neg = ops[i].neg;
2416 enum rtx_code this_code = GET_CODE (this_op);
2417
2418 switch (this_code)
2419 {
2420 case PLUS:
2421 case MINUS:
2422 if (n_ops == 7)
2423 return NULL_RTX;
2424
2425 ops[n_ops].op = XEXP (this_op, 1);
2426 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2427 n_ops++;
2428
2429 ops[i].op = XEXP (this_op, 0);
2430 input_ops++;
2431 changed = 1;
2432 break;
2433
2434 case NEG:
2435 ops[i].op = XEXP (this_op, 0);
2436 ops[i].neg = ! this_neg;
2437 changed = 1;
2438 break;
2439
2440 case CONST:
2441 if (n_ops < 7
2442 && GET_CODE (XEXP (this_op, 0)) == PLUS
2443 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2444 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2445 {
2446 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2447 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2448 ops[n_ops].neg = this_neg;
2449 n_ops++;
2450 input_consts++;
2451 changed = 1;
2452 }
2453 break;
2454
2455 case NOT:
2456 /* ~a -> (-a - 1) */
2457 if (n_ops != 7)
2458 {
2459 ops[n_ops].op = constm1_rtx;
2460 ops[n_ops++].neg = this_neg;
2461 ops[i].op = XEXP (this_op, 0);
2462 ops[i].neg = !this_neg;
2463 changed = 1;
2464 }
2465 break;
2466
2467 case CONST_INT:
2468 if (this_neg)
2469 {
2470 ops[i].op = neg_const_int (mode, this_op);
2471 ops[i].neg = 0;
2472 changed = 1;
2473 }
2474 break;
2475
2476 default:
2477 break;
2478 }
2479 }
2480 }
2481 while (changed);
2482
  /* If we only have two operands, we can't do anything except when
     FORCE is set.  */
2484 if (n_ops <= 2 && !force)
2485 return NULL_RTX;
2486
2487 /* Count the number of CONSTs we didn't split above. */
2488 for (i = 0; i < n_ops; i++)
2489 if (GET_CODE (ops[i].op) == CONST)
2490 input_consts++;
2491
2492 /* Now simplify each pair of operands until nothing changes. The first
2493 time through just simplify constants against each other. */
2494
2495 first = 1;
2496 do
2497 {
2498 changed = first;
2499
2500 for (i = 0; i < n_ops - 1; i++)
2501 for (j = i + 1; j < n_ops; j++)
2502 {
2503 rtx lhs = ops[i].op, rhs = ops[j].op;
2504 int lneg = ops[i].neg, rneg = ops[j].neg;
2505
2506 if (lhs != 0 && rhs != 0
2507 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2508 {
2509 enum rtx_code ncode = PLUS;
2510
2511 if (lneg != rneg)
2512 {
2513 ncode = MINUS;
2514 if (lneg)
2515 tem = lhs, lhs = rhs, rhs = tem;
2516 }
2517 else if (swap_commutative_operands_p (lhs, rhs))
2518 tem = lhs, lhs = rhs, rhs = tem;
2519
2520 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2521
2522 /* Reject "simplifications" that just wrap the two
2523 arguments in a CONST. Failure to do so can result
2524 in infinite recursion with simplify_binary_operation
2525 when it calls us to simplify CONST operations. */
2526 if (tem
2527 && ! (GET_CODE (tem) == CONST
2528 && GET_CODE (XEXP (tem, 0)) == ncode
2529 && XEXP (XEXP (tem, 0), 0) == lhs
2530 && XEXP (XEXP (tem, 0), 1) == rhs)
2531 /* Don't allow -x + -1 -> ~x simplifications in the
2532 first pass. This allows us the chance to combine
2533 the -1 with other constants. */
2534 && ! (first
2535 && GET_CODE (tem) == NOT
2536 && XEXP (tem, 0) == rhs))
2537 {
2538 lneg &= rneg;
2539 if (GET_CODE (tem) == NEG)
2540 tem = XEXP (tem, 0), lneg = !lneg;
2541 if (GET_CODE (tem) == CONST_INT && lneg)
2542 tem = neg_const_int (mode, tem), lneg = 0;
2543
2544 ops[i].op = tem;
2545 ops[i].neg = lneg;
2546 ops[j].op = NULL_RTX;
2547 changed = 1;
2548 }
2549 }
2550 }
2551
2552 first = 0;
2553 }
2554 while (changed);
2555
2556 /* Pack all the operands to the lower-numbered entries. */
2557 for (i = 0, j = 0; j < n_ops; j++)
2558 if (ops[j].op)
2559 ops[i++] = ops[j];
2560 n_ops = i;
2561
  /* Sort the operations by decreasing commutative_operand_precedence,
     the ordering that swap_commutative_operands_p also uses.  */
2563 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2564
2565 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2566 if (n_ops == 2
2567 && GET_CODE (ops[1].op) == CONST_INT
2568 && CONSTANT_P (ops[0].op)
2569 && ops[0].neg)
2570 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2571
2572 /* We suppressed creation of trivial CONST expressions in the
2573 combination loop to avoid recursion. Create one manually now.
2574 The combination loop should have ensured that there is exactly
2575 one CONST_INT, and the sort will have ensured that it is last
2576 in the array and that any other constant will be next-to-last. */
2577
2578 if (n_ops > 1
2579 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2580 && CONSTANT_P (ops[n_ops - 2].op))
2581 {
2582 rtx value = ops[n_ops - 1].op;
2583 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2584 value = neg_const_int (mode, value);
2585 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2586 n_ops--;
2587 }
2588
2589 /* Count the number of CONSTs that we generated. */
2590 n_consts = 0;
2591 for (i = 0; i < n_ops; i++)
2592 if (GET_CODE (ops[i].op) == CONST)
2593 n_consts++;
2594
2595 /* Give up if we didn't reduce the number of operands we had. Make
2596 sure we count a CONST as two operands. If we have the same
2597 number of operands, but have made more CONSTs than before, this
2598 is also an improvement, so accept it. */
2599 if (!force
2600 && (n_ops + n_consts > input_ops
2601 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2602 return NULL_RTX;
2603
2604 /* Put a non-negated operand first, if possible. */
2605
2606 for (i = 0; i < n_ops && ops[i].neg; i++)
2607 continue;
2608 if (i == n_ops)
2609 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2610 else if (i != 0)
2611 {
2612 tem = ops[0].op;
2613 ops[0] = ops[i];
2614 ops[i].op = tem;
2615 ops[i].neg = 1;
2616 }
2617
2618 /* Now make the result by performing the requested operations. */
2619 result = ops[0].op;
2620 for (i = 1; i < n_ops; i++)
2621 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2622 mode, result, ops[i].op);
2623
2624 return result;
2625 }
2626
2627 /* Like simplify_binary_operation except used for relational operators.
2628 MODE is the mode of the result. If MODE is VOIDmode, both operands must
2629 also be VOIDmode.
2630
   CMP_MODE specifies the mode in which the comparison is done, so it is
2632 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2633 the operands or, if both are VOIDmode, the operands are compared in
2634 "infinite precision". */
2635 rtx
2636 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2637 enum machine_mode cmp_mode, rtx op0, rtx op1)
2638 {
2639 rtx tem, trueop0, trueop1;
2640
2641 if (cmp_mode == VOIDmode)
2642 cmp_mode = GET_MODE (op0);
2643 if (cmp_mode == VOIDmode)
2644 cmp_mode = GET_MODE (op1);
2645
2646 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2647 if (tem)
2648 {
2649 #ifdef FLOAT_STORE_FLAG_VALUE
2650 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2651 {
2652 if (tem == const0_rtx)
2653 return CONST0_RTX (mode);
	else
2655 {
2656 REAL_VALUE_TYPE val;
2657 val = FLOAT_STORE_FLAG_VALUE (mode);
2658 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2659 }
2660 }
2661 #endif
2662
2663 return tem;
2664 }
2665
2666 /* For the following tests, ensure const0_rtx is op1. */
2667 if (swap_commutative_operands_p (op0, op1)
2668 || (op0 == const0_rtx && op1 != const0_rtx))
2669 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2670
2671 /* If op0 is a compare, extract the comparison arguments from it. */
2672 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2673 return simplify_relational_operation (code, mode, VOIDmode,
2674 XEXP (op0, 0), XEXP (op0, 1));
2675
2676 if (mode == VOIDmode
2677 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2678 || CC0_P (op0))
2679 return NULL_RTX;
2680
2681 trueop0 = avoid_constant_pool_reference (op0);
2682 trueop1 = avoid_constant_pool_reference (op1);
2683 return simplify_relational_operation_1 (code, mode, cmp_mode,
2684 trueop0, trueop1);
2685 }
2686
2687 /* This part of simplify_relational_operation is only used when CMP_MODE
2688 is not in class MODE_CC (i.e. it is a real comparison).
2689
   MODE is the mode of the result, while CMP_MODE specifies the mode
   in which the comparison is done, so it is the mode of the operands.  */
2692 rtx
2693 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2694 enum machine_mode cmp_mode, rtx op0, rtx op1)
2695 {
2696 if (GET_CODE (op1) == CONST_INT)
2697 {
2698 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2699 {
	  /* If op0 is a comparison, extract the comparison arguments from it.  */
2701 if (code == NE)
2702 {
2703 if (GET_MODE (op0) == cmp_mode)
2704 return simplify_rtx (op0);
2705 else
2706 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2707 XEXP (op0, 0), XEXP (op0, 1));
2708 }
2709 else if (code == EQ)
2710 {
2711 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
2712 if (new != UNKNOWN)
2713 return simplify_gen_relational (new, mode, VOIDmode,
2714 XEXP (op0, 0), XEXP (op0, 1));
2715 }
2716 }
2717 }
2718
2719 return NULL_RTX;
2720 }
2721
2722 /* Check if the given comparison (done in the given MODE) is actually a
2723 tautology or a contradiction.
2724 If no simplification is possible, this function returns zero.
2725 Otherwise, it returns either const_true_rtx or const0_rtx. */
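/* For example, (ltu X (const_int 0)) can never hold, so it folds to
   const0_rtx, while (geu X (const_int 0)) always holds and folds to
   const_true_rtx.  */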
2726
2727 rtx
2728 simplify_const_relational_operation (enum rtx_code code,
2729 enum machine_mode mode,
2730 rtx op0, rtx op1)
2731 {
2732 int equal, op0lt, op0ltu, op1lt, op1ltu;
2733 rtx tem;
2734 rtx trueop0;
2735 rtx trueop1;
2736
2737 if (mode == VOIDmode
2738 && (GET_MODE (op0) != VOIDmode
2739 || GET_MODE (op1) != VOIDmode))
2740 abort ();
2741
2742 /* If op0 is a compare, extract the comparison arguments from it. */
2743 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2744 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2745
2746 /* We can't simplify MODE_CC values since we don't know what the
2747 actual comparison is. */
2748 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2749 return 0;
2750
2751 /* Make sure the constant is second. */
2752 if (swap_commutative_operands_p (op0, op1))
2753 {
2754 tem = op0, op0 = op1, op1 = tem;
2755 code = swap_condition (code);
2756 }
2757
2758 trueop0 = avoid_constant_pool_reference (op0);
2759 trueop1 = avoid_constant_pool_reference (op1);
2760
2761 /* For integer comparisons of A and B maybe we can simplify A - B and can
2762 then simplify a comparison of that with zero. If A and B are both either
2763 a register or a CONST_INT, this can't help; testing for these cases will
2764 prevent infinite recursion here and speed things up.
2765
2766 If CODE is an unsigned comparison, then we can never do this optimization,
2767 because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases cannot be ignored.  Nor can we do this even for signed
     comparisons in languages such as Java, where signed overflow wraps,
     so test flag_wrapv.  */
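  /* For example, (gt (plus X 4) X) reduces via the subtraction to
     (gt (const_int 4) (const_int 0)), which folds to true when signed
     overflow is undefined.  */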
2771
2772 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2773 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2774 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2775 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2776 /* We cannot do this for == or != if tem is a nonzero address. */
2777 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2778 && code != GTU && code != GEU && code != LTU && code != LEU)
2779 return simplify_const_relational_operation (signed_condition (code),
2780 mode, tem, const0_rtx);
2781
2782 if (flag_unsafe_math_optimizations && code == ORDERED)
2783 return const_true_rtx;
2784
2785 if (flag_unsafe_math_optimizations && code == UNORDERED)
2786 return const0_rtx;
2787
2788 /* For modes without NaNs, if the two operands are equal, we know the
2789 result except if they have side-effects. */
2790 if (! HONOR_NANS (GET_MODE (trueop0))
2791 && rtx_equal_p (trueop0, trueop1)
2792 && ! side_effects_p (trueop0))
2793 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2794
2795 /* If the operands are floating-point constants, see if we can fold
2796 the result. */
2797 else if (GET_CODE (trueop0) == CONST_DOUBLE
2798 && GET_CODE (trueop1) == CONST_DOUBLE
2799 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2800 {
2801 REAL_VALUE_TYPE d0, d1;
2802
2803 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2804 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2805
2806 /* Comparisons are unordered iff at least one of the values is NaN. */
2807 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2808 switch (code)
2809 {
2810 case UNEQ:
2811 case UNLT:
2812 case UNGT:
2813 case UNLE:
2814 case UNGE:
2815 case NE:
2816 case UNORDERED:
2817 return const_true_rtx;
2818 case EQ:
2819 case LT:
2820 case GT:
2821 case LE:
2822 case GE:
2823 case LTGT:
2824 case ORDERED:
2825 return const0_rtx;
2826 default:
2827 return 0;
2828 }
2829
2830 equal = REAL_VALUES_EQUAL (d0, d1);
2831 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2832 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2833 }
2834
2835 /* Otherwise, see if the operands are both integers. */
2836 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2837 && (GET_CODE (trueop0) == CONST_DOUBLE
2838 || GET_CODE (trueop0) == CONST_INT)
2839 && (GET_CODE (trueop1) == CONST_DOUBLE
2840 || GET_CODE (trueop1) == CONST_INT))
2841 {
2842 int width = GET_MODE_BITSIZE (mode);
2843 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2844 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2845
2846 /* Get the two words comprising each integer constant. */
2847 if (GET_CODE (trueop0) == CONST_DOUBLE)
2848 {
2849 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2850 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2851 }
2852 else
2853 {
2854 l0u = l0s = INTVAL (trueop0);
2855 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2856 }
2857
2858 if (GET_CODE (trueop1) == CONST_DOUBLE)
2859 {
2860 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2861 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2862 }
2863 else
2864 {
2865 l1u = l1s = INTVAL (trueop1);
2866 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2867 }
2868
2869 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2870 we have to sign or zero-extend the values. */
2871 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2872 {
2873 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2874 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2875
2876 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2877 l0s |= ((HOST_WIDE_INT) (-1) << width);
2878
2879 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2880 l1s |= ((HOST_WIDE_INT) (-1) << width);
2881 }
2882 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2883 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2884
2885 equal = (h0u == h1u && l0u == l1u);
2886 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2887 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2888 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2889 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2890 }
2891
2892 /* Otherwise, there are some code-specific tests we can make. */
2893 else
2894 {
2895 switch (code)
2896 {
2897 case EQ:
2898 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2899 return const0_rtx;
2900 break;
2901
2902 case NE:
2903 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2904 return const_true_rtx;
2905 break;
2906
2907 case GEU:
2908 /* Unsigned values are never negative. */
2909 if (trueop1 == const0_rtx)
2910 return const_true_rtx;
2911 break;
2912
2913 case LTU:
2914 if (trueop1 == const0_rtx)
2915 return const0_rtx;
2916 break;
2917
2918 case LEU:
2919 /* Unsigned values are never greater than the largest
2920 unsigned value. */
2921 if (GET_CODE (trueop1) == CONST_INT
2922 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2923 && INTEGRAL_MODE_P (mode))
2924 return const_true_rtx;
2925 break;
2926
2927 case GTU:
2928 if (GET_CODE (trueop1) == CONST_INT
2929 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2930 && INTEGRAL_MODE_P (mode))
2931 return const0_rtx;
2932 break;
2933
2934 case LT:
2935 /* Optimize abs(x) < 0.0. */
2936 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2937 {
2938 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2939 : trueop0;
2940 if (GET_CODE (tem) == ABS)
2941 return const0_rtx;
2942 }
2943 break;
2944
2945 case GE:
2946 /* Optimize abs(x) >= 0.0. */
2947 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2948 {
2949 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2950 : trueop0;
2951 if (GET_CODE (tem) == ABS)
2952 return const_true_rtx;
2953 }
2954 break;
2955
2956 case UNGE:
2957 /* Optimize ! (abs(x) < 0.0). */
2958 if (trueop1 == CONST0_RTX (mode))
2959 {
2960 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2961 : trueop0;
2962 if (GET_CODE (tem) == ABS)
2963 return const_true_rtx;
2964 }
2965 break;
2966
2967 default:
2968 break;
2969 }
2970
2971 return 0;
2972 }
2973
2974 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2975 as appropriate. */
2976 switch (code)
2977 {
2978 case EQ:
2979 case UNEQ:
2980 return equal ? const_true_rtx : const0_rtx;
2981 case NE:
2982 case LTGT:
2983 return ! equal ? const_true_rtx : const0_rtx;
2984 case LT:
2985 case UNLT:
2986 return op0lt ? const_true_rtx : const0_rtx;
2987 case GT:
2988 case UNGT:
2989 return op1lt ? const_true_rtx : const0_rtx;
2990 case LTU:
2991 return op0ltu ? const_true_rtx : const0_rtx;
2992 case GTU:
2993 return op1ltu ? const_true_rtx : const0_rtx;
2994 case LE:
2995 case UNLE:
2996 return equal || op0lt ? const_true_rtx : const0_rtx;
2997 case GE:
2998 case UNGE:
2999 return equal || op1lt ? const_true_rtx : const0_rtx;
3000 case LEU:
3001 return equal || op0ltu ? const_true_rtx : const0_rtx;
3002 case GEU:
3003 return equal || op1ltu ? const_true_rtx : const0_rtx;
3004 case ORDERED:
3005 return const_true_rtx;
3006 case UNORDERED:
3007 return const0_rtx;
3008 default:
3009 abort ();
3010 }
3011 }
3012 \f
3013 /* Simplify CODE, an operation with result mode MODE and three operands,
3014 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
3016
3017 rtx
3018 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3019 enum machine_mode op0_mode, rtx op0, rtx op1,
3020 rtx op2)
3021 {
3022 unsigned int width = GET_MODE_BITSIZE (mode);
3023
3024 /* VOIDmode means "infinite" precision. */
3025 if (width == 0)
3026 width = HOST_BITS_PER_WIDE_INT;
3027
3028 switch (code)
3029 {
3030 case SIGN_EXTRACT:
3031 case ZERO_EXTRACT:
3032 if (GET_CODE (op0) == CONST_INT
3033 && GET_CODE (op1) == CONST_INT
3034 && GET_CODE (op2) == CONST_INT
3035 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3036 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3037 {
	  /* Extracting a bit-field from a constant.  */
3039 HOST_WIDE_INT val = INTVAL (op0);
3040
3041 if (BITS_BIG_ENDIAN)
3042 val >>= (GET_MODE_BITSIZE (op0_mode)
3043 - INTVAL (op2) - INTVAL (op1));
3044 else
3045 val >>= INTVAL (op2);
3046
3047 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3048 {
3049 /* First zero-extend. */
3050 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3051 /* If desired, propagate sign bit. */
3052 if (code == SIGN_EXTRACT
3053 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3054 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3055 }
3056
3057 /* Clear the bits that don't belong in our mode,
3058 unless they and our sign bit are all one.
3059 So we get either a reasonable negative value or a reasonable
3060 unsigned value for this mode. */
3061 if (width < HOST_BITS_PER_WIDE_INT
3062 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3063 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3064 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3065
3066 return GEN_INT (val);
3067 }
3068 break;
3069
3070 case IF_THEN_ELSE:
3071 if (GET_CODE (op0) == CONST_INT)
3072 return op0 != const0_rtx ? op1 : op2;
3073
3074 /* Convert c ? a : a into "a". */
3075 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3076 return op1;
3077
3078 /* Convert a != b ? a : b into "a". */
3079 if (GET_CODE (op0) == NE
3080 && ! side_effects_p (op0)
3081 && ! HONOR_NANS (mode)
3082 && ! HONOR_SIGNED_ZEROS (mode)
3083 && ((rtx_equal_p (XEXP (op0, 0), op1)
3084 && rtx_equal_p (XEXP (op0, 1), op2))
3085 || (rtx_equal_p (XEXP (op0, 0), op2)
3086 && rtx_equal_p (XEXP (op0, 1), op1))))
3087 return op1;
3088
3089 /* Convert a == b ? a : b into "b". */
3090 if (GET_CODE (op0) == EQ
3091 && ! side_effects_p (op0)
3092 && ! HONOR_NANS (mode)
3093 && ! HONOR_SIGNED_ZEROS (mode)
3094 && ((rtx_equal_p (XEXP (op0, 0), op1)
3095 && rtx_equal_p (XEXP (op0, 1), op2))
3096 || (rtx_equal_p (XEXP (op0, 0), op2)
3097 && rtx_equal_p (XEXP (op0, 1), op1))))
3098 return op2;
3099
3100 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3101 {
3102 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3103 ? GET_MODE (XEXP (op0, 1))
3104 : GET_MODE (XEXP (op0, 0)));
3105 rtx temp;
3106
3107 /* Look for happy constants in op1 and op2. */
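	  /* For example, when STORE_FLAG_VALUE is 1,
	     (if_then_else (lt A B) (const_int 1) (const_int 0))
	     reduces to (lt A B).  */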
3108 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3109 {
3110 HOST_WIDE_INT t = INTVAL (op1);
3111 HOST_WIDE_INT f = INTVAL (op2);
3112
3113 if (t == STORE_FLAG_VALUE && f == 0)
3114 code = GET_CODE (op0);
3115 else if (t == 0 && f == STORE_FLAG_VALUE)
3116 {
3117 enum rtx_code tmp;
3118 tmp = reversed_comparison_code (op0, NULL_RTX);
3119 if (tmp == UNKNOWN)
3120 break;
3121 code = tmp;
3122 }
3123 else
3124 break;
3125
3126 return simplify_gen_relational (code, op0_mode, cmp_mode,
3127 XEXP (op0, 0), XEXP (op0, 1));
3128 }
3129
3130 if (cmp_mode == VOIDmode)
3131 cmp_mode = op0_mode;
3132 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3133 cmp_mode, XEXP (op0, 0),
3134 XEXP (op0, 1));
3135
3136 /* See if any simplifications were possible. */
3137 if (temp)
3138 {
3139 if (GET_CODE (temp) == CONST_INT)
3140 return temp == const0_rtx ? op2 : op1;
3141 else if (temp)
3142 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3143 }
3144 }
3145 break;
3146
3147 case VEC_MERGE:
3148 if (GET_MODE (op0) != mode
3149 || GET_MODE (op1) != mode
3150 || !VECTOR_MODE_P (mode))
3151 abort ();
3152 op2 = avoid_constant_pool_reference (op2);
3153 if (GET_CODE (op2) == CONST_INT)
3154 {
3155 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3156 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3157 int mask = (1 << n_elts) - 1;
3158
3159 if (!(INTVAL (op2) & mask))
3160 return op1;
3161 if ((INTVAL (op2) & mask) == mask)
3162 return op0;
3163
3164 op0 = avoid_constant_pool_reference (op0);
3165 op1 = avoid_constant_pool_reference (op1);
3166 if (GET_CODE (op0) == CONST_VECTOR
3167 && GET_CODE (op1) == CONST_VECTOR)
3168 {
3169 rtvec v = rtvec_alloc (n_elts);
3170 unsigned int i;
3171
3172 for (i = 0; i < n_elts; i++)
3173 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3174 ? CONST_VECTOR_ELT (op0, i)
3175 : CONST_VECTOR_ELT (op1, i));
3176 return gen_rtx_CONST_VECTOR (mode, v);
3177 }
3178 }
3179 break;
3180
3181 default:
3182 abort ();
3183 }
3184
3185 return 0;
3186 }
3187
3188 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3189 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3190
3191 Works by unpacking OP into a collection of 8-bit values
3192 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3193 and then repacking them again for OUTERMODE. */
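/* For example, on a little-endian target,
   (subreg:HI (const_int 0x12345678) 0) unpacks the CONST_INT into the
   bytes 78 56 34 12 ..., selects the two bytes at offset 0, and
   repacks them as (const_int 0x5678).  */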
3194
3195 static rtx
3196 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3197 enum machine_mode innermode, unsigned int byte)
3198 {
3199 /* We support up to 512-bit values (for V8DFmode). */
3200 enum {
3201 max_bitsize = 512,
3202 value_bit = 8,
3203 value_mask = (1 << value_bit) - 1
3204 };
3205 unsigned char value[max_bitsize / value_bit];
3206 int value_start;
3207 int i;
3208 int elem;
3209
3210 int num_elem;
3211 rtx * elems;
3212 int elem_bitsize;
3213 rtx result_s;
3214 rtvec result_v = NULL;
3215 enum mode_class outer_class;
3216 enum machine_mode outer_submode;
3217
3218 /* Some ports misuse CCmode. */
3219 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3220 return op;
3221
3222 /* Unpack the value. */
3223
3224 if (GET_CODE (op) == CONST_VECTOR)
3225 {
3226 num_elem = CONST_VECTOR_NUNITS (op);
3227 elems = &CONST_VECTOR_ELT (op, 0);
3228 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3229 }
3230 else
3231 {
3232 num_elem = 1;
3233 elems = &op;
3234 elem_bitsize = max_bitsize;
3235 }
3236
3237 if (BITS_PER_UNIT % value_bit != 0)
3238 abort (); /* Too complicated; reducing value_bit may help. */
3239 if (elem_bitsize % BITS_PER_UNIT != 0)
3240 abort (); /* I don't know how to handle endianness of sub-units. */
3241
3242 for (elem = 0; elem < num_elem; elem++)
3243 {
3244 unsigned char * vp;
3245 rtx el = elems[elem];
3246
3247 /* Vectors are kept in target memory order. (This is probably
3248 a mistake.) */
3249 {
3250 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3251 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3252 / BITS_PER_UNIT);
3253 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3254 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3255 unsigned bytele = (subword_byte % UNITS_PER_WORD
3256 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3257 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3258 }
3259
3260 switch (GET_CODE (el))
3261 {
3262 case CONST_INT:
3263 for (i = 0;
3264 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3265 i += value_bit)
3266 *vp++ = INTVAL (el) >> i;
3267 /* CONST_INTs are always logically sign-extended. */
3268 for (; i < elem_bitsize; i += value_bit)
3269 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3270 break;
3271
3272 case CONST_DOUBLE:
3273 if (GET_MODE (el) == VOIDmode)
3274 {
3275 /* If this triggers, someone should have generated a
3276 CONST_INT instead. */
3277 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3278 abort ();
3279
3280 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3281 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3282 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3283 {
3284 *vp++
3285 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3286 i += value_bit;
3287 }
3288 /* It shouldn't matter what's done here, so fill it with
3289 zero. */
3290 for (; i < max_bitsize; i += value_bit)
3291 *vp++ = 0;
3292 }
3293 else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
3294 {
3295 long tmp[max_bitsize / 32];
3296 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3297
3298 if (bitsize > elem_bitsize)
3299 abort ();
3300 if (bitsize % value_bit != 0)
3301 abort ();
3302
3303 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3304 GET_MODE (el));
3305
3306 /* real_to_target produces its result in words affected by
3307 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3308 and use WORDS_BIG_ENDIAN instead; see the documentation
3309 of SUBREG in rtl.texi. */
3310 for (i = 0; i < bitsize; i += value_bit)
3311 {
3312 int ibase;
3313 if (WORDS_BIG_ENDIAN)
3314 ibase = bitsize - 1 - i;
3315 else
3316 ibase = i;
3317 *vp++ = tmp[ibase / 32] >> i % 32;
3318 }
3319
3320 /* It shouldn't matter what's done here, so fill it with
3321 zero. */
3322 for (; i < elem_bitsize; i += value_bit)
3323 *vp++ = 0;
3324 }
3325 else
3326 abort ();
3327 break;
3328
3329 default:
3330 abort ();
3331 }
3332 }
3333
3334 /* Now, pick the right byte to start with. */
3335 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3336 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3337 will already have offset 0. */
3338 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3339 {
3340 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3341 - byte);
3342 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3343 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3344 byte = (subword_byte % UNITS_PER_WORD
3345 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3346 }
3347
3348 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
     so if it had gone negative it will instead be very large.)  */
3350 if (byte >= GET_MODE_SIZE (innermode))
3351 abort ();
3352
3353 /* Convert from bytes to chunks of size value_bit. */
3354 value_start = byte * (BITS_PER_UNIT / value_bit);
3355
3356 /* Re-pack the value. */
3357
3358 if (VECTOR_MODE_P (outermode))
3359 {
3360 num_elem = GET_MODE_NUNITS (outermode);
3361 result_v = rtvec_alloc (num_elem);
3362 elems = &RTVEC_ELT (result_v, 0);
3363 outer_submode = GET_MODE_INNER (outermode);
3364 }
3365 else
3366 {
3367 num_elem = 1;
3368 elems = &result_s;
3369 outer_submode = outermode;
3370 }
3371
3372 outer_class = GET_MODE_CLASS (outer_submode);
3373 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3374
3375 if (elem_bitsize % value_bit != 0)
3376 abort ();
3377 if (elem_bitsize + value_start * value_bit > max_bitsize)
3378 abort ();
3379
3380 for (elem = 0; elem < num_elem; elem++)
3381 {
3382 unsigned char *vp;
3383
3384 /* Vectors are stored in target memory order. (This is probably
3385 a mistake.) */
3386 {
3387 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3388 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3389 / BITS_PER_UNIT);
3390 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3391 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3392 unsigned bytele = (subword_byte % UNITS_PER_WORD
3393 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3394 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3395 }
3396
3397 switch (outer_class)
3398 {
3399 case MODE_INT:
3400 case MODE_PARTIAL_INT:
3401 {
3402 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3403
3404 for (i = 0;
3405 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3406 i += value_bit)
3407 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3408 for (; i < elem_bitsize; i += value_bit)
3409 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3410 << (i - HOST_BITS_PER_WIDE_INT));
3411
3412 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3413 know why. */
3414 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3415 elems[elem] = gen_int_mode (lo, outer_submode);
3416 else
3417 elems[elem] = immed_double_const (lo, hi, outer_submode);
3418 }
3419 break;
3420
3421 case MODE_FLOAT:
3422 {
3423 REAL_VALUE_TYPE r;
3424 long tmp[max_bitsize / 32];
3425
3426 /* real_from_target wants its input in words affected by
3427 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3428 and use WORDS_BIG_ENDIAN instead; see the documentation
3429 of SUBREG in rtl.texi. */
3430 for (i = 0; i < max_bitsize / 32; i++)
3431 tmp[i] = 0;
3432 for (i = 0; i < elem_bitsize; i += value_bit)
3433 {
3434 int ibase;
3435 if (WORDS_BIG_ENDIAN)
3436 ibase = elem_bitsize - 1 - i;
3437 else
3438 ibase = i;
3439 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3440 }
3441
3442 real_from_target (&r, tmp, outer_submode);
3443 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3444 }
3445 break;
3446
3447 default:
3448 abort ();
3449 }
3450 }
3451 if (VECTOR_MODE_P (outermode))
3452 return gen_rtx_CONST_VECTOR (outermode, result_v);
3453 else
3454 return result_s;
3455 }
3456
3457 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3458 Return 0 if no simplifications are possible. */
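/* For example, on a little-endian target,
   (subreg:QI (const_int 0x1234) 0) with HImode INNERMODE folds to
   (const_int 0x34), the low byte of the inner value.  */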
3459 rtx
3460 simplify_subreg (enum machine_mode outermode, rtx op,
3461 enum machine_mode innermode, unsigned int byte)
3462 {
3463 /* Little bit of sanity checking. */
3464 if (innermode == VOIDmode || outermode == VOIDmode
3465 || innermode == BLKmode || outermode == BLKmode)
3466 abort ();
3467
3468 if (GET_MODE (op) != innermode
3469 && GET_MODE (op) != VOIDmode)
3470 abort ();
3471
3472 if (byte % GET_MODE_SIZE (outermode)
3473 || byte >= GET_MODE_SIZE (innermode))
3474 abort ();
3475
3476 if (outermode == innermode && !byte)
3477 return op;
3478
3479 if (GET_CODE (op) == CONST_INT
3480 || GET_CODE (op) == CONST_DOUBLE
3481 || GET_CODE (op) == CONST_VECTOR)
3482 return simplify_immed_subreg (outermode, op, innermode, byte);
3483
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to op's starting mode.  */
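  /* For example, (subreg:QI (subreg:HI (reg:SI R) 0) 0) becomes
     (subreg:QI (reg:SI R) 0), and (subreg:SI (subreg:HI (reg:SI R) 0) 0)
     becomes (reg:SI R) again.  */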
3486 if (GET_CODE (op) == SUBREG)
3487 {
3488 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3489 int final_offset = byte + SUBREG_BYTE (op);
3490 rtx new;
3491
3492 if (outermode == innermostmode
3493 && byte == 0 && SUBREG_BYTE (op) == 0)
3494 return SUBREG_REG (op);
3495
      /* The SUBREG_BYTE represents the offset, as if the value were stored
	 in memory.  An irritating exception is a paradoxical subreg, where
	 we define SUBREG_BYTE to be 0; on big endian machines the true
	 offset would be negative.  For a moment, undo this exception.  */
3500 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3501 {
3502 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3503 if (WORDS_BIG_ENDIAN)
3504 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3505 if (BYTES_BIG_ENDIAN)
3506 final_offset += difference % UNITS_PER_WORD;
3507 }
3508 if (SUBREG_BYTE (op) == 0
3509 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3510 {
3511 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3512 if (WORDS_BIG_ENDIAN)
3513 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3514 if (BYTES_BIG_ENDIAN)
3515 final_offset += difference % UNITS_PER_WORD;
3516 }
3517
3518 /* See whether resulting subreg will be paradoxical. */
3519 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3520 {
3521 /* In nonparadoxical subregs we can't handle negative offsets. */
3522 if (final_offset < 0)
3523 return NULL_RTX;
3524 /* Bail out in case resulting subreg would be incorrect. */
3525 if (final_offset % GET_MODE_SIZE (outermode)
3526 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3527 return NULL_RTX;
3528 }
3529 else
3530 {
3531 int offset = 0;
3532 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3533
	  /* For a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
3536 if (WORDS_BIG_ENDIAN)
3537 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3538 if (BYTES_BIG_ENDIAN)
3539 offset += difference % UNITS_PER_WORD;
3540 if (offset == final_offset)
3541 final_offset = 0;
3542 else
3543 return NULL_RTX;
3544 }
3545
3546 /* Recurse for further possible simplifications. */
3547 new = simplify_subreg (outermode, SUBREG_REG (op),
3548 GET_MODE (SUBREG_REG (op)),
3549 final_offset);
3550 if (new)
3551 return new;
3552 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3553 }
3554
3555 /* SUBREG of a hard register => just change the register number
3556 and/or mode. If the hard register is not valid in that mode,
3557 suppress this simplification. If the hard register is the stack,
3558 frame, or argument pointer, leave this as a SUBREG. */
3559
3560 if (REG_P (op)
3561 && (! REG_FUNCTION_VALUE_P (op)
3562 || ! rtx_equal_function_value_matters)
3563 && REGNO (op) < FIRST_PSEUDO_REGISTER
3564 #ifdef CANNOT_CHANGE_MODE_CLASS
3565 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3566 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3567 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3568 #endif
3569 && ((reload_completed && !frame_pointer_needed)
3570 || (REGNO (op) != FRAME_POINTER_REGNUM
3571 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3572 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3573 #endif
3574 ))
3575 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3576 && REGNO (op) != ARG_POINTER_REGNUM
3577 #endif
3578 && REGNO (op) != STACK_POINTER_REGNUM
3579 && subreg_offset_representable_p (REGNO (op), innermode,
3580 byte, outermode))
3581 {
3582 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3583 int final_regno = subreg_hard_regno (tem, 0);
3584
3585 /* ??? We do allow it if the current REG is not valid for
3586 its mode. This is a kludge to work around how float/complex
3587 arguments are passed on 32-bit SPARC and should be fixed. */
3588 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3589 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3590 {
3591 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3592
	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis,
	     which cannot grok partial registers anyway.  */
3597
3598 if (subreg_lowpart_offset (outermode, innermode) == byte)
3599 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3600 return x;
3601 }
3602 }
3603
3604 /* If we have a SUBREG of a register that we are replacing and we are
3605 replacing it with a MEM, make a new MEM and try replacing the
3606 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3607 or if we would be widening it. */
3608
3609 if (GET_CODE (op) == MEM
3610 && ! mode_dependent_address_p (XEXP (op, 0))
3611 /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
3613 && (! MEM_VOLATILE_P (op)
3614 || ! have_insn_for (SET, innermode))
3615 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3616 return adjust_address_nv (op, outermode, byte);
3617
3618 /* Handle complex values represented as CONCAT
3619 of real and imaginary part. */
3620 if (GET_CODE (op) == CONCAT)
3621 {
3622 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3623 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3624 unsigned int final_offset;
3625 rtx res;
3626
3627 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3628 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3629 if (res)
3630 return res;
3631 /* We can at least simplify it by referring directly to the
3632 relevant part. */
3633 return gen_rtx_SUBREG (outermode, part, final_offset);
3634 }
3635
3636 /* Optimize SUBREG truncations of zero and sign extended values. */
3637 if ((GET_CODE (op) == ZERO_EXTEND
3638 || GET_CODE (op) == SIGN_EXTEND)
3639 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3640 {
3641 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3642
3643 /* If we're requesting the lowpart of a zero or sign extension,
3644 there are three possibilities. If the outermode is the same
3645 as the origmode, we can omit both the extension and the subreg.
3646 If the outermode is not larger than the origmode, we can apply
3647 the truncation without the extension. Finally, if the outermode
3648 is larger than the origmode, but both are integer modes, we
3649 can just extend to the appropriate mode. */
3650 if (bitpos == 0)
3651 {
3652 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3653 if (outermode == origmode)
3654 return XEXP (op, 0);
3655 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3656 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3657 subreg_lowpart_offset (outermode,
3658 origmode));
3659 if (SCALAR_INT_MODE_P (outermode))
3660 return simplify_gen_unary (GET_CODE (op), outermode,
3661 XEXP (op, 0), origmode);
3662 }
3663
3664 /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts bits above those supplied by the ZERO_EXTEND's source.  */
3666 if (GET_CODE (op) == ZERO_EXTEND
3667 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3668 return CONST0_RTX (outermode);
3669 }
3670
3671 return NULL_RTX;
3672 }
3673
3674 /* Make a SUBREG operation or equivalent if it folds. */
3675
3676 rtx
3677 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3678 enum machine_mode innermode, unsigned int byte)
3679 {
3680 rtx new;
3681 /* Little bit of sanity checking. */
3682 if (innermode == VOIDmode || outermode == VOIDmode
3683 || innermode == BLKmode || outermode == BLKmode)
3684 abort ();
3685
3686 if (GET_MODE (op) != innermode
3687 && GET_MODE (op) != VOIDmode)
3688 abort ();
3689
3690 if (byte % GET_MODE_SIZE (outermode)
3691 || byte >= GET_MODE_SIZE (innermode))
3692 abort ();
3693
3694 if (GET_CODE (op) == QUEUED)
3695 return NULL_RTX;
3696
3697 new = simplify_subreg (outermode, op, innermode, byte);
3698 if (new)
3699 return new;
3700
3701 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3702 return NULL_RTX;
3703
3704 return gen_rtx_SUBREG (outermode, op, byte);
3705 }

/* Simplify X, an rtx expression.
3707
3708 Return the simplified expression or NULL if no simplifications
3709 were possible.
3710
3711 This is the preferred entry point into the simplification routines;
3712 however, we still allow passes to call the more specific routines.
3713
3714 Right now GCC has three (yes, three) major bodies of RTL simplification
3715 code that need to be unified.
3716
3717 1. fold_rtx in cse.c. This code uses various CSE specific
3718 information to aid in RTL simplification.
3719
3720 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3721 it uses combine specific information to aid in RTL
3722 simplification.
3723
3724 3. The routines in this file.
3725
3726
3727 Long term we want to only have one body of simplification code; to
3728 get to that state I recommend the following steps:
3729
   1. Pore over fold_rtx & simplify_rtx and move any simplifications
      which do not depend on pass-specific state into these routines.
3732
3733 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3734 use this routine whenever possible.
3735
3736 3. Allow for pass dependent state to be provided to these
3737 routines and add simplifications based on the pass dependent
3738 state. Remove code from cse.c & combine.c that becomes
3739 redundant/dead.
3740
3741 It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
3745
3746 rtx
3747 simplify_rtx (rtx x)
3748 {
3749 enum rtx_code code = GET_CODE (x);
3750 enum machine_mode mode = GET_MODE (x);
3751
3752 switch (GET_RTX_CLASS (code))
3753 {
3754 case RTX_UNARY:
3755 return simplify_unary_operation (code, mode,
3756 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3757 case RTX_COMM_ARITH:
3758 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3759 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3760
3761 /* Fall through.... */
3762
3763 case RTX_BIN_ARITH:
3764 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3765
3766 case RTX_TERNARY:
3767 case RTX_BITFIELD_OPS:
3768 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3769 XEXP (x, 0), XEXP (x, 1),
3770 XEXP (x, 2));
3771
3772 case RTX_COMPARE:
3773 case RTX_COMM_COMPARE:
3774 return simplify_relational_operation (code, mode,
3775 ((GET_MODE (XEXP (x, 0))
3776 != VOIDmode)
3777 ? GET_MODE (XEXP (x, 0))
3778 : GET_MODE (XEXP (x, 1))),
3779 XEXP (x, 0),
3780 XEXP (x, 1));
3781
3782 case RTX_EXTRA:
3783 if (code == SUBREG)
3784 return simplify_gen_subreg (mode, SUBREG_REG (x),
3785 GET_MODE (SUBREG_REG (x)),
3786 SUBREG_BYTE (x));
3787 if (code == CONSTANT_P_RTX)
3788 {
3789 if (CONSTANT_P (XEXP (x, 0)))
3790 return const1_rtx;
3791 }
3792 break;
3793
3794 case RTX_OBJ:
3795 if (code == LO_SUM)
3796 {
3797 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3798 if (GET_CODE (XEXP (x, 0)) == HIGH
3799 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3800 return XEXP (x, 1);
3801 }
3802 break;
3803
3804 default:
3805 break;
3806 }
3807 return NULL;
3808 }