1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
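/* Usage sketch (illustrative; assumes a 64-bit HOST_WIDE_INT): for a
   two-word value whose low word is 0xffffffffffffffff, HWI_SIGN_EXTEND
   supplies the high word -1; for a low word of 42 it supplies 0.  */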
51
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
55 rtx, int);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static bool associative_constant_p (rtx);
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 rtx, rtx);
61 \f
62 /* Negate a CONST_INT rtx, truncating (because a conversion from a
63 maximally negative number can overflow). */
64 static rtx
65 neg_const_int (enum machine_mode mode, rtx i)
66 {
67 return gen_int_mode (- INTVAL (i), mode);
68 }
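/* Illustrative example: in QImode the most negative value is -128, and
   gen_int_mode truncates the mathematically exact negation 128 back
   into the mode, so neg_const_int (QImode, GEN_INT (-128)) yields
   (const_int -128) again instead of overflowing.  */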
69
70 \f
71 /* Make a binary operation by properly ordering the operands and
72 seeing if the expression folds. */
73
74 rtx
75 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
76 rtx op1)
77 {
78 rtx tem;
79
80 /* Put complex operands first and constants second if commutative. */
81 if (GET_RTX_CLASS (code) == 'c'
82 && swap_commutative_operands_p (op0, op1))
83 tem = op0, op0 = op1, op1 = tem;
84
85 /* If this simplifies, do it. */
86 tem = simplify_binary_operation (code, mode, op0, op1);
87 if (tem)
88 return tem;
89
90 /* Handle addition and subtraction specially. Otherwise, just form
91 the operation. */
92
93 if (code == PLUS || code == MINUS)
94 {
95 tem = simplify_plus_minus (code, mode, op0, op1, 1);
96 if (tem)
97 return tem;
98 }
99
100 return gen_rtx_fmt_ee (code, mode, op0, op1);
101 }
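/* Usage sketch (illustrative): simplify_gen_binary (PLUS, SImode, x,
   const0_rtx) folds to X itself via simplify_binary_operation, while a
   pair of operands that does not fold, e.g. two pseudo registers, falls
   through to gen_rtx_fmt_ee and simply builds (plus:SI x y).  */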
102 \f
103 /* If X is a MEM referencing the constant pool, return the real value.
104 Otherwise return X. */
105 rtx
106 avoid_constant_pool_reference (rtx x)
107 {
108 rtx c, tmp, addr;
109 enum machine_mode cmode;
110
111 switch (GET_CODE (x))
112 {
113 case MEM:
114 break;
115
116 case FLOAT_EXTEND:
117 /* Handle float extensions of constant pool references. */
118 tmp = XEXP (x, 0);
119 c = avoid_constant_pool_reference (tmp);
120 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
121 {
122 REAL_VALUE_TYPE d;
123
124 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
125 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
126 }
127 return x;
128
129 default:
130 return x;
131 }
132
133 addr = XEXP (x, 0);
134
135 /* Call target hook to avoid the effects of -fpic etc.... */
136 addr = (*targetm.delegitimize_address) (addr);
137
138 if (GET_CODE (addr) == LO_SUM)
139 addr = XEXP (addr, 1);
140
141 if (GET_CODE (addr) != SYMBOL_REF
142 || ! CONSTANT_POOL_ADDRESS_P (addr))
143 return x;
144
145 c = get_pool_constant (addr);
146 cmode = get_pool_mode (addr);
147
148 /* If we're accessing the constant in a different mode than it was
149 originally stored, attempt to fix that up via subreg simplifications.
150 If that fails we have no choice but to return the original memory. */
151 if (cmode != GET_MODE (x))
152 {
153 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
154 return c ? c : x;
155 }
156
157 return c;
158 }
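/* Illustrative example: for X = (mem (symbol_ref)) whose symbol
   addresses a constant-pool entry holding the SFmode value 1.0, this
   returns the CONST_DOUBLE for 1.0.  If X reads the entry in some other
   mode and simplify_subreg cannot reconcile the two, the original MEM
   comes back unchanged.  */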
159 \f
160 /* Make a unary operation by first seeing if it folds and otherwise making
161 the specified operation. */
162
163 rtx
164 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
165 enum machine_mode op_mode)
166 {
167 rtx tem;
168
169 /* If this simplifies, use it. */
170 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
171 return tem;
172
173 return gen_rtx_fmt_e (code, mode, op);
174 }
175
176 /* Likewise for ternary operations. */
177
178 rtx
179 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
180 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
181 {
182 rtx tem;
183
184 /* If this simplifies, use it. */
185 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
186 op0, op1, op2)))
187 return tem;
188
189 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
190 }
191 \f
192 /* Likewise, for relational operations.
193 CMP_MODE specifies the mode in which the comparison is done.
194 */
195
196 rtx
197 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
198 enum machine_mode cmp_mode, rtx op0, rtx op1)
199 {
200 rtx tem;
201
202 if (cmp_mode == VOIDmode)
203 cmp_mode = GET_MODE (op0);
204 if (cmp_mode == VOIDmode)
205 cmp_mode = GET_MODE (op1);
206
207 if (cmp_mode != VOIDmode)
208 {
209 tem = simplify_relational_operation (code, cmp_mode, op0, op1);
210
211 if (tem)
212 {
213 #ifdef FLOAT_STORE_FLAG_VALUE
214 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
215 {
216 REAL_VALUE_TYPE val;
217 if (tem == const0_rtx)
218 return CONST0_RTX (mode);
219 if (tem != const_true_rtx)
220 abort ();
221 val = FLOAT_STORE_FLAG_VALUE (mode);
222 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
223 }
224 #endif
225 return tem;
226 }
227 }
228
229 /* For the following tests, ensure const0_rtx is op1. */
230 if (swap_commutative_operands_p (op0, op1)
231 || (op0 == const0_rtx && op1 != const0_rtx))
232 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
233
234 /* If op0 is a compare, extract the comparison arguments from it. */
235 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
236 return simplify_gen_relational (code, mode, VOIDmode,
237 XEXP (op0, 0), XEXP (op0, 1));
238
239 /* If op0 is a comparison, extract the comparison arguments from it. */
240 if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
241 {
242 if (code == NE)
243 {
244 if (GET_MODE (op0) == mode)
245 return op0;
246 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
247 XEXP (op0, 0), XEXP (op0, 1));
248 }
249 else if (code == EQ)
250 {
251 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
252 if (new != UNKNOWN)
253 return simplify_gen_relational (new, mode, VOIDmode,
254 XEXP (op0, 0), XEXP (op0, 1));
255 }
256 }
257
258 return gen_rtx_fmt_ee (code, mode, op0, op1);
259 }
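/* Usage sketch (illustrative): a comparison of a comparison against
   zero is redundant, so simplify_gen_relational (NE, SImode, VOIDmode,
   (eq:SI x y), const0_rtx) simplifies to (eq:SI x y); with EQ instead
   of NE the inner comparison code is reversed, giving (ne:SI x y),
   whenever reversed_comparison_code says that is safe.  */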
260 \f
261 /* Replace all occurrences of OLD in X with NEW and try to simplify the
262 resulting RTX. Return a new RTX which is as simplified as possible. */
263
264 rtx
265 simplify_replace_rtx (rtx x, rtx old, rtx new)
266 {
267 enum rtx_code code = GET_CODE (x);
268 enum machine_mode mode = GET_MODE (x);
269 enum machine_mode op_mode;
270 rtx op0, op1, op2;
271
272 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
273 to build a new expression substituting recursively. If we can't do
274 anything, return our input. */
275
276 if (x == old)
277 return new;
278
279 switch (GET_RTX_CLASS (code))
280 {
281 case '1':
282 op0 = XEXP (x, 0);
283 op_mode = GET_MODE (op0);
284 op0 = simplify_replace_rtx (op0, old, new);
285 if (op0 == XEXP (x, 0))
286 return x;
287 return simplify_gen_unary (code, mode, op0, op_mode);
288
289 case '2':
290 case 'c':
291 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
292 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
293 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
294 return x;
295 return simplify_gen_binary (code, mode, op0, op1);
296
297 case '<':
298 op0 = XEXP (x, 0);
299 op1 = XEXP (x, 1);
300 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
301 op0 = simplify_replace_rtx (op0, old, new);
302 op1 = simplify_replace_rtx (op1, old, new);
303 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
304 return x;
305 return simplify_gen_relational (code, mode, op_mode, op0, op1);
306
307 case '3':
308 case 'b':
309 op0 = XEXP (x, 0);
310 op_mode = GET_MODE (op0);
311 op0 = simplify_replace_rtx (op0, old, new);
312 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
313 op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
314 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
315 return x;
316 if (op_mode == VOIDmode)
317 op_mode = GET_MODE (op0);
318 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
319
320 case 'x':
321 /* The only case we try to handle is a SUBREG. */
322 if (code == SUBREG)
323 {
324 op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
325 if (op0 == SUBREG_REG (x))
326 return x;
327 op0 = simplify_gen_subreg (GET_MODE (x), op0,
328 GET_MODE (SUBREG_REG (x)),
329 SUBREG_BYTE (x));
330 return op0 ? op0 : x;
331 }
332 break;
333
334 case 'o':
335 if (code == MEM)
336 {
337 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
338 if (op0 == XEXP (x, 0))
339 return x;
340 return replace_equiv_address_nv (x, op0);
341 }
342 else if (code == LO_SUM)
343 {
344 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
345 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
346
347 /* (lo_sum (high x) x) -> x */
348 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
349 return op1;
350
351 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
352 return x;
353 return gen_rtx_LO_SUM (mode, op0, op1);
354 }
355 else if (code == REG)
356 {
357 if (REG_P (old) && REGNO (x) == REGNO (old))
358 return new;
359 }
360 break;
361
362 default:
363 break;
364 }
365 return x;
366 }
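/* Usage sketch (illustrative): replacing pseudo R by (const_int 8) in
   (plus:SI R (const_int 4)) recurses into both operands and then lets
   simplify_gen_binary fold the rebuilt expression, yielding
   (const_int 12).  */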
367 \f
368 /* Try to simplify a unary operation CODE whose output mode is to be
369 MODE with input operand OP whose mode was originally OP_MODE.
370 Return zero if no simplification can be made. */
371 rtx
372 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
373 rtx op, enum machine_mode op_mode)
374 {
375 unsigned int width = GET_MODE_BITSIZE (mode);
376 rtx trueop = avoid_constant_pool_reference (op);
377
378 if (code == VEC_DUPLICATE)
379 {
380 if (!VECTOR_MODE_P (mode))
381 abort ();
382 if (GET_MODE (trueop) != VOIDmode
383 && !VECTOR_MODE_P (GET_MODE (trueop))
384 && GET_MODE_INNER (mode) != GET_MODE (trueop))
385 abort ();
386 if (GET_MODE (trueop) != VOIDmode
387 && VECTOR_MODE_P (GET_MODE (trueop))
388 && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
389 abort ();
390 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
391 || GET_CODE (trueop) == CONST_VECTOR)
392 {
393 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
394 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
395 rtvec v = rtvec_alloc (n_elts);
396 unsigned int i;
397
398 if (GET_CODE (trueop) != CONST_VECTOR)
399 for (i = 0; i < n_elts; i++)
400 RTVEC_ELT (v, i) = trueop;
401 else
402 {
403 enum machine_mode inmode = GET_MODE (trueop);
404 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
405 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
406
407 if (in_n_elts >= n_elts || n_elts % in_n_elts)
408 abort ();
409 for (i = 0; i < n_elts; i++)
410 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
411 }
412 return gen_rtx_CONST_VECTOR (mode, v);
413 }
414 }
415 else if (GET_CODE (op) == CONST)
416 return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
417
418 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
419 {
420 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
421 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
422 enum machine_mode opmode = GET_MODE (trueop);
423 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
424 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
425 rtvec v = rtvec_alloc (n_elts);
426 unsigned int i;
427
428 if (op_n_elts != n_elts)
429 abort ();
430
431 for (i = 0; i < n_elts; i++)
432 {
433 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
434 CONST_VECTOR_ELT (trueop, i),
435 GET_MODE_INNER (opmode));
436 if (!x)
437 return 0;
438 RTVEC_ELT (v, i) = x;
439 }
440 return gen_rtx_CONST_VECTOR (mode, v);
441 }
442
443 /* The order of these tests is critical so that, for example, we don't
444 check the wrong mode (input vs. output) for a conversion operation,
445 such as FIX. At some point, this should be simplified. */
446
447 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
448 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
449 {
450 HOST_WIDE_INT hv, lv;
451 REAL_VALUE_TYPE d;
452
453 if (GET_CODE (trueop) == CONST_INT)
454 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
455 else
456 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
457
458 REAL_VALUE_FROM_INT (d, lv, hv, mode);
459 d = real_value_truncate (mode, d);
460 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
461 }
462 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
463 && (GET_CODE (trueop) == CONST_DOUBLE
464 || GET_CODE (trueop) == CONST_INT))
465 {
466 HOST_WIDE_INT hv, lv;
467 REAL_VALUE_TYPE d;
468
469 if (GET_CODE (trueop) == CONST_INT)
470 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
471 else
472 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
473
474 if (op_mode == VOIDmode)
475 {
476 /* We don't know how to interpret negative-looking numbers in
477 this case, so don't try to fold those. */
478 if (hv < 0)
479 return 0;
480 }
481 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
482 ;
483 else
484 hv = 0, lv &= GET_MODE_MASK (op_mode);
485
486 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
487 d = real_value_truncate (mode, d);
488 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
489 }
490
491 if (GET_CODE (trueop) == CONST_INT
492 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
493 {
494 HOST_WIDE_INT arg0 = INTVAL (trueop);
495 HOST_WIDE_INT val;
496
497 switch (code)
498 {
499 case NOT:
500 val = ~ arg0;
501 break;
502
503 case NEG:
504 val = - arg0;
505 break;
506
507 case ABS:
508 val = (arg0 >= 0 ? arg0 : - arg0);
509 break;
510
511 case FFS:
512 /* Don't use ffs here. Instead, get the low-order bit and then its
513 number. If arg0 is zero, this will return 0, as desired. */
514 arg0 &= GET_MODE_MASK (mode);
515 val = exact_log2 (arg0 & (- arg0)) + 1;
516 break;
517
518 case CLZ:
519 arg0 &= GET_MODE_MASK (mode);
520 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
521 ;
522 else
523 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
524 break;
525
526 case CTZ:
527 arg0 &= GET_MODE_MASK (mode);
528 if (arg0 == 0)
529 {
530 /* Even if the value at zero is undefined, we have to come
531 up with some replacement. Seems good enough. */
532 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
533 val = GET_MODE_BITSIZE (mode);
534 }
535 else
536 val = exact_log2 (arg0 & -arg0);
537 break;
538
539 case POPCOUNT:
540 arg0 &= GET_MODE_MASK (mode);
541 val = 0;
542 while (arg0)
543 val++, arg0 &= arg0 - 1;
544 break;
545
546 case PARITY:
547 arg0 &= GET_MODE_MASK (mode);
548 val = 0;
549 while (arg0)
550 val++, arg0 &= arg0 - 1;
551 val &= 1;
552 break;
553
554 case TRUNCATE:
555 val = arg0;
556 break;
557
558 case ZERO_EXTEND:
559 /* When zero-extending a CONST_INT, we need to know its
560 original mode. */
561 if (op_mode == VOIDmode)
562 abort ();
563 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
564 {
565 /* If we were really extending the mode,
566 we would have to distinguish between zero-extension
567 and sign-extension. */
568 if (width != GET_MODE_BITSIZE (op_mode))
569 abort ();
570 val = arg0;
571 }
572 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
573 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
574 else
575 return 0;
576 break;
577
578 case SIGN_EXTEND:
579 if (op_mode == VOIDmode)
580 op_mode = mode;
581 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
582 {
583 /* If we were really extending the mode,
584 we would have to distinguish between zero-extension
585 and sign-extension. */
586 if (width != GET_MODE_BITSIZE (op_mode))
587 abort ();
588 val = arg0;
589 }
590 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
591 {
592 val
593 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
594 if (val
595 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
596 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
597 }
598 else
599 return 0;
600 break;
601
602 case SQRT:
603 case FLOAT_EXTEND:
604 case FLOAT_TRUNCATE:
605 case SS_TRUNCATE:
606 case US_TRUNCATE:
607 return 0;
608
609 default:
610 abort ();
611 }
612
613 val = trunc_int_for_mode (val, mode);
614
615 return GEN_INT (val);
616 }
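/* Illustrative examples of the single-word folds above, in QImode
   (8-bit GET_MODE_MASK): NOT of (const_int 0) yields (const_int -1),
   POPCOUNT of (const_int 0x55) yields (const_int 4), and CLZ of
   (const_int 1) yields (const_int 7), each canonicalized by
   trunc_int_for_mode before GEN_INT.  */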
617
618 /* We can do some operations on integer CONST_DOUBLEs. Also allow
619 for a DImode operation on a CONST_INT. */
620 else if (GET_MODE (trueop) == VOIDmode
621 && width <= HOST_BITS_PER_WIDE_INT * 2
622 && (GET_CODE (trueop) == CONST_DOUBLE
623 || GET_CODE (trueop) == CONST_INT))
624 {
625 unsigned HOST_WIDE_INT l1, lv;
626 HOST_WIDE_INT h1, hv;
627
628 if (GET_CODE (trueop) == CONST_DOUBLE)
629 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
630 else
631 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
632
633 switch (code)
634 {
635 case NOT:
636 lv = ~ l1;
637 hv = ~ h1;
638 break;
639
640 case NEG:
641 neg_double (l1, h1, &lv, &hv);
642 break;
643
644 case ABS:
645 if (h1 < 0)
646 neg_double (l1, h1, &lv, &hv);
647 else
648 lv = l1, hv = h1;
649 break;
650
651 case FFS:
652 hv = 0;
653 if (l1 == 0)
654 {
655 if (h1 == 0)
656 lv = 0;
657 else
658 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
659 }
660 else
661 lv = exact_log2 (l1 & -l1) + 1;
662 break;
663
664 case CLZ:
665 hv = 0;
666 if (h1 != 0)
667 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
668 - HOST_BITS_PER_WIDE_INT;
669 else if (l1 != 0)
670 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
671 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
672 lv = GET_MODE_BITSIZE (mode);
673 break;
674
675 case CTZ:
676 hv = 0;
677 if (l1 != 0)
678 lv = exact_log2 (l1 & -l1);
679 else if (h1 != 0)
680 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
681 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
682 lv = GET_MODE_BITSIZE (mode);
683 break;
684
685 case POPCOUNT:
686 hv = 0;
687 lv = 0;
688 while (l1)
689 lv++, l1 &= l1 - 1;
690 while (h1)
691 lv++, h1 &= h1 - 1;
692 break;
693
694 case PARITY:
695 hv = 0;
696 lv = 0;
697 while (l1)
698 lv++, l1 &= l1 - 1;
699 while (h1)
700 lv++, h1 &= h1 - 1;
701 lv &= 1;
702 break;
703
704 case TRUNCATE:
705 /* This is just a change-of-mode, so do nothing. */
706 lv = l1, hv = h1;
707 break;
708
709 case ZERO_EXTEND:
710 if (op_mode == VOIDmode)
711 abort ();
712
713 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
714 return 0;
715
716 hv = 0;
717 lv = l1 & GET_MODE_MASK (op_mode);
718 break;
719
720 case SIGN_EXTEND:
721 if (op_mode == VOIDmode
722 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
723 return 0;
724 else
725 {
726 lv = l1 & GET_MODE_MASK (op_mode);
727 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
728 && (lv & ((HOST_WIDE_INT) 1
729 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
730 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
731
732 hv = HWI_SIGN_EXTEND (lv);
733 }
734 break;
735
736 case SQRT:
737 return 0;
738
739 default:
740 return 0;
741 }
742
743 return immed_double_const (lv, hv, mode);
744 }
745
746 else if (GET_CODE (trueop) == CONST_DOUBLE
747 && GET_MODE_CLASS (mode) == MODE_FLOAT)
748 {
749 REAL_VALUE_TYPE d, t;
750 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
751
752 switch (code)
753 {
754 case SQRT:
755 if (HONOR_SNANS (mode) && real_isnan (&d))
756 return 0;
757 real_sqrt (&t, mode, &d);
758 d = t;
759 break;
760 case ABS:
761 d = REAL_VALUE_ABS (d);
762 break;
763 case NEG:
764 d = REAL_VALUE_NEGATE (d);
765 break;
766 case FLOAT_TRUNCATE:
767 d = real_value_truncate (mode, d);
768 break;
769 case FLOAT_EXTEND:
770 /* All this does is change the mode. */
771 break;
772 case FIX:
773 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
774 break;
775
776 default:
777 abort ();
778 }
779 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
780 }
781
782 else if (GET_CODE (trueop) == CONST_DOUBLE
783 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
784 && GET_MODE_CLASS (mode) == MODE_INT
785 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
786 {
787 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
788 operators are intentionally left unspecified (to ease implementation
789 by target backends), for consistency, this routine implements the
790 same semantics for constant folding as used by the middle-end. */
791
792 HOST_WIDE_INT xh, xl, th, tl;
793 REAL_VALUE_TYPE x, t;
794 REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
795 switch (code)
796 {
797 case FIX:
798 if (REAL_VALUE_ISNAN (x))
799 return const0_rtx;
800
801 /* Test against the signed upper bound. */
802 if (width > HOST_BITS_PER_WIDE_INT)
803 {
804 th = ((unsigned HOST_WIDE_INT) 1
805 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
806 tl = -1;
807 }
808 else
809 {
810 th = 0;
811 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
812 }
813 real_from_integer (&t, VOIDmode, tl, th, 0);
814 if (REAL_VALUES_LESS (t, x))
815 {
816 xh = th;
817 xl = tl;
818 break;
819 }
820
821 /* Test against the signed lower bound. */
822 if (width > HOST_BITS_PER_WIDE_INT)
823 {
824 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
825 tl = 0;
826 }
827 else
828 {
829 th = -1;
830 tl = (HOST_WIDE_INT) -1 << (width - 1);
831 }
832 real_from_integer (&t, VOIDmode, tl, th, 0);
833 if (REAL_VALUES_LESS (x, t))
834 {
835 xh = th;
836 xl = tl;
837 break;
838 }
839 REAL_VALUE_TO_INT (&xl, &xh, x);
840 break;
841
842 case UNSIGNED_FIX:
843 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
844 return const0_rtx;
845
846 /* Test against the unsigned upper bound. */
847 if (width == 2*HOST_BITS_PER_WIDE_INT)
848 {
849 th = -1;
850 tl = -1;
851 }
852 else if (width >= HOST_BITS_PER_WIDE_INT)
853 {
854 th = ((unsigned HOST_WIDE_INT) 1
855 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
856 tl = -1;
857 }
858 else
859 {
860 th = 0;
861 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
862 }
863 real_from_integer (&t, VOIDmode, tl, th, 1);
864 if (REAL_VALUES_LESS (t, x))
865 {
866 xh = th;
867 xl = tl;
868 break;
869 }
870
871 REAL_VALUE_TO_INT (&xl, &xh, x);
872 break;
873
874 default:
875 abort ();
876 }
877 return immed_double_const (xl, xh, mode);
878 }
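/* Illustrative example of the saturating fold above: (fix:SI
   (const_double 1.0e10)) compares 1.0e10 against the signed upper
   bound 0x7fffffff of 32-bit SImode, finds it larger, and returns that
   bound; a NaN operand folds to const0_rtx.  */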
879
880 /* This was formerly used only for non-IEEE float.
881 eggert@twinsun.com says it is safe for IEEE also. */
882 else
883 {
884 enum rtx_code reversed;
885 rtx temp;
886
887 /* There are some simplifications we can do even if the operands
888 aren't constant. */
889 switch (code)
890 {
891 case NOT:
892 /* (not (not X)) == X. */
893 if (GET_CODE (op) == NOT)
894 return XEXP (op, 0);
895
896 /* (not (eq X Y)) == (ne X Y), etc. */
897 if (GET_RTX_CLASS (GET_CODE (op)) == '<'
898 && (mode == BImode || STORE_FLAG_VALUE == -1)
899 && ((reversed = reversed_comparison_code (op, NULL_RTX))
900 != UNKNOWN))
901 return simplify_gen_relational (reversed, mode, VOIDmode,
902 XEXP (op, 0), XEXP (op, 1));
903
904 /* (not (plus X -1)) can become (neg X). */
905 if (GET_CODE (op) == PLUS
906 && XEXP (op, 1) == constm1_rtx)
907 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
908
909 /* Similarly, (not (neg X)) is (plus X -1). */
910 if (GET_CODE (op) == NEG)
911 return plus_constant (XEXP (op, 0), -1);
912
913 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
914 if (GET_CODE (op) == XOR
915 && GET_CODE (XEXP (op, 1)) == CONST_INT
916 && (temp = simplify_unary_operation (NOT, mode,
917 XEXP (op, 1),
918 mode)) != 0)
919 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
920
921
922 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
923 operands other than 1, but that is not valid. We could do a
924 similar simplification for (not (lshiftrt C X)) where C is
925 just the sign bit, but this doesn't seem common enough to
926 bother with. */
927 if (GET_CODE (op) == ASHIFT
928 && XEXP (op, 0) == const1_rtx)
929 {
930 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
931 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
932 }
933
934 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
935 by reversing the comparison code if valid. */
936 if (STORE_FLAG_VALUE == -1
937 && GET_RTX_CLASS (GET_CODE (op)) == '<'
938 && (reversed = reversed_comparison_code (op, NULL_RTX))
939 != UNKNOWN)
940 return simplify_gen_relational (reversed, mode, VOIDmode,
941 XEXP (op, 0), XEXP (op, 1));
942
943 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
944 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
945 so we can perform the above simplification. */
946
947 if (STORE_FLAG_VALUE == -1
948 && GET_CODE (op) == ASHIFTRT
949 && GET_CODE (XEXP (op, 1)) == CONST_INT
950 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
951 return simplify_gen_relational (GE, mode, VOIDmode,
952 XEXP (op, 0), const0_rtx);
953
954 break;
955
956 case NEG:
957 /* (neg (neg X)) == X. */
958 if (GET_CODE (op) == NEG)
959 return XEXP (op, 0);
960
961 /* (neg (plus X 1)) can become (not X). */
962 if (GET_CODE (op) == PLUS
963 && XEXP (op, 1) == const1_rtx)
964 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
965
966 /* Similarly, (neg (not X)) is (plus X 1). */
967 if (GET_CODE (op) == NOT)
968 return plus_constant (XEXP (op, 0), 1);
969
970 /* (neg (minus X Y)) can become (minus Y X). This transformation
971 isn't safe for modes with signed zeros, since if X and Y are
972 both +0, (minus Y X) is the same as (minus X Y). If the
973 rounding mode is towards +infinity (or -infinity) then the two
974 expressions will be rounded differently. */
975 if (GET_CODE (op) == MINUS
976 && !HONOR_SIGNED_ZEROS (mode)
977 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
978 return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
979 XEXP (op, 0));
980
981 if (GET_CODE (op) == PLUS
982 && !HONOR_SIGNED_ZEROS (mode)
983 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
984 {
985 /* (neg (plus A C)) is simplified to (minus -C A). */
986 if (GET_CODE (XEXP (op, 1)) == CONST_INT
987 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
988 {
989 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
990 mode);
991 if (temp)
992 return simplify_gen_binary (MINUS, mode, temp,
993 XEXP (op, 0));
994 }
995
996 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
997 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
998 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
999 }
1000
1001 /* (neg (mult A B)) becomes (mult (neg A) B).
1002 This works even for floating-point values. */
1003 if (GET_CODE (op) == MULT
1004 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1005 {
1006 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1007 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
1008 }
1009
1010 /* NEG commutes with ASHIFT since it is multiplication. Only do
1011 this if we can then eliminate the NEG (e.g., if the operand
1012 is a constant). */
1013 if (GET_CODE (op) == ASHIFT)
1014 {
1015 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
1016 mode);
1017 if (temp)
1018 return simplify_gen_binary (ASHIFT, mode, temp,
1019 XEXP (op, 1));
1020 }
1021
1022 break;
1023
1024 case SIGN_EXTEND:
1025 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1026 becomes just the MINUS if its mode is MODE. This allows
1027 folding switch statements on machines using casesi (such as
1028 the VAX). */
1029 if (GET_CODE (op) == TRUNCATE
1030 && GET_MODE (XEXP (op, 0)) == mode
1031 && GET_CODE (XEXP (op, 0)) == MINUS
1032 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1033 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1034 return XEXP (op, 0);
1035
1036 /* Check for a sign extension of a subreg of a promoted
1037 variable, where the promotion is sign-extended, and the
1038 target mode is the same as the variable's promotion. */
1039 if (GET_CODE (op) == SUBREG
1040 && SUBREG_PROMOTED_VAR_P (op)
1041 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1042 && GET_MODE (XEXP (op, 0)) == mode)
1043 return XEXP (op, 0);
1044
1045 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1046 if (! POINTERS_EXTEND_UNSIGNED
1047 && mode == Pmode && GET_MODE (op) == ptr_mode
1048 && (CONSTANT_P (op)
1049 || (GET_CODE (op) == SUBREG
1050 && GET_CODE (SUBREG_REG (op)) == REG
1051 && REG_POINTER (SUBREG_REG (op))
1052 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1053 return convert_memory_address (Pmode, op);
1054 #endif
1055 break;
1056
1057 case ZERO_EXTEND:
1058 /* Check for a zero extension of a subreg of a promoted
1059 variable, where the promotion is zero-extended, and the
1060 target mode is the same as the variable's promotion. */
1061 if (GET_CODE (op) == SUBREG
1062 && SUBREG_PROMOTED_VAR_P (op)
1063 && SUBREG_PROMOTED_UNSIGNED_P (op)
1064 && GET_MODE (XEXP (op, 0)) == mode)
1065 return XEXP (op, 0);
1066
1067 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1068 if (POINTERS_EXTEND_UNSIGNED > 0
1069 && mode == Pmode && GET_MODE (op) == ptr_mode
1070 && (CONSTANT_P (op)
1071 || (GET_CODE (op) == SUBREG
1072 && GET_CODE (SUBREG_REG (op)) == REG
1073 && REG_POINTER (SUBREG_REG (op))
1074 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1075 return convert_memory_address (Pmode, op);
1076 #endif
1077 break;
1078
1079 default:
1080 break;
1081 }
1082
1083 return 0;
1084 }
1085 }
1086 \f
1087 /* Subroutine of simplify_associative_operation. Return true if rtx OP
1088 is a suitable integer or floating point immediate constant. */
1089 static bool
1090 associative_constant_p (rtx op)
1091 {
1092 if (GET_CODE (op) == CONST_INT
1093 || GET_CODE (op) == CONST_DOUBLE)
1094 return true;
1095 op = avoid_constant_pool_reference (op);
1096 return GET_CODE (op) == CONST_INT
1097 || GET_CODE (op) == CONST_DOUBLE;
1098 }
1099
1100 /* Subroutine of simplify_binary_operation to simplify an associative
1101 binary operation CODE with result mode MODE, operating on OP0 and OP1.
1102 Return 0 if no simplification is possible. */
1103 static rtx
1104 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1105 rtx op0, rtx op1)
1106 {
1107 rtx tem;
1108
1109 /* Simplify (x op c1) op c2 as x op (c1 op c2). */
1110 if (GET_CODE (op0) == code
1111 && associative_constant_p (op1)
1112 && associative_constant_p (XEXP (op0, 1)))
1113 {
1114 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1115 if (! tem)
1116 return tem;
1117 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1118 }
1119
1120 /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2). */
1121 if (GET_CODE (op0) == code
1122 && GET_CODE (op1) == code
1123 && associative_constant_p (XEXP (op0, 1))
1124 && associative_constant_p (XEXP (op1, 1)))
1125 {
1126 rtx c = simplify_binary_operation (code, mode,
1127 XEXP (op0, 1), XEXP (op1, 1));
1128 if (! c)
1129 return 0;
1130 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1131 return simplify_gen_binary (code, mode, tem, c);
1132 }
1133
1134 /* Canonicalize (x op c) op y as (x op y) op c. */
1135 if (GET_CODE (op0) == code
1136 && associative_constant_p (XEXP (op0, 1)))
1137 {
1138 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1139 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1140 }
1141
1142 /* Canonicalize x op (y op c) as (x op y) op c. */
1143 if (GET_CODE (op1) == code
1144 && associative_constant_p (XEXP (op1, 1)))
1145 {
1146 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1147 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1148 }
1149
1150 return 0;
1151 }
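/* Usage sketch (illustrative): called with code PLUS on
   (plus x (const_int 3)) and (const_int 4), the first pattern above
   fires and the result is (plus x (const_int 7)); the two
   canonicalization rules at the end likewise float constants outward
   into (op (op x y) c) form even when they cannot be folded.  */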
1152
1153 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1154 and OP1. Return 0 if no simplification is possible.
1155
1156 Don't use this for relational operations such as EQ or LT.
1157 Use simplify_relational_operation instead. */
1158 rtx
1159 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1160 rtx op0, rtx op1)
1161 {
1162 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
1163 HOST_WIDE_INT val;
1164 unsigned int width = GET_MODE_BITSIZE (mode);
1165 rtx tem;
1166 rtx trueop0 = avoid_constant_pool_reference (op0);
1167 rtx trueop1 = avoid_constant_pool_reference (op1);
1168
1169 /* Relational operations don't work here. We must know the mode
1170 of the operands in order to do the comparison correctly.
1171 Assuming a full word can give incorrect results.
1172 Consider comparing 128 with -128 in QImode. */
1173
1174 if (GET_RTX_CLASS (code) == '<')
1175 abort ();
1176
1177 /* Make sure the constant is second. */
1178 if (GET_RTX_CLASS (code) == 'c'
1179 && swap_commutative_operands_p (trueop0, trueop1))
1180 {
1181 tem = op0, op0 = op1, op1 = tem;
1182 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
1183 }
1184
1185 if (VECTOR_MODE_P (mode)
1186 && GET_CODE (trueop0) == CONST_VECTOR
1187 && GET_CODE (trueop1) == CONST_VECTOR)
1188 {
1189 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1190 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1191 enum machine_mode op0mode = GET_MODE (trueop0);
1192 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
1193 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
1194 enum machine_mode op1mode = GET_MODE (trueop1);
1195 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
1196 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
1197 rtvec v = rtvec_alloc (n_elts);
1198 unsigned int i;
1199
1200 if (op0_n_elts != n_elts || op1_n_elts != n_elts)
1201 abort ();
1202
1203 for (i = 0; i < n_elts; i++)
1204 {
1205 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
1206 CONST_VECTOR_ELT (trueop0, i),
1207 CONST_VECTOR_ELT (trueop1, i));
1208 if (!x)
1209 return 0;
1210 RTVEC_ELT (v, i) = x;
1211 }
1212
1213 return gen_rtx_CONST_VECTOR (mode, v);
1214 }
1215
1216 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1217 && GET_CODE (trueop0) == CONST_DOUBLE
1218 && GET_CODE (trueop1) == CONST_DOUBLE
1219 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
1220 {
1221 REAL_VALUE_TYPE f0, f1, value;
1222
1223 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
1224 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
1225 f0 = real_value_truncate (mode, f0);
1226 f1 = real_value_truncate (mode, f1);
1227
1228 if (HONOR_SNANS (mode)
1229 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
1230 return 0;
1231
1232 if (code == DIV
1233 && REAL_VALUES_EQUAL (f1, dconst0)
1234 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1235 return 0;
1236
1237 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
1238
1239 value = real_value_truncate (mode, value);
1240 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
1241 }
1242
1243 /* We can fold some multi-word operations. */
1244 if (GET_MODE_CLASS (mode) == MODE_INT
1245 && width == HOST_BITS_PER_WIDE_INT * 2
1246 && (GET_CODE (trueop0) == CONST_DOUBLE
1247 || GET_CODE (trueop0) == CONST_INT)
1248 && (GET_CODE (trueop1) == CONST_DOUBLE
1249 || GET_CODE (trueop1) == CONST_INT))
1250 {
1251 unsigned HOST_WIDE_INT l1, l2, lv;
1252 HOST_WIDE_INT h1, h2, hv;
1253
1254 if (GET_CODE (trueop0) == CONST_DOUBLE)
1255 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
1256 else
1257 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
1258
1259 if (GET_CODE (trueop1) == CONST_DOUBLE)
1260 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
1261 else
1262 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
1263
1264 switch (code)
1265 {
1266 case MINUS:
1267 /* A - B == A + (-B). */
1268 neg_double (l2, h2, &lv, &hv);
1269 l2 = lv, h2 = hv;
1270
1271 /* Fall through.... */
1272
1273 case PLUS:
1274 add_double (l1, h1, l2, h2, &lv, &hv);
1275 break;
1276
1277 case MULT:
1278 mul_double (l1, h1, l2, h2, &lv, &hv);
1279 break;
1280
1281 case DIV: case MOD: case UDIV: case UMOD:
1282 /* We'd need to include tree.h to do this and it doesn't seem worth
1283 it. */
1284 return 0;
1285
1286 case AND:
1287 lv = l1 & l2, hv = h1 & h2;
1288 break;
1289
1290 case IOR:
1291 lv = l1 | l2, hv = h1 | h2;
1292 break;
1293
1294 case XOR:
1295 lv = l1 ^ l2, hv = h1 ^ h2;
1296 break;
1297
1298 case SMIN:
1299 if (h1 < h2
1300 || (h1 == h2
1301 && ((unsigned HOST_WIDE_INT) l1
1302 < (unsigned HOST_WIDE_INT) l2)))
1303 lv = l1, hv = h1;
1304 else
1305 lv = l2, hv = h2;
1306 break;
1307
1308 case SMAX:
1309 if (h1 > h2
1310 || (h1 == h2
1311 && ((unsigned HOST_WIDE_INT) l1
1312 > (unsigned HOST_WIDE_INT) l2)))
1313 lv = l1, hv = h1;
1314 else
1315 lv = l2, hv = h2;
1316 break;
1317
1318 case UMIN:
1319 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1320 || (h1 == h2
1321 && ((unsigned HOST_WIDE_INT) l1
1322 < (unsigned HOST_WIDE_INT) l2)))
1323 lv = l1, hv = h1;
1324 else
1325 lv = l2, hv = h2;
1326 break;
1327
1328 case UMAX:
1329 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1330 || (h1 == h2
1331 && ((unsigned HOST_WIDE_INT) l1
1332 > (unsigned HOST_WIDE_INT) l2)))
1333 lv = l1, hv = h1;
1334 else
1335 lv = l2, hv = h2;
1336 break;
1337
1338 case LSHIFTRT: case ASHIFTRT:
1339 case ASHIFT:
1340 case ROTATE: case ROTATERT:
1341 #ifdef SHIFT_COUNT_TRUNCATED
1342 if (SHIFT_COUNT_TRUNCATED)
1343 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1344 #endif
1345
1346 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1347 return 0;
1348
1349 if (code == LSHIFTRT || code == ASHIFTRT)
1350 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1351 code == ASHIFTRT);
1352 else if (code == ASHIFT)
1353 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1354 else if (code == ROTATE)
1355 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1356 else /* code == ROTATERT */
1357 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1358 break;
1359
1360 default:
1361 return 0;
1362 }
1363
1364 return immed_double_const (lv, hv, mode);
1365 }
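/* Illustrative example (assumes a 32-bit HOST_WIDE_INT, making DImode
   exactly two words wide): the sum of two DImode CONST_DOUBLEs goes
   through add_double above, and the resulting (lv, hv) pair is rebuilt
   into a constant by immed_double_const.  DIV and friends are
   deliberately left unfolded to avoid pulling in tree.h.  */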
1366
1367 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1368 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1369 {
1370 /* Even if we can't compute a constant result,
1371 there are some cases worth simplifying. */
1372
1373 switch (code)
1374 {
1375 case PLUS:
1376 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1377 when x is NaN, infinite, or finite and nonzero. They aren't
1378 when x is -0 and the rounding mode is not towards -infinity,
1379 since (-0) + 0 is then 0. */
1380 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1381 return op0;
1382
1383 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1384 transformations are safe even for IEEE. */
1385 if (GET_CODE (op0) == NEG)
1386 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1387 else if (GET_CODE (op1) == NEG)
1388 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1389
1390 /* (~a) + 1 -> -a */
1391 if (INTEGRAL_MODE_P (mode)
1392 && GET_CODE (op0) == NOT
1393 && trueop1 == const1_rtx)
1394 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1395
1396 /* Handle both-operands-constant cases. We can only add
1397 CONST_INTs to constants since the sum of relocatable symbols
1398 can't be handled by most assemblers. Don't add CONST_INT
1399 to CONST_INT since overflow won't be computed properly if wider
1400 than HOST_BITS_PER_WIDE_INT. */
1401
1402 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1403 && GET_CODE (op1) == CONST_INT)
1404 return plus_constant (op0, INTVAL (op1));
1405 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1406 && GET_CODE (op0) == CONST_INT)
1407 return plus_constant (op1, INTVAL (op0));
1408
1409 /* See if this is something like X * C - X or vice versa or
1410 if the multiplication is written as a shift. If so, we can
1411 distribute and make a new multiply, shift, or maybe just
1412 have X (if C is 2 in the example above). But don't make
1413 a real multiply if we didn't have one before. */
1414
1415 if (! FLOAT_MODE_P (mode))
1416 {
1417 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1418 rtx lhs = op0, rhs = op1;
1419 int had_mult = 0;
1420
1421 if (GET_CODE (lhs) == NEG)
1422 coeff0 = -1, lhs = XEXP (lhs, 0);
1423 else if (GET_CODE (lhs) == MULT
1424 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1425 {
1426 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1427 had_mult = 1;
1428 }
1429 else if (GET_CODE (lhs) == ASHIFT
1430 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1431 && INTVAL (XEXP (lhs, 1)) >= 0
1432 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1433 {
1434 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1435 lhs = XEXP (lhs, 0);
1436 }
1437
1438 if (GET_CODE (rhs) == NEG)
1439 coeff1 = -1, rhs = XEXP (rhs, 0);
1440 else if (GET_CODE (rhs) == MULT
1441 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1442 {
1443 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1444 had_mult = 1;
1445 }
1446 else if (GET_CODE (rhs) == ASHIFT
1447 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1448 && INTVAL (XEXP (rhs, 1)) >= 0
1449 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1450 {
1451 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1452 rhs = XEXP (rhs, 0);
1453 }
1454
1455 if (rtx_equal_p (lhs, rhs))
1456 {
1457 tem = simplify_gen_binary (MULT, mode, lhs,
1458 GEN_INT (coeff0 + coeff1));
1459 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1460 }
1461 }
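/* Illustrative example of the distribution above:
   (plus (mult x (const_int 3)) x) gives coeff0 == 3 and coeff1 == 1,
   so the sum becomes (mult x (const_int 4)); the had_mult check keeps
   us from synthesizing a multiply where none existed, e.g. for
   (plus x x).  */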
1462
1463 /* If one of the operands is a PLUS or a MINUS, see if we can
1464 simplify this by the associative law.
1465 Don't use the associative law for floating point.
1466 The inaccuracy makes it nonassociative,
1467 and subtle programs can break if operations are associated. */
1468
1469 if (INTEGRAL_MODE_P (mode)
1470 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1471 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1472 || (GET_CODE (op0) == CONST
1473 && GET_CODE (XEXP (op0, 0)) == PLUS)
1474 || (GET_CODE (op1) == CONST
1475 && GET_CODE (XEXP (op1, 0)) == PLUS))
1476 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1477 return tem;
1478
1479 /* Reassociate floating point addition only when the user
1480 specifies unsafe math optimizations. */
1481 if (FLOAT_MODE_P (mode)
1482 && flag_unsafe_math_optimizations)
1483 {
1484 tem = simplify_associative_operation (code, mode, op0, op1);
1485 if (tem)
1486 return tem;
1487 }
1488 break;
1489
1490 case COMPARE:
1491 #ifdef HAVE_cc0
1492 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1493 using cc0, in which case we want to leave it as a COMPARE
1494 so we can distinguish it from a register-register-copy.
1495
1496 In IEEE floating point, x-0 is not the same as x. */
1497
1498 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1499 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1500 && trueop1 == CONST0_RTX (mode))
1501 return op0;
1502 #endif
1503
1504 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1505 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1506 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1507 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1508 {
1509 rtx xop00 = XEXP (op0, 0);
1510 rtx xop10 = XEXP (op1, 0);
1511
1512 #ifdef HAVE_cc0
1513 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1514 #else
1515 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1516 && GET_MODE (xop00) == GET_MODE (xop10)
1517 && REGNO (xop00) == REGNO (xop10)
1518 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1519 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1520 #endif
1521 return xop00;
1522 }
1523 break;
1524
1525 case MINUS:
1526 /* We can't assume x-x is 0 even with non-IEEE floating point,
1527 but since it is zero except in very strange circumstances, we
1528 will treat it as zero with -funsafe-math-optimizations. */
1529 if (rtx_equal_p (trueop0, trueop1)
1530 && ! side_effects_p (op0)
1531 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1532 return CONST0_RTX (mode);
1533
1534 /* Change subtraction from zero into negation. (0 - x) is the
1535 same as -x when x is NaN, infinite, or finite and nonzero.
1536 But if the mode has signed zeros, and does not round towards
1537 -infinity, then 0 - 0 is 0, not -0. */
1538 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1539 return simplify_gen_unary (NEG, mode, op1, mode);
1540
1541 /* (-1 - a) is ~a. */
1542 if (trueop0 == constm1_rtx)
1543 return simplify_gen_unary (NOT, mode, op1, mode);
1544
1545 /* Subtracting 0 has no effect unless the mode has signed zeros
1546 and supports rounding towards -infinity. In such a case,
1547 0 - 0 is -0. */
1548 if (!(HONOR_SIGNED_ZEROS (mode)
1549 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1550 && trueop1 == CONST0_RTX (mode))
1551 return op0;
1552
1553 /* See if this is something like X * C - X or vice versa or
1554 if the multiplication is written as a shift. If so, we can
1555 distribute and make a new multiply, shift, or maybe just
1556 have X (if C is 2 in the example above). But don't make
1557 a real multiply if we didn't have one before. */
1558
1559 if (! FLOAT_MODE_P (mode))
1560 {
1561 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1562 rtx lhs = op0, rhs = op1;
1563 int had_mult = 0;
1564
1565 if (GET_CODE (lhs) == NEG)
1566 coeff0 = -1, lhs = XEXP (lhs, 0);
1567 else if (GET_CODE (lhs) == MULT
1568 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1569 {
1570 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1571 had_mult = 1;
1572 }
1573 else if (GET_CODE (lhs) == ASHIFT
1574 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1575 && INTVAL (XEXP (lhs, 1)) >= 0
1576 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1577 {
1578 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1579 lhs = XEXP (lhs, 0);
1580 }
1581
1582 if (GET_CODE (rhs) == NEG)
1583 coeff1 = - 1, rhs = XEXP (rhs, 0);
1584 else if (GET_CODE (rhs) == MULT
1585 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1586 {
1587 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1588 had_mult = 1;
1589 }
1590 else if (GET_CODE (rhs) == ASHIFT
1591 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1592 && INTVAL (XEXP (rhs, 1)) >= 0
1593 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1594 {
1595 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1596 rhs = XEXP (rhs, 0);
1597 }
1598
1599 if (rtx_equal_p (lhs, rhs))
1600 {
1601 tem = simplify_gen_binary (MULT, mode, lhs,
1602 GEN_INT (coeff0 - coeff1));
1603 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1604 }
1605 }
1606
1607 /* (a - (-b)) -> (a + b). True even for IEEE. */
1608 if (GET_CODE (op1) == NEG)
1609 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1610
1611 /* (-x - c) may be simplified as (-c - x). */
1612 if (GET_CODE (op0) == NEG
1613 && (GET_CODE (op1) == CONST_INT
1614 || GET_CODE (op1) == CONST_DOUBLE))
1615 {
1616 tem = simplify_unary_operation (NEG, mode, op1, mode);
1617 if (tem)
1618 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1619 }
1620
1621 /* If one of the operands is a PLUS or a MINUS, see if we can
1622 simplify this by the associative law.
1623 Don't use the associative law for floating point.
1624 The inaccuracy makes it nonassociative,
1625 and subtle programs can break if operations are associated. */
1626
1627 if (INTEGRAL_MODE_P (mode)
1628 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1629 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1630 || (GET_CODE (op0) == CONST
1631 && GET_CODE (XEXP (op0, 0)) == PLUS)
1632 || (GET_CODE (op1) == CONST
1633 && GET_CODE (XEXP (op1, 0)) == PLUS))
1634 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1635 return tem;
1636
1637 /* Don't let a relocatable value get a negative coeff. */
1638 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1639 return simplify_gen_binary (PLUS, mode,
1640 op0,
1641 neg_const_int (mode, op1));
1642
1643 /* (x - (x & y)) -> (x & ~y) */
1644 if (GET_CODE (op1) == AND)
1645 {
1646 if (rtx_equal_p (op0, XEXP (op1, 0)))
1647 {
1648 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1649 GET_MODE (XEXP (op1, 1)));
1650 return simplify_gen_binary (AND, mode, op0, tem);
1651 }
1652 if (rtx_equal_p (op0, XEXP (op1, 1)))
1653 {
1654 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1655 GET_MODE (XEXP (op1, 0)));
1656 return simplify_gen_binary (AND, mode, op0, tem);
1657 }
1658 }
1659 break;
1660
1661 case MULT:
1662 if (trueop1 == constm1_rtx)
1663 return simplify_gen_unary (NEG, mode, op0, mode);
1664
1665 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1666 x is NaN, since x * 0 is then also NaN. Nor is it valid
1667 when the mode has signed zeros, since multiplying a negative
1668 number by 0 will give -0, not 0. */
1669 if (!HONOR_NANS (mode)
1670 && !HONOR_SIGNED_ZEROS (mode)
1671 && trueop1 == CONST0_RTX (mode)
1672 && ! side_effects_p (op0))
1673 return op1;
1674
1675 /* In IEEE floating point, x*1 is not equivalent to x for
1676 signalling NaNs. */
1677 if (!HONOR_SNANS (mode)
1678 && trueop1 == CONST1_RTX (mode))
1679 return op0;
1680
1681 /* Convert multiply by constant power of two into shift unless
1682 we are still generating RTL. This test is a kludge. */
1683 if (GET_CODE (trueop1) == CONST_INT
1684 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1685 /* If the mode is larger than the host word size, and the
1686 uppermost bit is set, then this isn't a power of two due
1687 to implicit sign extension. */
1688 && (width <= HOST_BITS_PER_WIDE_INT
1689 || val != HOST_BITS_PER_WIDE_INT - 1)
1690 && ! rtx_equal_function_value_matters)
1691 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1692
1693 /* x*2 is x+x and x*(-1) is -x */
1694 if (GET_CODE (trueop1) == CONST_DOUBLE
1695 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1696 && GET_MODE (op0) == mode)
1697 {
1698 REAL_VALUE_TYPE d;
1699 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1700
1701 if (REAL_VALUES_EQUAL (d, dconst2))
1702 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1703
1704 if (REAL_VALUES_EQUAL (d, dconstm1))
1705 return simplify_gen_unary (NEG, mode, op0, mode);
1706 }
1707
1708 /* Reassociate multiplication, but for floating point MULTs
1709 only when the user specifies unsafe math optimizations. */
1710 if (! FLOAT_MODE_P (mode)
1711 || flag_unsafe_math_optimizations)
1712 {
1713 tem = simplify_associative_operation (code, mode, op0, op1);
1714 if (tem)
1715 return tem;
1716 }
1717 break;
1718
1719 case IOR:
1720 if (trueop1 == const0_rtx)
1721 return op0;
1722 if (GET_CODE (trueop1) == CONST_INT
1723 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1724 == GET_MODE_MASK (mode)))
1725 return op1;
1726 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1727 return op0;
1728 /* A | (~A) -> -1 */
1729 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1730 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1731 && ! side_effects_p (op0)
1732 && GET_MODE_CLASS (mode) != MODE_CC)
1733 return constm1_rtx;
1734 tem = simplify_associative_operation (code, mode, op0, op1);
1735 if (tem)
1736 return tem;
1737 break;
1738
1739 case XOR:
1740 if (trueop1 == const0_rtx)
1741 return op0;
1742 if (GET_CODE (trueop1) == CONST_INT
1743 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1744 == GET_MODE_MASK (mode)))
1745 return simplify_gen_unary (NOT, mode, op0, mode);
1746 if (trueop0 == trueop1 && ! side_effects_p (op0)
1747 && GET_MODE_CLASS (mode) != MODE_CC)
1748 return const0_rtx;
1749 tem = simplify_associative_operation (code, mode, op0, op1);
1750 if (tem)
1751 return tem;
1752 break;
1753
1754 case AND:
1755 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1756 return const0_rtx;
1757 if (GET_CODE (trueop1) == CONST_INT
1758 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1759 == GET_MODE_MASK (mode)))
1760 return op0;
1761 if (trueop0 == trueop1 && ! side_effects_p (op0)
1762 && GET_MODE_CLASS (mode) != MODE_CC)
1763 return op0;
1764 /* A & (~A) -> 0 */
1765 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1766 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1767 && ! side_effects_p (op0)
1768 && GET_MODE_CLASS (mode) != MODE_CC)
1769 return const0_rtx;
1770 tem = simplify_associative_operation (code, mode, op0, op1);
1771 if (tem)
1772 return tem;
1773 break;
1774
1775 case UDIV:
1776 /* Convert divide by power of two into shift (divide by 1 handled
1777 below). */
1778 if (GET_CODE (trueop1) == CONST_INT
1779 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1780 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
1781
1782 /* Fall through.... */
1783
1784 case DIV:
1785 if (trueop1 == CONST1_RTX (mode))
1786 {
1787 /* On some platforms DIV uses a narrower mode than its
1788 operands. */
1789 rtx x = gen_lowpart_common (mode, op0);
1790 if (x)
1791 return x;
1792 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1793 return gen_lowpart_SUBREG (mode, op0);
1794 else
1795 return op0;
1796 }
1797
1798 /* Maybe change 0 / x to 0. This transformation isn't safe for
1799 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1800 Nor is it safe for modes with signed zeros, since dividing
1801 0 by a negative number gives -0, not 0. */
1802 if (!HONOR_NANS (mode)
1803 && !HONOR_SIGNED_ZEROS (mode)
1804 && trueop0 == CONST0_RTX (mode)
1805 && ! side_effects_p (op1))
1806 return op0;
1807
1808 /* Change division by a constant into multiplication. Only do
1809 this with -funsafe-math-optimizations. */
1810 else if (GET_CODE (trueop1) == CONST_DOUBLE
1811 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1812 && trueop1 != CONST0_RTX (mode)
1813 && flag_unsafe_math_optimizations)
1814 {
1815 REAL_VALUE_TYPE d;
1816 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1817
1818 if (! REAL_VALUES_EQUAL (d, dconst0))
1819 {
1820 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1821 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1822 return simplify_gen_binary (MULT, mode, op0, tem);
1823 }
1824 }
1825 break;
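/* Illustrative example of the reciprocal transformation above: with
   -funsafe-math-optimizations, (div:DF x (const_double 4.0)) becomes
   (mult:DF x (const_double 0.25)), trading the division for a multiply
   by the exact reciprocal.  */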
1826
1827 case UMOD:
1828 /* Handle modulus by power of two (mod with 1 handled below). */
1829 if (GET_CODE (trueop1) == CONST_INT
1830 && exact_log2 (INTVAL (trueop1)) > 0)
1831 return simplify_gen_binary (AND, mode, op0,
1832 GEN_INT (INTVAL (op1) - 1));
1833
1834 /* Fall through.... */
1835
1836 case MOD:
1837 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1838 && ! side_effects_p (op0) && ! side_effects_p (op1))
1839 return const0_rtx;
1840 break;
1841
1842 case ROTATERT:
1843 case ROTATE:
1844 case ASHIFTRT:
1845 /* Rotating ~0 always results in ~0. */
1846 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1847 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1848 && ! side_effects_p (op1))
1849 return op0;
1850
1851 /* Fall through.... */
1852
1853 case ASHIFT:
1854 case LSHIFTRT:
1855 if (trueop1 == const0_rtx)
1856 return op0;
1857 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1858 return op0;
1859 break;
1860
1861 case SMIN:
1862 if (width <= HOST_BITS_PER_WIDE_INT
1863 && GET_CODE (trueop1) == CONST_INT
1864 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
1865 && ! side_effects_p (op0))
1866 return op1;
1867 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1868 return op0;
1869 tem = simplify_associative_operation (code, mode, op0, op1);
1870 if (tem)
1871 return tem;
1872 break;
1873
1874 case SMAX:
1875 if (width <= HOST_BITS_PER_WIDE_INT
1876 && GET_CODE (trueop1) == CONST_INT
1877 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1878 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1879 && ! side_effects_p (op0))
1880 return op1;
1881 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1882 return op0;
1883 tem = simplify_associative_operation (code, mode, op0, op1);
1884 if (tem)
1885 return tem;
1886 break;
1887
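/* The unsigned minimum with 0 is 0, and the unsigned maximum
   with ~0 (constm1_rtx) is ~0.  */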
1888 case UMIN:
1889 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1890 return op1;
1891 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1892 return op0;
1893 tem = simplify_associative_operation (code, mode, op0, op1);
1894 if (tem)
1895 return tem;
1896 break;
1897
1898 case UMAX:
1899 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1900 return op1;
1901 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1902 return op0;
1903 tem = simplify_associative_operation (code, mode, op0, op1);
1904 if (tem)
1905 return tem;
1906 break;
1907
1908 case SS_PLUS:
1909 case US_PLUS:
1910 case SS_MINUS:
1911 case US_MINUS:
1912 /* ??? There are simplifications that can be done. */
1913 return 0;
1914
1915 case VEC_SELECT:
1916 if (!VECTOR_MODE_P (mode))
1917 {
1918 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1919 || (mode
1920 != GET_MODE_INNER (GET_MODE (trueop0)))
1921 || GET_CODE (trueop1) != PARALLEL
1922 || XVECLEN (trueop1, 0) != 1
1923 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
1924 abort ();
1925
1926 if (GET_CODE (trueop0) == CONST_VECTOR)
1927 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
1928 }
1929 else
1930 {
1931 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1932 || (GET_MODE_INNER (mode)
1933 != GET_MODE_INNER (GET_MODE (trueop0)))
1934 || GET_CODE (trueop1) != PARALLEL)
1935 abort ();
1936
1937 if (GET_CODE (trueop0) == CONST_VECTOR)
1938 {
1939 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1940 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1941 rtvec v = rtvec_alloc (n_elts);
1942 unsigned int i;
1943
1944 if (XVECLEN (trueop1, 0) != (int) n_elts)
1945 abort ();
1946 for (i = 0; i < n_elts; i++)
1947 {
1948 rtx x = XVECEXP (trueop1, 0, i);
1949
1950 if (GET_CODE (x) != CONST_INT)
1951 abort ();
1952 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
1953 }
1954
1955 return gen_rtx_CONST_VECTOR (mode, v);
1956 }
1957 }
1958 return 0;
1959 case VEC_CONCAT:
1960 {
1961 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1962 ? GET_MODE (trueop0)
1963 : GET_MODE_INNER (mode));
1964 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
1965 ? GET_MODE (trueop1)
1966 : GET_MODE_INNER (mode));
1967
1968 if (!VECTOR_MODE_P (mode)
1969 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
1970 != GET_MODE_SIZE (mode)))
1971 abort ();
1972
1973 if ((VECTOR_MODE_P (op0_mode)
1974 && (GET_MODE_INNER (mode)
1975 != GET_MODE_INNER (op0_mode)))
1976 || (!VECTOR_MODE_P (op0_mode)
1977 && GET_MODE_INNER (mode) != op0_mode))
1978 abort ();
1979
1980 if ((VECTOR_MODE_P (op1_mode)
1981 && (GET_MODE_INNER (mode)
1982 != GET_MODE_INNER (op1_mode)))
1983 || (!VECTOR_MODE_P (op1_mode)
1984 && GET_MODE_INNER (mode) != op1_mode))
1985 abort ();
1986
1987 if ((GET_CODE (trueop0) == CONST_VECTOR
1988 || GET_CODE (trueop0) == CONST_INT
1989 || GET_CODE (trueop0) == CONST_DOUBLE)
1990 && (GET_CODE (trueop1) == CONST_VECTOR
1991 || GET_CODE (trueop1) == CONST_INT
1992 || GET_CODE (trueop1) == CONST_DOUBLE))
1993 {
1994 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1995 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1996 rtvec v = rtvec_alloc (n_elts);
1997 unsigned int i;
1998 unsigned in_n_elts = 1;
1999
2000 if (VECTOR_MODE_P (op0_mode))
2001 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2002 for (i = 0; i < n_elts; i++)
2003 {
2004 if (i < in_n_elts)
2005 {
2006 if (!VECTOR_MODE_P (op0_mode))
2007 RTVEC_ELT (v, i) = trueop0;
2008 else
2009 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2010 }
2011 else
2012 {
2013 if (!VECTOR_MODE_P (op1_mode))
2014 RTVEC_ELT (v, i) = trueop1;
2015 else
2016 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2017 i - in_n_elts);
2018 }
2019 }
2020
2021 return gen_rtx_CONST_VECTOR (mode, v);
2022 }
2023 }
2024 return 0;
2025
2026 default:
2027 abort ();
2028 }
2029
2030 return 0;
2031 }
2032
2033 /* Get the integer argument values in two forms:
2034 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
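/* For example, in QImode (width == 8) the value 0xff gives
   arg0 == 0xff but arg0s == -1.  */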
2035
2036 arg0 = INTVAL (trueop0);
2037 arg1 = INTVAL (trueop1);
2038
2039 if (width < HOST_BITS_PER_WIDE_INT)
2040 {
2041 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2042 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2043
2044 arg0s = arg0;
2045 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2046 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2047
2048 arg1s = arg1;
2049 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2050 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2051 }
2052 else
2053 {
2054 arg0s = arg0;
2055 arg1s = arg1;
2056 }
2057
2058 /* Compute the value of the arithmetic. */
2059
2060 switch (code)
2061 {
2062 case PLUS:
2063 val = arg0s + arg1s;
2064 break;
2065
2066 case MINUS:
2067 val = arg0s - arg1s;
2068 break;
2069
2070 case MULT:
2071 val = arg0s * arg1s;
2072 break;
2073
2074 case DIV:
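/* Don't fold division by zero, nor the overflowing
   most-negative-value / -1.  */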
2075 if (arg1s == 0
2076 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2077 && arg1s == -1))
2078 return 0;
2079 val = arg0s / arg1s;
2080 break;
2081
2082 case MOD:
2083 if (arg1s == 0
2084 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2085 && arg1s == -1))
2086 return 0;
2087 val = arg0s % arg1s;
2088 break;
2089
2090 case UDIV:
2091 if (arg1 == 0
2092 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2093 && arg1s == -1))
2094 return 0;
2095 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2096 break;
2097
2098 case UMOD:
2099 if (arg1 == 0
2100 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2101 && arg1s == -1))
2102 return 0;
2103 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2104 break;
2105
2106 case AND:
2107 val = arg0 & arg1;
2108 break;
2109
2110 case IOR:
2111 val = arg0 | arg1;
2112 break;
2113
2114 case XOR:
2115 val = arg0 ^ arg1;
2116 break;
2117
2118 case LSHIFTRT:
2119 /* If shift count is undefined, don't fold it; let the machine do
2120 what it wants. But truncate it if the machine will do that. */
2121 if (arg1 < 0)
2122 return 0;
2123
2124 #ifdef SHIFT_COUNT_TRUNCATED
2125 if (SHIFT_COUNT_TRUNCATED)
2126 arg1 %= width;
2127 #endif
2128
2129 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2130 break;
2131
2132 case ASHIFT:
2133 if (arg1 < 0)
2134 return 0;
2135
2136 #ifdef SHIFT_COUNT_TRUNCATED
2137 if (SHIFT_COUNT_TRUNCATED)
2138 arg1 %= width;
2139 #endif
2140
2141 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2142 break;
2143
2144 case ASHIFTRT:
2145 if (arg1 < 0)
2146 return 0;
2147
2148 #ifdef SHIFT_COUNT_TRUNCATED
2149 if (SHIFT_COUNT_TRUNCATED)
2150 arg1 %= width;
2151 #endif
2152
2153 val = arg0s >> arg1;
2154
2155 /* Bootstrap compiler may not have sign extended the right shift.
2156 Manually extend the sign to ensure bootstrap cc matches gcc. */
2157 if (arg0s < 0 && arg1 > 0)
2158 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2159
2160 break;
2161
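/* For rotates the count is reduced modulo the width; e.g. in
   QImode, (rotate (const_int 0x81) (const_int 1)) yields
   (const_int 3).  */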
2162 case ROTATERT:
2163 if (arg1 < 0)
2164 return 0;
2165
2166 arg1 %= width;
2167 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2168 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2169 break;
2170
2171 case ROTATE:
2172 if (arg1 < 0)
2173 return 0;
2174
2175 arg1 %= width;
2176 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2177 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2178 break;
2179
2180 case COMPARE:
2181 /* Do nothing here. */
2182 return 0;
2183
2184 case SMIN:
2185 val = arg0s <= arg1s ? arg0s : arg1s;
2186 break;
2187
2188 case UMIN:
2189 val = ((unsigned HOST_WIDE_INT) arg0
2190 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2191 break;
2192
2193 case SMAX:
2194 val = arg0s > arg1s ? arg0s : arg1s;
2195 break;
2196
2197 case UMAX:
2198 val = ((unsigned HOST_WIDE_INT) arg0
2199 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2200 break;
2201
2202 case SS_PLUS:
2203 case US_PLUS:
2204 case SS_MINUS:
2205 case US_MINUS:
2206 /* ??? There are simplifications that can be done. */
2207 return 0;
2208
2209 default:
2210 abort ();
2211 }
2212
2213 val = trunc_int_for_mode (val, mode);
2214
2215 return GEN_INT (val);
2216 }
2217 \f
2218 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2219 PLUS or MINUS.
2220
2221 Rather than test for specific cases, we do this by a brute-force method
2222 and do all possible simplifications until no more changes occur. Then
2223 we rebuild the operation.
2224
2225 If FORCE is true, then always generate the rtx. This is used to
2226 canonicalize stuff emitted from simplify_gen_binary. Note that this
2227 can still fail if the rtx is too complex. It won't fail just because
2228 the result is not 'simpler' than the input, however. */
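/* For example, (plus (minus a b) b) flattens to the operands
   {a, -b, b}; the pairwise pass cancels b against -b, leaving
   just a.  */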
2229
2230 struct simplify_plus_minus_op_data
2231 {
2232 rtx op;
2233 int neg;
2234 };
2235
2236 static int
2237 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2238 {
2239 const struct simplify_plus_minus_op_data *d1 = p1;
2240 const struct simplify_plus_minus_op_data *d2 = p2;
2241
2242 return (commutative_operand_precedence (d2->op)
2243 - commutative_operand_precedence (d1->op));
2244 }
2245
2246 static rtx
2247 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2248 rtx op1, int force)
2249 {
2250 struct simplify_plus_minus_op_data ops[8];
2251 rtx result, tem;
2252 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2253 int first, changed;
2254 int i, j;
2255
2256 memset (ops, 0, sizeof ops);
2257
2258 /* Set up the two operands and then expand them until nothing has been
2259 changed. If we run out of room in our array, give up; this should
2260 almost never happen. */
2261
2262 ops[0].op = op0;
2263 ops[0].neg = 0;
2264 ops[1].op = op1;
2265 ops[1].neg = (code == MINUS);
2266
2267 do
2268 {
2269 changed = 0;
2270
2271 for (i = 0; i < n_ops; i++)
2272 {
2273 rtx this_op = ops[i].op;
2274 int this_neg = ops[i].neg;
2275 enum rtx_code this_code = GET_CODE (this_op);
2276
2277 switch (this_code)
2278 {
2279 case PLUS:
2280 case MINUS:
2281 if (n_ops == 7)
2282 return NULL_RTX;
2283
2284 ops[n_ops].op = XEXP (this_op, 1);
2285 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2286 n_ops++;
2287
2288 ops[i].op = XEXP (this_op, 0);
2289 input_ops++;
2290 changed = 1;
2291 break;
2292
2293 case NEG:
2294 ops[i].op = XEXP (this_op, 0);
2295 ops[i].neg = ! this_neg;
2296 changed = 1;
2297 break;
2298
2299 case CONST:
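/* Split (const (plus X C)) into the operands X and C.  */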
2300 if (n_ops < 7
2301 && GET_CODE (XEXP (this_op, 0)) == PLUS
2302 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2303 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2304 {
2305 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2306 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2307 ops[n_ops].neg = this_neg;
2308 n_ops++;
2309 input_consts++;
2310 changed = 1;
2311 }
2312 break;
2313
2314 case NOT:
2315 /* ~a -> (-a - 1) */
2316 if (n_ops != 7)
2317 {
2318 ops[n_ops].op = constm1_rtx;
2319 ops[n_ops++].neg = this_neg;
2320 ops[i].op = XEXP (this_op, 0);
2321 ops[i].neg = !this_neg;
2322 changed = 1;
2323 }
2324 break;
2325
2326 case CONST_INT:
2327 if (this_neg)
2328 {
2329 ops[i].op = neg_const_int (mode, this_op);
2330 ops[i].neg = 0;
2331 changed = 1;
2332 }
2333 break;
2334
2335 default:
2336 break;
2337 }
2338 }
2339 }
2340 while (changed);
2341
2342 /* If we only have two operands, we can't do anything. */
2343 if (n_ops <= 2 && !force)
2344 return NULL_RTX;
2345
2346 /* Count the number of CONSTs we didn't split above. */
2347 for (i = 0; i < n_ops; i++)
2348 if (GET_CODE (ops[i].op) == CONST)
2349 input_consts++;
2350
2351 /* Now simplify each pair of operands until nothing changes. The first
2352 time through just simplify constants against each other. */
2353
2354 first = 1;
2355 do
2356 {
2357 changed = first;
2358
2359 for (i = 0; i < n_ops - 1; i++)
2360 for (j = i + 1; j < n_ops; j++)
2361 {
2362 rtx lhs = ops[i].op, rhs = ops[j].op;
2363 int lneg = ops[i].neg, rneg = ops[j].neg;
2364
2365 if (lhs != 0 && rhs != 0
2366 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2367 {
2368 enum rtx_code ncode = PLUS;
2369
2370 if (lneg != rneg)
2371 {
2372 ncode = MINUS;
2373 if (lneg)
2374 tem = lhs, lhs = rhs, rhs = tem;
2375 }
2376 else if (swap_commutative_operands_p (lhs, rhs))
2377 tem = lhs, lhs = rhs, rhs = tem;
2378
2379 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2380
2381 /* Reject "simplifications" that just wrap the two
2382 arguments in a CONST. Failure to do so can result
2383 in infinite recursion with simplify_binary_operation
2384 when it calls us to simplify CONST operations. */
2385 if (tem
2386 && ! (GET_CODE (tem) == CONST
2387 && GET_CODE (XEXP (tem, 0)) == ncode
2388 && XEXP (XEXP (tem, 0), 0) == lhs
2389 && XEXP (XEXP (tem, 0), 1) == rhs)
2390 /* Don't allow -x + -1 -> ~x simplifications in the
2391 first pass. This allows us the chance to combine
2392 the -1 with other constants. */
2393 && ! (first
2394 && GET_CODE (tem) == NOT
2395 && XEXP (tem, 0) == rhs))
2396 {
2397 lneg &= rneg;
2398 if (GET_CODE (tem) == NEG)
2399 tem = XEXP (tem, 0), lneg = !lneg;
2400 if (GET_CODE (tem) == CONST_INT && lneg)
2401 tem = neg_const_int (mode, tem), lneg = 0;
2402
2403 ops[i].op = tem;
2404 ops[i].neg = lneg;
2405 ops[j].op = NULL_RTX;
2406 changed = 1;
2407 }
2408 }
2409 }
2410
2411 first = 0;
2412 }
2413 while (changed);
2414
2415 /* Pack all the operands to the lower-numbered entries. */
2416 for (i = 0, j = 0; j < n_ops; j++)
2417 if (ops[j].op)
2418 ops[i++] = ops[j];
2419 n_ops = i;
2420
2421 /* Sort the operations based on swap_commutative_operands_p. */
2422 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2423
2424 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2425 if (n_ops == 2
2426 && GET_CODE (ops[1].op) == CONST_INT
2427 && CONSTANT_P (ops[0].op)
2428 && ops[0].neg)
2429 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2430
2431 /* We suppressed creation of trivial CONST expressions in the
2432 combination loop to avoid recursion. Create one manually now.
2433 The combination loop should have ensured that there is exactly
2434 one CONST_INT, and the sort will have ensured that it is last
2435 in the array and that any other constant will be next-to-last. */
2436
2437 if (n_ops > 1
2438 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2439 && CONSTANT_P (ops[n_ops - 2].op))
2440 {
2441 rtx value = ops[n_ops - 1].op;
2442 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2443 value = neg_const_int (mode, value);
2444 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2445 n_ops--;
2446 }
2447
2448 /* Count the number of CONSTs that we generated. */
2449 n_consts = 0;
2450 for (i = 0; i < n_ops; i++)
2451 if (GET_CODE (ops[i].op) == CONST)
2452 n_consts++;
2453
2454 /* Give up if we didn't reduce the number of operands we had. Make
2455 sure we count a CONST as two operands. If we have the same
2456 number of operands, but have made more CONSTs than before, this
2457 is also an improvement, so accept it. */
2458 if (!force
2459 && (n_ops + n_consts > input_ops
2460 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2461 return NULL_RTX;
2462
2463 /* Put a non-negated operand first, if possible. */
2464
2465 for (i = 0; i < n_ops && ops[i].neg; i++)
2466 continue;
2467 if (i == n_ops)
2468 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2469 else if (i != 0)
2470 {
2471 tem = ops[0].op;
2472 ops[0] = ops[i];
2473 ops[i].op = tem;
2474 ops[i].neg = 1;
2475 }
2476
2477 /* Now make the result by performing the requested operations. */
2478 result = ops[0].op;
2479 for (i = 1; i < n_ops; i++)
2480 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2481 mode, result, ops[i].op);
2482
2483 return result;
2484 }
2485
2486 /* Like simplify_binary_operation except used for relational operators.
2487 MODE is the mode of the operands, not that of the result. If MODE
2488 is VOIDmode, both operands must also be VOIDmode and we compare the
2489 operands in "infinite precision".
2490
2491 If no simplification is possible, this function returns zero. Otherwise,
2492 it returns either const_true_rtx or const0_rtx. */
2493
2494 rtx
2495 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2496 rtx op0, rtx op1)
2497 {
2498 int equal, op0lt, op0ltu, op1lt, op1ltu;
2499 rtx tem;
2500 rtx trueop0;
2501 rtx trueop1;
2502
2503 if (mode == VOIDmode
2504 && (GET_MODE (op0) != VOIDmode
2505 || GET_MODE (op1) != VOIDmode))
2506 abort ();
2507
2508 /* If op0 is a compare, extract the comparison arguments from it. */
2509 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2510 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2511
2512 trueop0 = avoid_constant_pool_reference (op0);
2513 trueop1 = avoid_constant_pool_reference (op1);
2514
2515 /* We can't simplify MODE_CC values since we don't know what the
2516 actual comparison is. */
2517 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2518 return 0;
2519
2520 /* Make sure the constant is second. */
2521 if (swap_commutative_operands_p (trueop0, trueop1))
2522 {
2523 tem = op0, op0 = op1, op1 = tem;
2524 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2525 code = swap_condition (code);
2526 }
2527
2528 /* For integer comparisons of A and B maybe we can simplify A - B and can
2529 then simplify a comparison of that with zero. If A and B are both either
2530 a register or a CONST_INT, this can't help; testing for these cases will
2531 prevent infinite recursion here and speed things up.
2532
2533 If CODE is an unsigned comparison, then we can never do this optimization,
2534 because it gives an incorrect result if the subtraction wraps around zero.
2535 ANSI C defines unsigned operations such that they never overflow, and
2536 thus such cases cannot be ignored. */
2537
2538 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2539 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2540 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2541 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2542 && code != GTU && code != GEU && code != LTU && code != LEU)
2543 return simplify_relational_operation (signed_condition (code),
2544 mode, tem, const0_rtx);
2545
2546 if (flag_unsafe_math_optimizations && code == ORDERED)
2547 return const_true_rtx;
2548
2549 if (flag_unsafe_math_optimizations && code == UNORDERED)
2550 return const0_rtx;
2551
2552 /* For modes without NaNs, if the two operands are equal, we know the
2553 result unless they have side effects. */
2554 if (! HONOR_NANS (GET_MODE (trueop0))
2555 && rtx_equal_p (trueop0, trueop1)
2556 && ! side_effects_p (trueop0))
2557 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2558
2559 /* If the operands are floating-point constants, see if we can fold
2560 the result. */
2561 else if (GET_CODE (trueop0) == CONST_DOUBLE
2562 && GET_CODE (trueop1) == CONST_DOUBLE
2563 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2564 {
2565 REAL_VALUE_TYPE d0, d1;
2566
2567 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2568 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2569
2570 /* Comparisons are unordered iff at least one of the values is NaN. */
2571 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2572 switch (code)
2573 {
2574 case UNEQ:
2575 case UNLT:
2576 case UNGT:
2577 case UNLE:
2578 case UNGE:
2579 case NE:
2580 case UNORDERED:
2581 return const_true_rtx;
2582 case EQ:
2583 case LT:
2584 case GT:
2585 case LE:
2586 case GE:
2587 case LTGT:
2588 case ORDERED:
2589 return const0_rtx;
2590 default:
2591 return 0;
2592 }
2593
2594 equal = REAL_VALUES_EQUAL (d0, d1);
2595 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2596 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2597 }
2598
2599 /* Otherwise, see if the operands are both integers. */
2600 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2601 && (GET_CODE (trueop0) == CONST_DOUBLE
2602 || GET_CODE (trueop0) == CONST_INT)
2603 && (GET_CODE (trueop1) == CONST_DOUBLE
2604 || GET_CODE (trueop1) == CONST_INT))
2605 {
2606 int width = GET_MODE_BITSIZE (mode);
2607 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2608 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2609
2610 /* Get the two words comprising each integer constant. */
2611 if (GET_CODE (trueop0) == CONST_DOUBLE)
2612 {
2613 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2614 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2615 }
2616 else
2617 {
2618 l0u = l0s = INTVAL (trueop0);
2619 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2620 }
2621
2622 if (GET_CODE (trueop1) == CONST_DOUBLE)
2623 {
2624 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2625 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2626 }
2627 else
2628 {
2629 l1u = l1s = INTVAL (trueop1);
2630 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2631 }
2632
2633 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2634 we have to sign or zero-extend the values. */
2635 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2636 {
2637 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2638 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2639
2640 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2641 l0s |= ((HOST_WIDE_INT) (-1) << width);
2642
2643 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2644 l1s |= ((HOST_WIDE_INT) (-1) << width);
2645 }
2646 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2647 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2648
2649 equal = (h0u == h1u && l0u == l1u);
2650 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2651 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2652 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2653 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2654 }
2655
2656 /* Otherwise, there are some code-specific tests we can make. */
2657 else
2658 {
2659 switch (code)
2660 {
2661 case EQ:
2662 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2663 return const0_rtx;
2664 break;
2665
2666 case NE:
2667 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2668 return const_true_rtx;
2669 break;
2670
2671 case GEU:
2672 /* Unsigned values are never negative. */
2673 if (trueop1 == const0_rtx)
2674 return const_true_rtx;
2675 break;
2676
2677 case LTU:
2678 if (trueop1 == const0_rtx)
2679 return const0_rtx;
2680 break;
2681
2682 case LEU:
2683 /* Unsigned values are never greater than the largest
2684 unsigned value. */
2685 if (GET_CODE (trueop1) == CONST_INT
2686 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2687 && INTEGRAL_MODE_P (mode))
2688 return const_true_rtx;
2689 break;
2690
2691 case GTU:
2692 if (GET_CODE (trueop1) == CONST_INT
2693 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2694 && INTEGRAL_MODE_P (mode))
2695 return const0_rtx;
2696 break;
2697
2698 case LT:
2699 /* Optimize abs(x) < 0.0. */
2700 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2701 {
2702 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2703 : trueop0;
2704 if (GET_CODE (tem) == ABS)
2705 return const0_rtx;
2706 }
2707 break;
2708
2709 case GE:
2710 /* Optimize abs(x) >= 0.0. */
2711 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2712 {
2713 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2714 : trueop0;
2715 if (GET_CODE (tem) == ABS)
2716 return const_true_rtx;
2717 }
2718 break;
2719
2720 case UNGE:
2721 /* Optimize ! (abs(x) < 0.0). */
2722 if (trueop1 == CONST0_RTX (mode))
2723 {
2724 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2725 : trueop0;
2726 if (GET_CODE (tem) == ABS)
2727 return const_true_rtx;
2728 }
2729 break;
2730
2731 default:
2732 break;
2733 }
2734
2735 return 0;
2736 }
2737
2738 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2739 as appropriate. */
2740 switch (code)
2741 {
2742 case EQ:
2743 case UNEQ:
2744 return equal ? const_true_rtx : const0_rtx;
2745 case NE:
2746 case LTGT:
2747 return ! equal ? const_true_rtx : const0_rtx;
2748 case LT:
2749 case UNLT:
2750 return op0lt ? const_true_rtx : const0_rtx;
2751 case GT:
2752 case UNGT:
2753 return op1lt ? const_true_rtx : const0_rtx;
2754 case LTU:
2755 return op0ltu ? const_true_rtx : const0_rtx;
2756 case GTU:
2757 return op1ltu ? const_true_rtx : const0_rtx;
2758 case LE:
2759 case UNLE:
2760 return equal || op0lt ? const_true_rtx : const0_rtx;
2761 case GE:
2762 case UNGE:
2763 return equal || op1lt ? const_true_rtx : const0_rtx;
2764 case LEU:
2765 return equal || op0ltu ? const_true_rtx : const0_rtx;
2766 case GEU:
2767 return equal || op1ltu ? const_true_rtx : const0_rtx;
2768 case ORDERED:
2769 return const_true_rtx;
2770 case UNORDERED:
2771 return const0_rtx;
2772 default:
2773 abort ();
2774 }
2775 }
2776 \f
2777 /* Simplify CODE, an operation with result mode MODE and three operands,
2778 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2779 a constant. Return 0 if no simplification is possible. */
2780
2781 rtx
2782 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2783 enum machine_mode op0_mode, rtx op0, rtx op1,
2784 rtx op2)
2785 {
2786 unsigned int width = GET_MODE_BITSIZE (mode);
2787
2788 /* VOIDmode means "infinite" precision. */
2789 if (width == 0)
2790 width = HOST_BITS_PER_WIDE_INT;
2791
2792 switch (code)
2793 {
2794 case SIGN_EXTRACT:
2795 case ZERO_EXTRACT:
2796 if (GET_CODE (op0) == CONST_INT
2797 && GET_CODE (op1) == CONST_INT
2798 && GET_CODE (op2) == CONST_INT
2799 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2800 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2801 {
2802 /* Extracting a bit-field from a constant. */
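/* For example, (zero_extract (const_int 0x1234) (const_int 8)
   (const_int 4)) yields (const_int 0x23) when BITS_BIG_ENDIAN
   is zero.  */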
2803 HOST_WIDE_INT val = INTVAL (op0);
2804
2805 if (BITS_BIG_ENDIAN)
2806 val >>= (GET_MODE_BITSIZE (op0_mode)
2807 - INTVAL (op2) - INTVAL (op1));
2808 else
2809 val >>= INTVAL (op2);
2810
2811 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2812 {
2813 /* First zero-extend. */
2814 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2815 /* If desired, propagate sign bit. */
2816 if (code == SIGN_EXTRACT
2817 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2818 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2819 }
2820
2821 /* Clear the bits that don't belong in our mode,
2822 unless they and our sign bit are all one.
2823 So we get either a reasonable negative value or a reasonable
2824 unsigned value for this mode. */
2825 if (width < HOST_BITS_PER_WIDE_INT
2826 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2827 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2828 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2829
2830 return GEN_INT (val);
2831 }
2832 break;
2833
2834 case IF_THEN_ELSE:
2835 if (GET_CODE (op0) == CONST_INT)
2836 return op0 != const0_rtx ? op1 : op2;
2837
2838 /* Convert c ? a : a into "a". */
2839 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
2840 return op1;
2841
2842 /* Convert a != b ? a : b into "a". */
2843 if (GET_CODE (op0) == NE
2844 && ! side_effects_p (op0)
2845 && ! HONOR_NANS (mode)
2846 && ! HONOR_SIGNED_ZEROS (mode)
2847 && ((rtx_equal_p (XEXP (op0, 0), op1)
2848 && rtx_equal_p (XEXP (op0, 1), op2))
2849 || (rtx_equal_p (XEXP (op0, 0), op2)
2850 && rtx_equal_p (XEXP (op0, 1), op1))))
2851 return op1;
2852
2853 /* Convert a == b ? a : b into "b". */
2854 if (GET_CODE (op0) == EQ
2855 && ! side_effects_p (op0)
2856 && ! HONOR_NANS (mode)
2857 && ! HONOR_SIGNED_ZEROS (mode)
2858 && ((rtx_equal_p (XEXP (op0, 0), op1)
2859 && rtx_equal_p (XEXP (op0, 1), op2))
2860 || (rtx_equal_p (XEXP (op0, 0), op2)
2861 && rtx_equal_p (XEXP (op0, 1), op1))))
2862 return op2;
2863
2864 if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2865 {
2866 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2867 ? GET_MODE (XEXP (op0, 1))
2868 : GET_MODE (XEXP (op0, 0)));
2869 rtx temp;
2870 if (cmp_mode == VOIDmode)
2871 cmp_mode = op0_mode;
2872 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2873 XEXP (op0, 0), XEXP (op0, 1));
2874
2875 /* See if any simplifications were possible. */
2876 if (temp == const0_rtx)
2877 return op2;
2878 else if (temp == const_true_rtx)
2879 return op1;
2880 else if (temp)
2881 abort ();
2882
2883 /* Look for happy constants in op1 and op2. */
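/* With STORE_FLAG_VALUE == 1, (if_then_else (lt a b)
   (const_int 1) (const_int 0)) becomes (lt a b), and the
   0/1 form becomes (ge a b) when the comparison is
   reversible.  */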
2884 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2885 {
2886 HOST_WIDE_INT t = INTVAL (op1);
2887 HOST_WIDE_INT f = INTVAL (op2);
2888
2889 if (t == STORE_FLAG_VALUE && f == 0)
2890 code = GET_CODE (op0);
2891 else if (t == 0 && f == STORE_FLAG_VALUE)
2892 {
2893 enum rtx_code tmp;
2894 tmp = reversed_comparison_code (op0, NULL_RTX);
2895 if (tmp == UNKNOWN)
2896 break;
2897 code = tmp;
2898 }
2899 else
2900 break;
2901
2902 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2903 }
2904 }
2905 break;
2906
2907 case VEC_MERGE:
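/* OP2 is a bit mask: for each element, a set bit selects the
   element from OP0 and a clear bit selects it from OP1.  */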
2908 if (GET_MODE (op0) != mode
2909 || GET_MODE (op1) != mode
2910 || !VECTOR_MODE_P (mode))
2911 abort ();
2912 op2 = avoid_constant_pool_reference (op2);
2913 if (GET_CODE (op2) == CONST_INT)
2914 {
2915 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2916 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2917 int mask = (1 << n_elts) - 1;
2918
2919 if (!(INTVAL (op2) & mask))
2920 return op1;
2921 if ((INTVAL (op2) & mask) == mask)
2922 return op0;
2923
2924 op0 = avoid_constant_pool_reference (op0);
2925 op1 = avoid_constant_pool_reference (op1);
2926 if (GET_CODE (op0) == CONST_VECTOR
2927 && GET_CODE (op1) == CONST_VECTOR)
2928 {
2929 rtvec v = rtvec_alloc (n_elts);
2930 unsigned int i;
2931
2932 for (i = 0; i < n_elts; i++)
2933 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2934 ? CONST_VECTOR_ELT (op0, i)
2935 : CONST_VECTOR_ELT (op1, i));
2936 return gen_rtx_CONST_VECTOR (mode, v);
2937 }
2938 }
2939 break;
2940
2941 default:
2942 abort ();
2943 }
2944
2945 return 0;
2946 }
2947
2948 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
2949 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
2950
2951 Works by unpacking OP into a collection of 8-bit values
2952 represented as a little-endian array of 'unsigned char', selecting by BYTE,
2953 and then repacking them for OUTERMODE. */
2954
2955 static rtx
2956 simplify_immed_subreg (enum machine_mode outermode, rtx op,
2957 enum machine_mode innermode, unsigned int byte)
2958 {
2959 /* We support up to 512-bit values (for V8DFmode). */
2960 enum {
2961 max_bitsize = 512,
2962 value_bit = 8,
2963 value_mask = (1 << value_bit) - 1
2964 };
2965 unsigned char value[max_bitsize / value_bit];
2966 int value_start;
2967 int i;
2968 int elem;
2969
2970 int num_elem;
2971 rtx * elems;
2972 int elem_bitsize;
2973 rtx result_s;
2974 rtvec result_v = NULL;
2975 enum mode_class outer_class;
2976 enum machine_mode outer_submode;
2977
2978 /* Some ports misuse CCmode. */
2979 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
2980 return op;
2981
2982 /* Unpack the value. */
2983
2984 if (GET_CODE (op) == CONST_VECTOR)
2985 {
2986 num_elem = CONST_VECTOR_NUNITS (op);
2987 elems = &CONST_VECTOR_ELT (op, 0);
2988 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
2989 }
2990 else
2991 {
2992 num_elem = 1;
2993 elems = &op;
2994 elem_bitsize = max_bitsize;
2995 }
2996
2997 if (BITS_PER_UNIT % value_bit != 0)
2998 abort (); /* Too complicated; reducing value_bit may help. */
2999 if (elem_bitsize % BITS_PER_UNIT != 0)
3000 abort (); /* I don't know how to handle endianness of sub-units. */
3001
3002 for (elem = 0; elem < num_elem; elem++)
3003 {
3004 unsigned char * vp;
3005 rtx el = elems[elem];
3006
3007 /* Vectors are kept in target memory order. (This is probably
3008 a mistake.) */
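/* The arithmetic below maps element ELEM to its little-endian
   byte position, honoring WORDS_BIG_ENDIAN and BYTES_BIG_ENDIAN
   separately.  */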
3009 {
3010 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3011 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3012 / BITS_PER_UNIT);
3013 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3014 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3015 unsigned bytele = (subword_byte % UNITS_PER_WORD
3016 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3017 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3018 }
3019
3020 switch (GET_CODE (el))
3021 {
3022 case CONST_INT:
3023 for (i = 0;
3024 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3025 i += value_bit)
3026 *vp++ = INTVAL (el) >> i;
3027 /* CONST_INTs are always logically sign-extended. */
3028 for (; i < elem_bitsize; i += value_bit)
3029 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3030 break;
3031
3032 case CONST_DOUBLE:
3033 if (GET_MODE (el) == VOIDmode)
3034 {
3035 /* If this triggers, someone should have generated a
3036 CONST_INT instead. */
3037 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3038 abort ();
3039
3040 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3041 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3042 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3043 {
3044 *vp++
3045 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3046 i += value_bit;
3047 }
3048 /* It shouldn't matter what's done here, so fill it with
3049 zero. */
3050 for (; i < max_bitsize; i += value_bit)
3051 *vp++ = 0;
3052 }
3053 else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
3054 {
3055 long tmp[max_bitsize / 32];
3056 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3057
3058 if (bitsize > elem_bitsize)
3059 abort ();
3060 if (bitsize % value_bit != 0)
3061 abort ();
3062
3063 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3064 GET_MODE (el));
3065
3066 /* real_to_target produces its result in words affected by
3067 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3068 and use WORDS_BIG_ENDIAN instead; see the documentation
3069 of SUBREG in rtl.texi. */
3070 for (i = 0; i < bitsize; i += value_bit)
3071 {
3072 int ibase;
3073 if (WORDS_BIG_ENDIAN)
3074 ibase = bitsize - 1 - i;
3075 else
3076 ibase = i;
3077 *vp++ = tmp[ibase / 32] >> i % 32;
3078 }
3079
3080 /* It shouldn't matter what's done here, so fill it with
3081 zero. */
3082 for (; i < elem_bitsize; i += value_bit)
3083 *vp++ = 0;
3084 }
3085 else
3086 abort ();
3087 break;
3088
3089 default:
3090 abort ();
3091 }
3092 }
3093
3094 /* Now, pick the right byte to start with. */
3095 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3096 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3097 will already have offset 0. */
3098 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3099 {
3100 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3101 - byte);
3102 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3103 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3104 byte = (subword_byte % UNITS_PER_WORD
3105 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3106 }
3107
3108 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3109 so if it's become negative it will instead be very large.) */
3110 if (byte >= GET_MODE_SIZE (innermode))
3111 abort ();
3112
3113 /* Convert from bytes to chunks of size value_bit. */
3114 value_start = byte * (BITS_PER_UNIT / value_bit);
3115
3116 /* Re-pack the value. */
3117
3118 if (VECTOR_MODE_P (outermode))
3119 {
3120 num_elem = GET_MODE_NUNITS (outermode);
3121 result_v = rtvec_alloc (num_elem);
3122 elems = &RTVEC_ELT (result_v, 0);
3123 outer_submode = GET_MODE_INNER (outermode);
3124 }
3125 else
3126 {
3127 num_elem = 1;
3128 elems = &result_s;
3129 outer_submode = outermode;
3130 }
3131
3132 outer_class = GET_MODE_CLASS (outer_submode);
3133 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3134
3135 if (elem_bitsize % value_bit != 0)
3136 abort ();
3137 if (elem_bitsize + value_start * value_bit > max_bitsize)
3138 abort ();
3139
3140 for (elem = 0; elem < num_elem; elem++)
3141 {
3142 unsigned char *vp;
3143
3144 /* Vectors are stored in target memory order. (This is probably
3145 a mistake.) */
3146 {
3147 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3148 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3149 / BITS_PER_UNIT);
3150 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3151 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3152 unsigned bytele = (subword_byte % UNITS_PER_WORD
3153 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3154 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3155 }
3156
3157 switch (outer_class)
3158 {
3159 case MODE_INT:
3160 case MODE_PARTIAL_INT:
3161 {
3162 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3163
3164 for (i = 0;
3165 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3166 i += value_bit)
3167 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3168 for (; i < elem_bitsize; i += value_bit)
3169 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3170 << (i - HOST_BITS_PER_WIDE_INT));
3171
3172 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3173 know why. */
3174 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3175 elems[elem] = gen_int_mode (lo, outer_submode);
3176 else
3177 elems[elem] = immed_double_const (lo, hi, outer_submode);
3178 }
3179 break;
3180
3181 case MODE_FLOAT:
3182 {
3183 REAL_VALUE_TYPE r;
3184 long tmp[max_bitsize / 32];
3185
3186 /* real_from_target wants its input in words affected by
3187 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3188 and use WORDS_BIG_ENDIAN instead; see the documentation
3189 of SUBREG in rtl.texi. */
3190 for (i = 0; i < max_bitsize / 32; i++)
3191 tmp[i] = 0;
3192 for (i = 0; i < elem_bitsize; i += value_bit)
3193 {
3194 int ibase;
3195 if (WORDS_BIG_ENDIAN)
3196 ibase = elem_bitsize - 1 - i;
3197 else
3198 ibase = i;
3199 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3200 }
3201
3202 real_from_target (&r, tmp, outer_submode);
3203 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3204 }
3205 break;
3206
3207 default:
3208 abort ();
3209 }
3210 }
3211 if (VECTOR_MODE_P (outermode))
3212 return gen_rtx_CONST_VECTOR (outermode, result_v);
3213 else
3214 return result_s;
3215 }
3216
3217 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3218 Return 0 if no simplifications are possible. */
3219 rtx
3220 simplify_subreg (enum machine_mode outermode, rtx op,
3221 enum machine_mode innermode, unsigned int byte)
3222 {
3223 /* Little bit of sanity checking. */
3224 if (innermode == VOIDmode || outermode == VOIDmode
3225 || innermode == BLKmode || outermode == BLKmode)
3226 abort ();
3227
3228 if (GET_MODE (op) != innermode
3229 && GET_MODE (op) != VOIDmode)
3230 abort ();
3231
3232 if (byte % GET_MODE_SIZE (outermode)
3233 || byte >= GET_MODE_SIZE (innermode))
3234 abort ();
3235
3236 if (outermode == innermode && !byte)
3237 return op;
3238
3239 if (GET_CODE (op) == CONST_INT
3240 || GET_CODE (op) == CONST_DOUBLE
3241 || GET_CODE (op) == CONST_VECTOR)
3242 return simplify_immed_subreg (outermode, op, innermode, byte);
3243
3244 /* Changing mode twice with SUBREG => just change it once,
3245 or not at all if changing back to OP's starting mode. */
3246 if (GET_CODE (op) == SUBREG)
3247 {
3248 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3249 int final_offset = byte + SUBREG_BYTE (op);
3250 rtx new;
3251
3252 if (outermode == innermostmode
3253 && byte == 0 && SUBREG_BYTE (op) == 0)
3254 return SUBREG_REG (op);
3255
3256 /* The SUBREG_BYTE represents the offset, as if the value were stored
3257 in memory. An irritating exception is a paradoxical subreg, where
3258 we define SUBREG_BYTE to be 0; on big endian machines, this value
3259 should have been negative. For a moment, undo this exception. */
3260 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3261 {
3262 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3263 if (WORDS_BIG_ENDIAN)
3264 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3265 if (BYTES_BIG_ENDIAN)
3266 final_offset += difference % UNITS_PER_WORD;
3267 }
3268 if (SUBREG_BYTE (op) == 0
3269 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3270 {
3271 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3272 if (WORDS_BIG_ENDIAN)
3273 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3274 if (BYTES_BIG_ENDIAN)
3275 final_offset += difference % UNITS_PER_WORD;
3276 }
3277
3278 /* See whether resulting subreg will be paradoxical. */
3279 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3280 {
3281 /* In nonparadoxical subregs we can't handle negative offsets. */
3282 if (final_offset < 0)
3283 return NULL_RTX;
3284 /* Bail out in case resulting subreg would be incorrect. */
3285 if (final_offset % GET_MODE_SIZE (outermode)
3286 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3287 return NULL_RTX;
3288 }
3289 else
3290 {
3291 int offset = 0;
3292 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3293
3294 /* In a paradoxical subreg, see if we are still looking at the lower
3295 part. If so, our SUBREG_BYTE will be 0. */
3296 if (WORDS_BIG_ENDIAN)
3297 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3298 if (BYTES_BIG_ENDIAN)
3299 offset += difference % UNITS_PER_WORD;
3300 if (offset == final_offset)
3301 final_offset = 0;
3302 else
3303 return NULL_RTX;
3304 }
3305
3306 /* Recurse for further possible simplifications. */
3307 new = simplify_subreg (outermode, SUBREG_REG (op),
3308 GET_MODE (SUBREG_REG (op)),
3309 final_offset);
3310 if (new)
3311 return new;
3312 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3313 }
3314
3315 /* SUBREG of a hard register => just change the register number
3316 and/or mode. If the hard register is not valid in that mode,
3317 suppress this simplification. If the hard register is the stack,
3318 frame, or argument pointer, leave this as a SUBREG. */
3319
3320 if (REG_P (op)
3321 && (! REG_FUNCTION_VALUE_P (op)
3322 || ! rtx_equal_function_value_matters)
3323 && REGNO (op) < FIRST_PSEUDO_REGISTER
3324 #ifdef CANNOT_CHANGE_MODE_CLASS
3325 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3326 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3327 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3328 #endif
3329 && ((reload_completed && !frame_pointer_needed)
3330 || (REGNO (op) != FRAME_POINTER_REGNUM
3331 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3332 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3333 #endif
3334 ))
3335 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3336 && REGNO (op) != ARG_POINTER_REGNUM
3337 #endif
3338 && REGNO (op) != STACK_POINTER_REGNUM
3339 && subreg_offset_representable_p (REGNO (op), innermode,
3340 byte, outermode))
3341 {
3342 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3343 int final_regno = subreg_hard_regno (tem, 0);
3344
3345 /* ??? We do allow it if the current REG is not valid for
3346 its mode. This is a kludge to work around how float/complex
3347 arguments are passed on 32-bit SPARC and should be fixed. */
3348 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3349 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3350 {
3351 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3352
3353 /* Propagate original regno. We don't have any way to specify
3354 the offset inside the original regno, so do so only for the lowpart.
3355 The information is used only by alias analysis, which cannot
3356 grok partial registers anyway. */
3357
3358 if (subreg_lowpart_offset (outermode, innermode) == byte)
3359 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3360 return x;
3361 }
3362 }
3363
3364 /* If we have a SUBREG of a register that we are replacing and we are
3365 replacing it with a MEM, make a new MEM and try replacing the
3366 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3367 or if we would be widening it. */
3368
3369 if (GET_CODE (op) == MEM
3370 && ! mode_dependent_address_p (XEXP (op, 0))
3371 /* Allow splitting of volatile memory references in case we don't
3372 have an instruction to move the whole thing. */
3373 && (! MEM_VOLATILE_P (op)
3374 || ! have_insn_for (SET, innermode))
3375 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3376 return adjust_address_nv (op, outermode, byte);
3377
3378 /* Handle complex values represented as CONCAT
3379 of real and imaginary part. */
3380 if (GET_CODE (op) == CONCAT)
3381 {
3382 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3383 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3384 unsigned int final_offset;
3385 rtx res;
3386
3387 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3388 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3389 if (res)
3390 return res;
3391 /* We can at least simplify it by referring directly to the relevant part. */
3392 return gen_rtx_SUBREG (outermode, part, final_offset);
3393 }
3394
3395 return NULL_RTX;
3396 }
3397
3398 /* Make a SUBREG operation or equivalent if it folds. */
3399
3400 rtx
3401 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3402 enum machine_mode innermode, unsigned int byte)
3403 {
3404 rtx new;
3405 /* Little bit of sanity checking. */
3406 if (innermode == VOIDmode || outermode == VOIDmode
3407 || innermode == BLKmode || outermode == BLKmode)
3408 abort ();
3409
3410 if (GET_MODE (op) != innermode
3411 && GET_MODE (op) != VOIDmode)
3412 abort ();
3413
3414 if (byte % GET_MODE_SIZE (outermode)
3415 || byte >= GET_MODE_SIZE (innermode))
3416 abort ();
3417
3418 if (GET_CODE (op) == QUEUED)
3419 return NULL_RTX;
3420
3421 new = simplify_subreg (outermode, op, innermode, byte);
3422 if (new)
3423 return new;
3424
3425 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3426 return NULL_RTX;
3427
3428 return gen_rtx_SUBREG (outermode, op, byte);
3429 }
3430 /* Simplify X, an rtx expression.
3431
3432 Return the simplified expression or NULL if no simplifications
3433 were possible.
3434
3435 This is the preferred entry point into the simplification routines;
3436 however, we still allow passes to call the more specific routines.
3437
3438 Right now GCC has three (yes, three) major bodies of RTL simplification
3439 code that need to be unified.
3440
3441 1. fold_rtx in cse.c. This code uses various CSE specific
3442 information to aid in RTL simplification.
3443
3444 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3445 it uses combine specific information to aid in RTL
3446 simplification.
3447
3448 3. The routines in this file.
3449
3450
3451 Long term we want to have only one body of simplification code; to
3452 get to that state I recommend the following steps:
3453
3454 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3455 which do not depend on pass-specific state into these routines.
3456
3457 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3458 use this routine whenever possible.
3459
3460 3. Allow for pass dependent state to be provided to these
3461 routines and add simplifications based on the pass dependent
3462 state. Remove code from cse.c & combine.c that becomes
3463 redundant/dead.
3464
3465 It will take time, but ultimately the compiler will be easier to
3466 maintain and improve. It's totally silly that when we add a
3467 simplification it needs to be added to 4 places (3 for RTL
3468 simplification and 1 for tree simplification). */
3469
3470 rtx
3471 simplify_rtx (rtx x)
3472 {
3473 enum rtx_code code = GET_CODE (x);
3474 enum machine_mode mode = GET_MODE (x);
3475 rtx temp;
3476
3477 switch (GET_RTX_CLASS (code))
3478 {
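/* The rtx class characters: '1' unary, 'c' commutative binary,
   '2' non-commutative binary, '3' ternary, 'b' bit-field,
   '<' comparison, 'x' miscellaneous, 'o' object.  */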
3479 case '1':
3480 return simplify_unary_operation (code, mode,
3481 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3482 case 'c':
3483 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3484 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3485
3486 /* Fall through.... */
3487
3488 case '2':
3489 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3490
3491 case '3':
3492 case 'b':
3493 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3494 XEXP (x, 0), XEXP (x, 1),
3495 XEXP (x, 2));
3496
3497 case '<':
3498 temp = simplify_relational_operation (code,
3499 ((GET_MODE (XEXP (x, 0))
3500 != VOIDmode)
3501 ? GET_MODE (XEXP (x, 0))
3502 : GET_MODE (XEXP (x, 1))),
3503 XEXP (x, 0), XEXP (x, 1));
3504 #ifdef FLOAT_STORE_FLAG_VALUE
3505 if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3506 {
3507 if (temp == const0_rtx)
3508 temp = CONST0_RTX (mode);
3509 else
3510 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3511 mode);
3512 }
3513 #endif
3514 return temp;
3515
3516 case 'x':
3517 if (code == SUBREG)
3518 return simplify_gen_subreg (mode, SUBREG_REG (x),
3519 GET_MODE (SUBREG_REG (x)),
3520 SUBREG_BYTE (x));
3521 if (code == CONSTANT_P_RTX)
3522 {
3523 if (CONSTANT_P (XEXP (x, 0)))
3524 return const1_rtx;
3525 }
3526 break;
3527
3528 case 'o':
3529 if (code == LO_SUM)
3530 {
3531 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3532 if (GET_CODE (XEXP (x, 0)) == HIGH
3533 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3534 return XEXP (x, 1);
3535 }
3536 break;
3537
3538 default:
3539 break;
3540 }
3541 return NULL;
3542 }