gcc/simplify-rtx.c
/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
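
/* For example, with a 32-bit HOST_WIDE_INT, HWI_SIGN_EXTEND (0x80000000)
   is -1 because the low word looks negative, while
   HWI_SIGN_EXTEND (0x7fffffff) is 0.  */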

static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
\f
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */
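/* E.g. in QImode the negation of -128 is +128, which gen_int_mode
   truncates back to -128.  */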
static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}

\f
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
\f
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
\f
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
\f
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  if (cmp_mode != VOIDmode)
    {
      tem = simplify_relational_operation (code, cmp_mode, op0, op1);

      if (tem)
        {
#ifdef FLOAT_STORE_FLAG_VALUE
          if (GET_MODE_CLASS (mode) == MODE_FLOAT)
            {
              REAL_VALUE_TYPE val;
              if (tem == const0_rtx)
                return CONST0_RTX (mode);
              if (tem != const_true_rtx)
                abort ();
              val = FLOAT_STORE_FLAG_VALUE (mode);
              return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
            }
#endif
          return tem;
        }
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
                                    XEXP (op0, 0), XEXP (op0, 1));
  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
    {
      if (code == NE)
        {
          if (GET_MODE (op0) == mode)
            return op0;
          return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                          XEXP (op0, 0), XEXP (op0, 1));
        }
      else if (code == EQ)
        {
          enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
          if (new != UNKNOWN)
            return simplify_gen_relational (new, mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
\f
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case '2':
    case 'c':
      op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case '<':
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (op1, old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case '3':
    case 'b':
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case 'o':
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

          /* (lo_sum (high x) x) -> x */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (REG_P (old) && REGNO (x) == REGNO (old))
            return new;
        }
      break;

    default:
      break;
    }
  return x;
}
\f
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && !VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE (trueop))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
        abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
          || GET_CODE (trueop) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (trueop) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = trueop;
          else
            {
              enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              if (in_n_elts >= n_elts || n_elts % in_n_elts)
                abort ();
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (trueop, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
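          /* E.g. for arg0 == 12 (binary 1100), arg0 & -arg0 isolates the
             lowest set bit (4), exact_log2 of that is 2, and FFS is 3.  */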
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
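          /* Count set bits with Kernighan's trick: arg0 &= arg0 - 1 clears
             the lowest set bit, so the loop below runs once per 1 bit;
             e.g. 0xb (binary 1011) takes three iterations.  */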
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
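          /* Mask to the narrow mode, then subtract 2**bits when the narrow
             sign bit is set; e.g. sign-extending 0xff from QImode gives
             255 - 256 = -1.  */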
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          abort ();
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            abort ();

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (trueop));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
          }
          break;
        default:
          abort ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */
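      /* Concretely, out-of-range values saturate to the extreme
         representable integer and NaN folds to zero; e.g. (fix:SI 3e9)
         folds to 2147483647, as the bound tests below implement.  */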

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          abort ();
        }
      return immed_double_const (xl, xh, mode);
    }

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (GET_RTX_CLASS (GET_CODE (op)) == '<'
              && (mode == BImode || STORE_FLAG_VALUE == -1)
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (plus X -1)) can become (neg X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == constm1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

          /* Similarly, (not (neg X)) is (plus X -1).  */
          if (GET_CODE (op) == NEG)
            return plus_constant (XEXP (op, 0), -1);

          /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == XOR
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);


          /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
             operands other than 1, but that is not valid.  We could do a
             similar simplification for (not (lshiftrt C X)) where C is
             just the sign bit, but this doesn't seem common enough to
             bother with.  */
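          /* E.g. in QImode with X == 3, (not (ashift 1 3)) is ~0x08
             == 0xf7, and rotating ~1 == 0xfe left by 3 gives 0xf7 as
             well.  */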
          if (GET_CODE (op) == ASHIFT
              && XEXP (op, 0) == const1_rtx)
            {
              temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
              return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
            }

          /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
             by reversing the comparison code if valid.  */
          if (STORE_FLAG_VALUE == -1
              && GET_RTX_CLASS (GET_CODE (op)) == '<'
              && (reversed = reversed_comparison_code (op, NULL_RTX))
                 != UNKNOWN)
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (ashiftrt foo C)) where C is the number of bits in FOO
             minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
             so we can perform the above simplification.  */

          if (STORE_FLAG_VALUE == -1
              && GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_relational (GE, mode, VOIDmode,
                                            XEXP (op, 0), const0_rtx);

          break;

        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);

          /* (neg (plus X 1)) can become (not X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == const1_rtx)
            return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

          /* Similarly, (neg (not X)) is (plus X 1).  */
          if (GET_CODE (op) == NOT)
            return plus_constant (XEXP (op, 0), 1);

          /* (neg (minus X Y)) can become (minus Y X).  This transformation
             isn't safe for modes with signed zeros, since if X and Y are
             both +0, (minus Y X) is the same as (minus X Y).  If the
             rounding mode is towards +infinity (or -infinity) then the two
             expressions will be rounded differently.  */
          if (GET_CODE (op) == MINUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
                                        XEXP (op, 0));

          if (GET_CODE (op) == PLUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              /* (neg (plus A C)) is simplified to (minus -C A).  */
              if (GET_CODE (XEXP (op, 1)) == CONST_INT
                  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
                {
                  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
                                                   mode);
                  if (temp)
                    return simplify_gen_binary (MINUS, mode, temp,
                                                XEXP (op, 0));
                }

              /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
            }

          /* (neg (mult A B)) becomes (mult (neg A) B).
             This works even for floating-point values.  */
          if (GET_CODE (op) == MULT
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
            }

          /* NEG commutes with ASHIFT since it is multiplication.  Only do
             this if we can then eliminate the NEG (e.g., if the operand
             is a constant).  */
          if (GET_CODE (op) == ASHIFT)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
                                               mode);
              if (temp)
                return simplify_gen_binary (ASHIFT, mode, temp,
                                            XEXP (op, 1));
            }

          break;

        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

          /* Check for a sign extension of a subreg of a promoted
             variable, where the promotion is sign-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && ! SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        case ZERO_EXTEND:
          /* Check for a zero extension of a subreg of a promoted
             variable, where the promotion is zero-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        default:
          break;
        }

      return 0;
    }
}
\f
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */
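/* E.g. (plus (plus (reg) (const_int 1)) (const_int 2)) is folded here
   to (plus (reg) (const_int 3)) by reassociating the two constants.  */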

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (trueop0, i),
                                             CONST_VECTOR_ELT (trueop1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              if (code == AND)
                tmp0[i] &= tmp1[i];
              else if (code == IOR)
                tmp0[i] |= tmp1[i];
              else if (code == XOR)
                tmp0[i] ^= tmp1[i];
              else
                abort ();
            }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
          f0 = real_value_truncate (mode, f0);
          f1 = real_value_truncate (mode, f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

          value = real_value_truncate (mode, value);
          return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
        }
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:  case MOD:  case UDIV:  case UMOD:
          /* We'd need to include tree.h to do this and it doesn't seem worth
             it.  */
          return 0;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:  case ASHIFTRT:
        case ASHIFT:
        case ROTATE:  case ROTATERT:
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and nonzero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */
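          /* E.g. (plus (mult X 3) X) becomes (mult X 4), and
             (plus (ashift X 1) (neg X)) folds all the way to X.  */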

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Reassociate floating point addition only when the user
             specifies unsafe math optimizations.  */
          if (FLOAT_MODE_P (mode)
              && flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case COMPARE:
#ifdef HAVE_cc0
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;
#endif

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }
          break;

        case MINUS:
          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);

          /* Change subtraction from zero into negation.  (0 - x) is the
             same as -x when x is NaN, infinite, or finite and nonzero.
             But if the mode has signed zeros, and does not round towards
             -infinity, then 0 - 0 is 0, not -0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
            return simplify_gen_unary (NEG, mode, op1, mode);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return simplify_gen_unary (NOT, mode, op1, mode);

          /* Subtracting 0 has no effect unless the mode has signed zeros
             and supports rounding towards -infinity.  In such a case,
             0 - 0 is -0.  */
          if (!(HONOR_SIGNED_ZEROS (mode)
                && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* (a - (-b)) -> (a + b).  True even for IEEE.  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* (-x - c) may be simplified as (-c - x).  */
          if (GET_CODE (op0) == NEG
              && (GET_CODE (op1) == CONST_INT
                  || GET_CODE (op1) == CONST_DOUBLE))
            {
              tem = simplify_unary_operation (NEG, mode, op1, mode);
              if (tem)
                return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
            }

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                            GET_MODE (XEXP (op1, 1)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                            GET_MODE (XEXP (op1, 0)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
            }
          break;

        case MULT:
          if (trueop1 == constm1_rtx)
            return simplify_gen_unary (NEG, mode, op0, mode);

          /* Maybe simplify x * 0 to 0.  The reduction is not valid if
             x is NaN, since x * 0 is then also NaN.  Nor is it valid
             when the mode has signed zeros, since multiplying a negative
             number by 0 will give -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for
             signalling NaNs.  */
          if (!HONOR_SNANS (mode)
              && trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
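          /* E.g. (mult X 8) becomes (ashift X 3).  The extra test below
             rejects a CONST_INT with only the host sign bit set when the
             mode is wider than a host word, since sign extension makes
             that a negative value rather than a power of two.  */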
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1)
              && ! rtx_equal_function_value_matters)
            return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

          /* x*2 is x+x and x*(-1) is -x */
          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
              && GET_MODE (op0) == mode)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (REAL_VALUES_EQUAL (d, dconst2))
                return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

              if (REAL_VALUES_EQUAL (d, dconstm1))
                return simplify_gen_unary (NEG, mode, op0, mode);
            }

          /* Reassociate multiplication, but for floating point MULTs
             only when the user specifies unsafe math optimizations.  */
          if (! FLOAT_MODE_P (mode)
              || flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case IOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case XOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return simplify_gen_unary (NOT, mode, op0, mode);
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case AND:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op0;
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UDIV:
          /* Convert divide by power of two into shift (divide by 1 handled
             below).  */
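          /* E.g. (udiv X 8) becomes (lshiftrt X 3).  */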
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));

          /* Fall through....  */

        case DIV:
          if (trueop1 == CONST1_RTX (mode))
            {
              /* On some platforms DIV uses narrower mode than its
                 operands.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              else
                return op0;
            }

          /* Maybe change 0 / x to 0.  This transformation isn't safe for
             modes with NaNs, since 0 / 0 will then be NaN rather than 0.
             Nor is it safe for modes with signed zeros, since dividing
             0 by a negative number gives -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop0 == CONST0_RTX (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Change division by a constant into multiplication.  Only do
             this with -funsafe-math-optimizations.  */
          else if (GET_CODE (trueop1) == CONST_DOUBLE
                   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
                   && trueop1 != CONST0_RTX (mode)
                   && flag_unsafe_math_optimizations)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (! REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
          break;

        case UMOD:
          /* Handle modulus by power of two (mod with 1 handled below).  */
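          /* E.g. (umod X 8) becomes (and X 7).  */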
          if (GET_CODE (trueop1) == CONST_INT
              && exact_log2 (INTVAL (trueop1)) > 0)
            return simplify_gen_binary (AND, mode, op0,
                                        GEN_INT (INTVAL (op1) - 1));

          /* Fall through....  */

        case MOD:
          if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
              && ! side_effects_p (op0) && ! side_effects_p (op1))
            return const0_rtx;
          break;

        case ROTATERT:
        case ROTATE:
        case ASHIFTRT:
          /* Rotating ~0 always results in ~0.  */
          if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Fall through....  */

        case ASHIFT:
        case LSHIFTRT:
          if (trueop1 == const0_rtx)
            return op0;
          if (trueop0 == const0_rtx && ! side_effects_p (op1))
            return op0;
          break;

        case SMIN:
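          /* The minimum signed value dominates; e.g. in SImode,
             (smin X (const_int -2147483648)) is -2147483648.  */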
1891 if (width <= HOST_BITS_PER_WIDE_INT
1892 && GET_CODE (trueop1) == CONST_INT
1893 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1894 && ! side_effects_p (op0))
1895 return op1;
1896 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1897 return op0;
1898 tem = simplify_associative_operation (code, mode, op0, op1);
1899 if (tem)
1900 return tem;
1901 break;
1902
1903 case SMAX:
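     /* (smax X C) is C when C is the largest signed value representable
        in MODE, i.e. GET_MODE_MASK (mode) >> 1.  */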
1904 if (width <= HOST_BITS_PER_WIDE_INT
1905 && GET_CODE (trueop1) == CONST_INT
1906 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1907 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1908 && ! side_effects_p (op0))
1909 return op1;
1910 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1911 return op0;
1912 tem = simplify_associative_operation (code, mode, op0, op1);
1913 if (tem)
1914 return tem;
1915 break;
1916
1917 case UMIN:
1918 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1919 return op1;
1920 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1921 return op0;
1922 tem = simplify_associative_operation (code, mode, op0, op1);
1923 if (tem)
1924 return tem;
1925 break;
1926
1927 case UMAX:
1928 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1929 return op1;
1930 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1931 return op0;
1932 tem = simplify_associative_operation (code, mode, op0, op1);
1933 if (tem)
1934 return tem;
1935 break;
1936
1937 case SS_PLUS:
1938 case US_PLUS:
1939 case SS_MINUS:
1940 case US_MINUS:
1941 /* ??? There are simplifications that can be done. */
1942 return 0;
1943
1944 case VEC_SELECT:
1945 if (!VECTOR_MODE_P (mode))
1946 {
1947 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1948 || (mode
1949 != GET_MODE_INNER (GET_MODE (trueop0)))
1950 || GET_CODE (trueop1) != PARALLEL
1951 || XVECLEN (trueop1, 0) != 1
1952 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
1953 abort ();
1954
1955 if (GET_CODE (trueop0) == CONST_VECTOR)
1956 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
1957 }
1958 else
1959 {
1960 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1961 || (GET_MODE_INNER (mode)
1962 != GET_MODE_INNER (GET_MODE (trueop0)))
1963 || GET_CODE (trueop1) != PARALLEL)
1964 abort ();
1965
1966 if (GET_CODE (trueop0) == CONST_VECTOR)
1967 {
1968 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1969 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1970 rtvec v = rtvec_alloc (n_elts);
1971 unsigned int i;
1972
1973 if (XVECLEN (trueop1, 0) != (int) n_elts)
1974 abort ();
1975 for (i = 0; i < n_elts; i++)
1976 {
1977 rtx x = XVECEXP (trueop1, 0, i);
1978
1979 if (GET_CODE (x) != CONST_INT)
1980 abort ();
1981 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
1982 }
1983
1984 return gen_rtx_CONST_VECTOR (mode, v);
1985 }
1986 }
1987 return 0;
1988 case VEC_CONCAT:
1989 {
1990 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1991 ? GET_MODE (trueop0)
1992 : GET_MODE_INNER (mode));
1993 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
1994 ? GET_MODE (trueop1)
1995 : GET_MODE_INNER (mode));
1996
1997 if (!VECTOR_MODE_P (mode)
1998 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
1999 != GET_MODE_SIZE (mode)))
2000 abort ();
2001
2002 if ((VECTOR_MODE_P (op0_mode)
2003 && (GET_MODE_INNER (mode)
2004 != GET_MODE_INNER (op0_mode)))
2005 || (!VECTOR_MODE_P (op0_mode)
2006 && GET_MODE_INNER (mode) != op0_mode))
2007 abort ();
2008
2009 if ((VECTOR_MODE_P (op1_mode)
2010 && (GET_MODE_INNER (mode)
2011 != GET_MODE_INNER (op1_mode)))
2012 || (!VECTOR_MODE_P (op1_mode)
2013 && GET_MODE_INNER (mode) != op1_mode))
2014 abort ();
2015
2016 if ((GET_CODE (trueop0) == CONST_VECTOR
2017 || GET_CODE (trueop0) == CONST_INT
2018 || GET_CODE (trueop0) == CONST_DOUBLE)
2019 && (GET_CODE (trueop1) == CONST_VECTOR
2020 || GET_CODE (trueop1) == CONST_INT
2021 || GET_CODE (trueop1) == CONST_DOUBLE))
2022 {
2023 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2024 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2025 rtvec v = rtvec_alloc (n_elts);
2026 unsigned int i;
2027 unsigned in_n_elts = 1;
2028
2029 if (VECTOR_MODE_P (op0_mode))
2030 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2031 for (i = 0; i < n_elts; i++)
2032 {
2033 if (i < in_n_elts)
2034 {
2035 if (!VECTOR_MODE_P (op0_mode))
2036 RTVEC_ELT (v, i) = trueop0;
2037 else
2038 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2039 }
2040 else
2041 {
2042 if (!VECTOR_MODE_P (op1_mode))
2043 RTVEC_ELT (v, i) = trueop1;
2044 else
2045 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2046 i - in_n_elts);
2047 }
2048 }
2049
2050 return gen_rtx_CONST_VECTOR (mode, v);
2051 }
2052 }
2053 return 0;
2054
2055 default:
2056 abort ();
2057 }
2058
2059 return 0;
2060 }
2061
2062 /* Get the integer argument values in two forms:
2063 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
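     /* E.g. in QImode (width 8), trueop0 == (const_int -1) gives
        arg0 == 0xff (zero-extended) but arg0s == -1 (sign-extended), so
        the unsigned and signed cases below each see the right value.  */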
2064
2065 arg0 = INTVAL (trueop0);
2066 arg1 = INTVAL (trueop1);
2067
2068 if (width < HOST_BITS_PER_WIDE_INT)
2069 {
2070 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2071 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2072
2073 arg0s = arg0;
2074 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2075 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2076
2077 arg1s = arg1;
2078 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2079 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2080 }
2081 else
2082 {
2083 arg0s = arg0;
2084 arg1s = arg1;
2085 }
2086
2087 /* Compute the value of the arithmetic. */
2088
2089 switch (code)
2090 {
2091 case PLUS:
2092 val = arg0s + arg1s;
2093 break;
2094
2095 case MINUS:
2096 val = arg0s - arg1s;
2097 break;
2098
2099 case MULT:
2100 val = arg0s * arg1s;
2101 break;
2102
2103 case DIV:
2104 if (arg1s == 0
2105 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2106 && arg1s == -1))
2107 return 0;
2108 val = arg0s / arg1s;
2109 break;
2110
2111 case MOD:
2112 if (arg1s == 0
2113 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2114 && arg1s == -1))
2115 return 0;
2116 val = arg0s % arg1s;
2117 break;
2118
2119 case UDIV:
2120 if (arg1 == 0
2121 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2122 && arg1s == -1))
2123 return 0;
2124 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2125 break;
2126
2127 case UMOD:
2128 if (arg1 == 0
2129 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2130 && arg1s == -1))
2131 return 0;
2132 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2133 break;
2134
2135 case AND:
2136 val = arg0 & arg1;
2137 break;
2138
2139 case IOR:
2140 val = arg0 | arg1;
2141 break;
2142
2143 case XOR:
2144 val = arg0 ^ arg1;
2145 break;
2146
2147 case LSHIFTRT:
2148 /* If shift count is undefined, don't fold it; let the machine do
2149 what it wants. But truncate it if the machine will do that. */
2150 if (arg1 < 0)
2151 return 0;
2152
2153 if (SHIFT_COUNT_TRUNCATED)
2154 arg1 %= width;
2155
2156 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2157 break;
2158
2159 case ASHIFT:
2160 if (arg1 < 0)
2161 return 0;
2162
2163 if (SHIFT_COUNT_TRUNCATED)
2164 arg1 %= width;
2165
2166 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2167 break;
2168
2169 case ASHIFTRT:
2170 if (arg1 < 0)
2171 return 0;
2172
2173 if (SHIFT_COUNT_TRUNCATED)
2174 arg1 %= width;
2175
2176 val = arg0s >> arg1;
2177
2178 /* The bootstrap compiler may not have sign-extended the right shift.
2179 Manually extend the sign to ensure bootstrap cc matches gcc. */
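     /* E.g. arg0s == -4 and arg1 == 1 must yield -2: if the host's >>
        filled the vacated bits with zeros, the OR below sets them.  */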
2180 if (arg0s < 0 && arg1 > 0)
2181 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2182
2183 break;
2184
2185 case ROTATERT:
2186 if (arg1 < 0)
2187 return 0;
2188
2189 arg1 %= width;
2190 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2191 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2192 break;
2193
2194 case ROTATE:
2195 if (arg1 < 0)
2196 return 0;
2197
2198 arg1 %= width;
2199 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2200 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2201 break;
2202
2203 case COMPARE:
2204 /* Do nothing here. */
2205 return 0;
2206
2207 case SMIN:
2208 val = arg0s <= arg1s ? arg0s : arg1s;
2209 break;
2210
2211 case UMIN:
2212 val = ((unsigned HOST_WIDE_INT) arg0
2213 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2214 break;
2215
2216 case SMAX:
2217 val = arg0s > arg1s ? arg0s : arg1s;
2218 break;
2219
2220 case UMAX:
2221 val = ((unsigned HOST_WIDE_INT) arg0
2222 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2223 break;
2224
2225 case SS_PLUS:
2226 case US_PLUS:
2227 case SS_MINUS:
2228 case US_MINUS:
2229 /* ??? There are simplifications that can be done. */
2230 return 0;
2231
2232 default:
2233 abort ();
2234 }
2235
2236 val = trunc_int_for_mode (val, mode);
2237
2238 return GEN_INT (val);
2239 }
2240 \f
2241 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2242 PLUS or MINUS.
2243
2244 Rather than testing for specific cases, we use a brute-force method:
2245 apply all possible simplifications until no more changes occur, then
2246 rebuild the operation.
2247
2248 If FORCE is true, then always generate the rtx. This is used to
2249 canonicalize expressions emitted from simplify_gen_binary. Note that this
2250 can still fail if the rtx is too complex. It won't fail just because
2251 the result is not 'simpler' than the input, however. */
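     /* For example, (plus (minus A B) C) is first expanded into the
        operand list {+A, -B, +C}; pairs from that list are simplified
        against each other, and whatever remains is rebuilt as a chain
        of PLUS and MINUS expressions.  */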
2252
2253 struct simplify_plus_minus_op_data
2254 {
2255 rtx op;
2256 int neg;
2257 };
2258
2259 static int
2260 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2261 {
2262 const struct simplify_plus_minus_op_data *d1 = p1;
2263 const struct simplify_plus_minus_op_data *d2 = p2;
2264
2265 return (commutative_operand_precedence (d2->op)
2266 - commutative_operand_precedence (d1->op));
2267 }
2268
2269 static rtx
2270 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2271 rtx op1, int force)
2272 {
2273 struct simplify_plus_minus_op_data ops[8];
2274 rtx result, tem;
2275 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2276 int first, changed;
2277 int i, j;
2278
2279 memset (ops, 0, sizeof ops);
2280
2281 /* Set up the two operands and then expand them until nothing has been
2282 changed. If we run out of room in our array, give up; this should
2283 almost never happen. */
2284
2285 ops[0].op = op0;
2286 ops[0].neg = 0;
2287 ops[1].op = op1;
2288 ops[1].neg = (code == MINUS);
2289
2290 do
2291 {
2292 changed = 0;
2293
2294 for (i = 0; i < n_ops; i++)
2295 {
2296 rtx this_op = ops[i].op;
2297 int this_neg = ops[i].neg;
2298 enum rtx_code this_code = GET_CODE (this_op);
2299
2300 switch (this_code)
2301 {
2302 case PLUS:
2303 case MINUS:
2304 if (n_ops == 7)
2305 return NULL_RTX;
2306
2307 ops[n_ops].op = XEXP (this_op, 1);
2308 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2309 n_ops++;
2310
2311 ops[i].op = XEXP (this_op, 0);
2312 input_ops++;
2313 changed = 1;
2314 break;
2315
2316 case NEG:
2317 ops[i].op = XEXP (this_op, 0);
2318 ops[i].neg = ! this_neg;
2319 changed = 1;
2320 break;
2321
2322 case CONST:
2323 if (n_ops < 7
2324 && GET_CODE (XEXP (this_op, 0)) == PLUS
2325 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2326 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2327 {
2328 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2329 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2330 ops[n_ops].neg = this_neg;
2331 n_ops++;
2332 input_consts++;
2333 changed = 1;
2334 }
2335 break;
2336
2337 case NOT:
2338 /* ~a -> (-a - 1) */
2339 if (n_ops != 7)
2340 {
2341 ops[n_ops].op = constm1_rtx;
2342 ops[n_ops++].neg = this_neg;
2343 ops[i].op = XEXP (this_op, 0);
2344 ops[i].neg = !this_neg;
2345 changed = 1;
2346 }
2347 break;
2348
2349 case CONST_INT:
2350 if (this_neg)
2351 {
2352 ops[i].op = neg_const_int (mode, this_op);
2353 ops[i].neg = 0;
2354 changed = 1;
2355 }
2356 break;
2357
2358 default:
2359 break;
2360 }
2361 }
2362 }
2363 while (changed);
2364
2365 /* If we only have two operands, we can't do anything. */
2366 if (n_ops <= 2 && !force)
2367 return NULL_RTX;
2368
2369 /* Count the number of CONSTs we didn't split above. */
2370 for (i = 0; i < n_ops; i++)
2371 if (GET_CODE (ops[i].op) == CONST)
2372 input_consts++;
2373
2374 /* Now simplify each pair of operands until nothing changes. The first
2375 time through just simplify constants against each other. */
2376
2377 first = 1;
2378 do
2379 {
2380 changed = first;
2381
2382 for (i = 0; i < n_ops - 1; i++)
2383 for (j = i + 1; j < n_ops; j++)
2384 {
2385 rtx lhs = ops[i].op, rhs = ops[j].op;
2386 int lneg = ops[i].neg, rneg = ops[j].neg;
2387
2388 if (lhs != 0 && rhs != 0
2389 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2390 {
2391 enum rtx_code ncode = PLUS;
2392
2393 if (lneg != rneg)
2394 {
2395 ncode = MINUS;
2396 if (lneg)
2397 tem = lhs, lhs = rhs, rhs = tem;
2398 }
2399 else if (swap_commutative_operands_p (lhs, rhs))
2400 tem = lhs, lhs = rhs, rhs = tem;
2401
2402 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2403
2404 /* Reject "simplifications" that just wrap the two
2405 arguments in a CONST. Failure to do so can result
2406 in infinite recursion with simplify_binary_operation
2407 when it calls us to simplify CONST operations. */
2408 if (tem
2409 && ! (GET_CODE (tem) == CONST
2410 && GET_CODE (XEXP (tem, 0)) == ncode
2411 && XEXP (XEXP (tem, 0), 0) == lhs
2412 && XEXP (XEXP (tem, 0), 1) == rhs)
2413 /* Don't allow -x + -1 -> ~x simplifications in the
2414 first pass. This gives us the chance to combine
2415 the -1 with other constants. */
2416 && ! (first
2417 && GET_CODE (tem) == NOT
2418 && XEXP (tem, 0) == rhs))
2419 {
2420 lneg &= rneg;
2421 if (GET_CODE (tem) == NEG)
2422 tem = XEXP (tem, 0), lneg = !lneg;
2423 if (GET_CODE (tem) == CONST_INT && lneg)
2424 tem = neg_const_int (mode, tem), lneg = 0;
2425
2426 ops[i].op = tem;
2427 ops[i].neg = lneg;
2428 ops[j].op = NULL_RTX;
2429 changed = 1;
2430 }
2431 }
2432 }
2433
2434 first = 0;
2435 }
2436 while (changed);
2437
2438 /* Pack all the operands to the lower-numbered entries. */
2439 for (i = 0, j = 0; j < n_ops; j++)
2440 if (ops[j].op)
2441 ops[i++] = ops[j];
2442 n_ops = i;
2443
2444 /* Sort the operations based on swap_commutative_operands_p. */
2445 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2446
2447 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2448 if (n_ops == 2
2449 && GET_CODE (ops[1].op) == CONST_INT
2450 && CONSTANT_P (ops[0].op)
2451 && ops[0].neg)
2452 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2453
2454 /* We suppressed creation of trivial CONST expressions in the
2455 combination loop to avoid recursion. Create one manually now.
2456 The combination loop should have ensured that there is exactly
2457 one CONST_INT, and the sort will have ensured that it is last
2458 in the array and that any other constant will be next-to-last. */
2459
2460 if (n_ops > 1
2461 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2462 && CONSTANT_P (ops[n_ops - 2].op))
2463 {
2464 rtx value = ops[n_ops - 1].op;
2465 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2466 value = neg_const_int (mode, value);
2467 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2468 n_ops--;
2469 }
2470
2471 /* Count the number of CONSTs that we generated. */
2472 n_consts = 0;
2473 for (i = 0; i < n_ops; i++)
2474 if (GET_CODE (ops[i].op) == CONST)
2475 n_consts++;
2476
2477 /* Give up if we didn't reduce the number of operands we had. Make
2478 sure we count a CONST as two operands. If we have the same
2479 number of operands, but have made more CONSTs than before, this
2480 is also an improvement, so accept it. */
2481 if (!force
2482 && (n_ops + n_consts > input_ops
2483 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2484 return NULL_RTX;
2485
2486 /* Put a non-negated operand first, if possible. */
2487
2488 for (i = 0; i < n_ops && ops[i].neg; i++)
2489 continue;
2490 if (i == n_ops)
2491 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2492 else if (i != 0)
2493 {
2494 tem = ops[0].op;
2495 ops[0] = ops[i];
2496 ops[i].op = tem;
2497 ops[i].neg = 1;
2498 }
2499
2500 /* Now make the result by performing the requested operations. */
2501 result = ops[0].op;
2502 for (i = 1; i < n_ops; i++)
2503 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2504 mode, result, ops[i].op);
2505
2506 return result;
2507 }
2508
2509 /* Like simplify_binary_operation except used for relational operators.
2510 MODE is the mode of the operands, not that of the result. If MODE
2511 is VOIDmode, both operands must also be VOIDmode and we compare the
2512 operands in "infinite precision".
2513
2514 If no simplification is possible, this function returns zero. Otherwise,
2515 it returns either const_true_rtx or const0_rtx. */
2516
2517 rtx
2518 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2519 rtx op0, rtx op1)
2520 {
2521 int equal, op0lt, op0ltu, op1lt, op1ltu;
2522 rtx tem;
2523 rtx trueop0;
2524 rtx trueop1;
2525
2526 if (mode == VOIDmode
2527 && (GET_MODE (op0) != VOIDmode
2528 || GET_MODE (op1) != VOIDmode))
2529 abort ();
2530
2531 /* If op0 is a compare, extract the comparison arguments from it. */
2532 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2533 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2534
2535 /* We can't simplify MODE_CC values since we don't know what the
2536 actual comparison is. */
2537 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2538 return 0;
2539
2540 /* Make sure the constant is second. */
2541 if (swap_commutative_operands_p (op0, op1))
2542 {
2543 tem = op0, op0 = op1, op1 = tem;
2544 code = swap_condition (code);
2545 }
2546
2547 trueop0 = avoid_constant_pool_reference (op0);
2548 trueop1 = avoid_constant_pool_reference (op1);
2549
2550 /* For integer comparisons of A and B, we may be able to simplify A - B
2551 and then simplify a comparison of that with zero. If A and B are both
2552 either a register or a CONST_INT, this can't help; testing for these
2553 cases prevents infinite recursion here and speeds things up.
2554
2555 If CODE is an unsigned comparison, then we can never do this optimization,
2556 because it gives an incorrect result if the subtraction wraps around zero.
2557 ANSI C defines unsigned operations such that they never overflow, and
2558 thus such cases cannot be ignored. */
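     /* For instance, given (lt:SI (plus:SI X (const_int 1)) (const_int 1)),
        (X + 1) - 1 folds to X, so we recurse and compare X against zero
        instead.  */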
2559
2560 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2561 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2562 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2563 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2564 /* We cannot do this for == or != if tem is a nonzero address. */
2565 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2566 && code != GTU && code != GEU && code != LTU && code != LEU)
2567 return simplify_relational_operation (signed_condition (code),
2568 mode, tem, const0_rtx);
2569
2570 if (flag_unsafe_math_optimizations && code == ORDERED)
2571 return const_true_rtx;
2572
2573 if (flag_unsafe_math_optimizations && code == UNORDERED)
2574 return const0_rtx;
2575
2576 /* For modes without NaNs, if the two operands are equal, we know the
2577 result except if they have side-effects. */
2578 if (! HONOR_NANS (GET_MODE (trueop0))
2579 && rtx_equal_p (trueop0, trueop1)
2580 && ! side_effects_p (trueop0))
2581 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2582
2583 /* If the operands are floating-point constants, see if we can fold
2584 the result. */
2585 else if (GET_CODE (trueop0) == CONST_DOUBLE
2586 && GET_CODE (trueop1) == CONST_DOUBLE
2587 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2588 {
2589 REAL_VALUE_TYPE d0, d1;
2590
2591 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2592 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2593
2594 /* Comparisons are unordered iff at least one of the values is NaN. */
2595 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2596 switch (code)
2597 {
2598 case UNEQ:
2599 case UNLT:
2600 case UNGT:
2601 case UNLE:
2602 case UNGE:
2603 case NE:
2604 case UNORDERED:
2605 return const_true_rtx;
2606 case EQ:
2607 case LT:
2608 case GT:
2609 case LE:
2610 case GE:
2611 case LTGT:
2612 case ORDERED:
2613 return const0_rtx;
2614 default:
2615 return 0;
2616 }
2617
2618 equal = REAL_VALUES_EQUAL (d0, d1);
2619 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2620 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2621 }
2622
2623 /* Otherwise, see if the operands are both integers. */
2624 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2625 && (GET_CODE (trueop0) == CONST_DOUBLE
2626 || GET_CODE (trueop0) == CONST_INT)
2627 && (GET_CODE (trueop1) == CONST_DOUBLE
2628 || GET_CODE (trueop1) == CONST_INT))
2629 {
2630 int width = GET_MODE_BITSIZE (mode);
2631 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2632 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2633
2634 /* Get the two words comprising each integer constant. */
2635 if (GET_CODE (trueop0) == CONST_DOUBLE)
2636 {
2637 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2638 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2639 }
2640 else
2641 {
2642 l0u = l0s = INTVAL (trueop0);
2643 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2644 }
2645
2646 if (GET_CODE (trueop1) == CONST_DOUBLE)
2647 {
2648 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2649 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2650 }
2651 else
2652 {
2653 l1u = l1s = INTVAL (trueop1);
2654 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2655 }
2656
2657 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2658 we have to sign or zero-extend the values. */
2659 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2660 {
2661 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2662 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2663
2664 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2665 l0s |= ((HOST_WIDE_INT) (-1) << width);
2666
2667 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2668 l1s |= ((HOST_WIDE_INT) (-1) << width);
2669 }
2670 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2671 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2672
2673 equal = (h0u == h1u && l0u == l1u);
2674 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2675 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2676 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2677 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2678 }
2679
2680 /* Otherwise, there are some code-specific tests we can make. */
2681 else
2682 {
2683 switch (code)
2684 {
2685 case EQ:
2686 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2687 return const0_rtx;
2688 break;
2689
2690 case NE:
2691 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2692 return const_true_rtx;
2693 break;
2694
2695 case GEU:
2696 /* Unsigned values are never negative. */
2697 if (trueop1 == const0_rtx)
2698 return const_true_rtx;
2699 break;
2700
2701 case LTU:
2702 if (trueop1 == const0_rtx)
2703 return const0_rtx;
2704 break;
2705
2706 case LEU:
2707 /* Unsigned values are never greater than the largest
2708 unsigned value. */
2709 if (GET_CODE (trueop1) == CONST_INT
2710 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2711 && INTEGRAL_MODE_P (mode))
2712 return const_true_rtx;
2713 break;
2714
2715 case GTU:
2716 if (GET_CODE (trueop1) == CONST_INT
2717 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2718 && INTEGRAL_MODE_P (mode))
2719 return const0_rtx;
2720 break;
2721
2722 case LT:
2723 /* Optimize abs(x) < 0.0. */
2724 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2725 {
2726 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2727 : trueop0;
2728 if (GET_CODE (tem) == ABS)
2729 return const0_rtx;
2730 }
2731 break;
2732
2733 case GE:
2734 /* Optimize abs(x) >= 0.0. */
2735 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2736 {
2737 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2738 : trueop0;
2739 if (GET_CODE (tem) == ABS)
2740 return const_true_rtx;
2741 }
2742 break;
2743
2744 case UNGE:
2745 /* Optimize ! (abs(x) < 0.0). */
2746 if (trueop1 == CONST0_RTX (mode))
2747 {
2748 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2749 : trueop0;
2750 if (GET_CODE (tem) == ABS)
2751 return const_true_rtx;
2752 }
2753 break;
2754
2755 default:
2756 break;
2757 }
2758
2759 return 0;
2760 }
2761
2762 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2763 as appropriate. */
2764 switch (code)
2765 {
2766 case EQ:
2767 case UNEQ:
2768 return equal ? const_true_rtx : const0_rtx;
2769 case NE:
2770 case LTGT:
2771 return ! equal ? const_true_rtx : const0_rtx;
2772 case LT:
2773 case UNLT:
2774 return op0lt ? const_true_rtx : const0_rtx;
2775 case GT:
2776 case UNGT:
2777 return op1lt ? const_true_rtx : const0_rtx;
2778 case LTU:
2779 return op0ltu ? const_true_rtx : const0_rtx;
2780 case GTU:
2781 return op1ltu ? const_true_rtx : const0_rtx;
2782 case LE:
2783 case UNLE:
2784 return equal || op0lt ? const_true_rtx : const0_rtx;
2785 case GE:
2786 case UNGE:
2787 return equal || op1lt ? const_true_rtx : const0_rtx;
2788 case LEU:
2789 return equal || op0ltu ? const_true_rtx : const0_rtx;
2790 case GEU:
2791 return equal || op1ltu ? const_true_rtx : const0_rtx;
2792 case ORDERED:
2793 return const_true_rtx;
2794 case UNORDERED:
2795 return const0_rtx;
2796 default:
2797 abort ();
2798 }
2799 }
2800 \f
2801 /* Simplify CODE, an operation with result mode MODE and three operands,
2802 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2803 a constant. Return 0 if no simplification is possible. */
2804
2805 rtx
2806 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2807 enum machine_mode op0_mode, rtx op0, rtx op1,
2808 rtx op2)
2809 {
2810 unsigned int width = GET_MODE_BITSIZE (mode);
2811
2812 /* VOIDmode means "infinite" precision. */
2813 if (width == 0)
2814 width = HOST_BITS_PER_WIDE_INT;
2815
2816 switch (code)
2817 {
2818 case SIGN_EXTRACT:
2819 case ZERO_EXTRACT:
2820 if (GET_CODE (op0) == CONST_INT
2821 && GET_CODE (op1) == CONST_INT
2822 && GET_CODE (op2) == CONST_INT
2823 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2824 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2825 {
2826 /* Extracting a bit-field from a constant. */
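     /* E.g. (zero_extract:SI (const_int 0xa8) (const_int 4) (const_int 3))
        with !BITS_BIG_ENDIAN: 0xa8 >> 3 == 0x15, masked to four bits
        gives (const_int 5).  */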
2827 HOST_WIDE_INT val = INTVAL (op0);
2828
2829 if (BITS_BIG_ENDIAN)
2830 val >>= (GET_MODE_BITSIZE (op0_mode)
2831 - INTVAL (op2) - INTVAL (op1));
2832 else
2833 val >>= INTVAL (op2);
2834
2835 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2836 {
2837 /* First zero-extend. */
2838 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2839 /* If desired, propagate sign bit. */
2840 if (code == SIGN_EXTRACT
2841 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2842 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2843 }
2844
2845 /* Clear the bits that don't belong in our mode,
2846 unless they and our sign bit are all one.
2847 So we get either a reasonable negative value or a reasonable
2848 unsigned value for this mode. */
2849 if (width < HOST_BITS_PER_WIDE_INT
2850 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2851 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2852 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2853
2854 return GEN_INT (val);
2855 }
2856 break;
2857
2858 case IF_THEN_ELSE:
2859 if (GET_CODE (op0) == CONST_INT)
2860 return op0 != const0_rtx ? op1 : op2;
2861
2862 /* Convert c ? a : a into "a". */
2863 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
2864 return op1;
2865
2866 /* Convert a != b ? a : b into "a". */
2867 if (GET_CODE (op0) == NE
2868 && ! side_effects_p (op0)
2869 && ! HONOR_NANS (mode)
2870 && ! HONOR_SIGNED_ZEROS (mode)
2871 && ((rtx_equal_p (XEXP (op0, 0), op1)
2872 && rtx_equal_p (XEXP (op0, 1), op2))
2873 || (rtx_equal_p (XEXP (op0, 0), op2)
2874 && rtx_equal_p (XEXP (op0, 1), op1))))
2875 return op1;
2876
2877 /* Convert a == b ? a : b into "b". */
2878 if (GET_CODE (op0) == EQ
2879 && ! side_effects_p (op0)
2880 && ! HONOR_NANS (mode)
2881 && ! HONOR_SIGNED_ZEROS (mode)
2882 && ((rtx_equal_p (XEXP (op0, 0), op1)
2883 && rtx_equal_p (XEXP (op0, 1), op2))
2884 || (rtx_equal_p (XEXP (op0, 0), op2)
2885 && rtx_equal_p (XEXP (op0, 1), op1))))
2886 return op2;
2887
2888 if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2889 {
2890 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2891 ? GET_MODE (XEXP (op0, 1))
2892 : GET_MODE (XEXP (op0, 0)));
2893 rtx temp;
2894 if (cmp_mode == VOIDmode)
2895 cmp_mode = op0_mode;
2896 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2897 XEXP (op0, 0), XEXP (op0, 1));
2898
2899 /* See if any simplifications were possible. */
2900 if (temp == const0_rtx)
2901 return op2;
2902 else if (temp == const_true_rtx)
2903 return op1;
2904 else if (temp)
2905 abort ();
2906
2907 /* Look for happy constants in op1 and op2. */
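     /* That is, with STORE_FLAG_VALUE == 1, (if_then_else (lt X Y)
        (const_int 1) (const_int 0)) collapses to (lt X Y) itself, and
        with the two constants swapped we use the reversed comparison,
        here (ge X Y), when one can be determined.  */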
2908 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2909 {
2910 HOST_WIDE_INT t = INTVAL (op1);
2911 HOST_WIDE_INT f = INTVAL (op2);
2912
2913 if (t == STORE_FLAG_VALUE && f == 0)
2914 code = GET_CODE (op0);
2915 else if (t == 0 && f == STORE_FLAG_VALUE)
2916 {
2917 enum rtx_code tmp;
2918 tmp = reversed_comparison_code (op0, NULL_RTX);
2919 if (tmp == UNKNOWN)
2920 break;
2921 code = tmp;
2922 }
2923 else
2924 break;
2925
2926 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2927 }
2928 }
2929 break;
2930
2931 case VEC_MERGE:
2932 if (GET_MODE (op0) != mode
2933 || GET_MODE (op1) != mode
2934 || !VECTOR_MODE_P (mode))
2935 abort ();
2936 op2 = avoid_constant_pool_reference (op2);
2937 if (GET_CODE (op2) == CONST_INT)
2938 {
2939 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2940 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2941 int mask = (1 << n_elts) - 1;
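     /* Each of the low n_elts bits of op2 selects the corresponding
        element of the result from op0 when set and from op1 when clear.  */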
2942
2943 if (!(INTVAL (op2) & mask))
2944 return op1;
2945 if ((INTVAL (op2) & mask) == mask)
2946 return op0;
2947
2948 op0 = avoid_constant_pool_reference (op0);
2949 op1 = avoid_constant_pool_reference (op1);
2950 if (GET_CODE (op0) == CONST_VECTOR
2951 && GET_CODE (op1) == CONST_VECTOR)
2952 {
2953 rtvec v = rtvec_alloc (n_elts);
2954 unsigned int i;
2955
2956 for (i = 0; i < n_elts; i++)
2957 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2958 ? CONST_VECTOR_ELT (op0, i)
2959 : CONST_VECTOR_ELT (op1, i));
2960 return gen_rtx_CONST_VECTOR (mode, v);
2961 }
2962 }
2963 break;
2964
2965 default:
2966 abort ();
2967 }
2968
2969 return 0;
2970 }
2971
2972 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
2973 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
2974
2975 Works by unpacking OP into a collection of 8-bit values
2976 represented as a little-endian array of 'unsigned char', selecting by BYTE,
2977 and then repacking them for OUTERMODE. */
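     /* For example, on a little-endian target, an HImode (const_int 0x1234)
        unpacks to the byte array {0x34, 0x12}, so a QImode SUBREG at
        byte 0 repacks to (const_int 0x34).  */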
2978
2979 static rtx
2980 simplify_immed_subreg (enum machine_mode outermode, rtx op,
2981 enum machine_mode innermode, unsigned int byte)
2982 {
2983 /* We support up to 512-bit values (for V8DFmode). */
2984 enum {
2985 max_bitsize = 512,
2986 value_bit = 8,
2987 value_mask = (1 << value_bit) - 1
2988 };
2989 unsigned char value[max_bitsize / value_bit];
2990 int value_start;
2991 int i;
2992 int elem;
2993
2994 int num_elem;
2995 rtx * elems;
2996 int elem_bitsize;
2997 rtx result_s;
2998 rtvec result_v = NULL;
2999 enum mode_class outer_class;
3000 enum machine_mode outer_submode;
3001
3002 /* Some ports misuse CCmode. */
3003 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3004 return op;
3005
3006 /* Unpack the value. */
3007
3008 if (GET_CODE (op) == CONST_VECTOR)
3009 {
3010 num_elem = CONST_VECTOR_NUNITS (op);
3011 elems = &CONST_VECTOR_ELT (op, 0);
3012 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3013 }
3014 else
3015 {
3016 num_elem = 1;
3017 elems = &op;
3018 elem_bitsize = max_bitsize;
3019 }
3020
3021 if (BITS_PER_UNIT % value_bit != 0)
3022 abort (); /* Too complicated; reducing value_bit may help. */
3023 if (elem_bitsize % BITS_PER_UNIT != 0)
3024 abort (); /* I don't know how to handle endianness of sub-units. */
3025
3026 for (elem = 0; elem < num_elem; elem++)
3027 {
3028 unsigned char * vp;
3029 rtx el = elems[elem];
3030
3031 /* Vectors are kept in target memory order. (This is probably
3032 a mistake.) */
3033 {
3034 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3035 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3036 / BITS_PER_UNIT);
3037 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3038 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3039 unsigned bytele = (subword_byte % UNITS_PER_WORD
3040 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3041 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3042 }
3043
3044 switch (GET_CODE (el))
3045 {
3046 case CONST_INT:
3047 for (i = 0;
3048 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3049 i += value_bit)
3050 *vp++ = INTVAL (el) >> i;
3051 /* CONST_INTs are always logically sign-extended. */
3052 for (; i < elem_bitsize; i += value_bit)
3053 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3054 break;
3055
3056 case CONST_DOUBLE:
3057 if (GET_MODE (el) == VOIDmode)
3058 {
3059 /* If this triggers, someone should have generated a
3060 CONST_INT instead. */
3061 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3062 abort ();
3063
3064 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3065 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3066 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3067 {
3068 *vp++
3069 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3070 i += value_bit;
3071 }
3072 /* It shouldn't matter what's done here, so fill it with
3073 zero. */
3074 for (; i < max_bitsize; i += value_bit)
3075 *vp++ = 0;
3076 }
3077 else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
3078 {
3079 long tmp[max_bitsize / 32];
3080 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3081
3082 if (bitsize > elem_bitsize)
3083 abort ();
3084 if (bitsize % value_bit != 0)
3085 abort ();
3086
3087 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3088 GET_MODE (el));
3089
3090 /* real_to_target produces its result in words affected by
3091 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3092 and use WORDS_BIG_ENDIAN instead; see the documentation
3093 of SUBREG in rtl.texi. */
3094 for (i = 0; i < bitsize; i += value_bit)
3095 {
3096 int ibase;
3097 if (WORDS_BIG_ENDIAN)
3098 ibase = bitsize - 1 - i;
3099 else
3100 ibase = i;
3101 *vp++ = tmp[ibase / 32] >> i % 32;
3102 }
3103
3104 /* It shouldn't matter what's done here, so fill it with
3105 zero. */
3106 for (; i < elem_bitsize; i += value_bit)
3107 *vp++ = 0;
3108 }
3109 else
3110 abort ();
3111 break;
3112
3113 default:
3114 abort ();
3115 }
3116 }
3117
3118 /* Now, pick the right byte to start with. */
3119 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3120 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3121 will already have offset 0. */
3122 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3123 {
3124 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3125 - byte);
3126 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3127 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3128 byte = (subword_byte % UNITS_PER_WORD
3129 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3130 }
3131
3132 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3133 so if it's become negative it will instead be very large.) */
3134 if (byte >= GET_MODE_SIZE (innermode))
3135 abort ();
3136
3137 /* Convert from bytes to chunks of size value_bit. */
3138 value_start = byte * (BITS_PER_UNIT / value_bit);
3139
3140 /* Re-pack the value. */
3141
3142 if (VECTOR_MODE_P (outermode))
3143 {
3144 num_elem = GET_MODE_NUNITS (outermode);
3145 result_v = rtvec_alloc (num_elem);
3146 elems = &RTVEC_ELT (result_v, 0);
3147 outer_submode = GET_MODE_INNER (outermode);
3148 }
3149 else
3150 {
3151 num_elem = 1;
3152 elems = &result_s;
3153 outer_submode = outermode;
3154 }
3155
3156 outer_class = GET_MODE_CLASS (outer_submode);
3157 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3158
3159 if (elem_bitsize % value_bit != 0)
3160 abort ();
3161 if (elem_bitsize + value_start * value_bit > max_bitsize)
3162 abort ();
3163
3164 for (elem = 0; elem < num_elem; elem++)
3165 {
3166 unsigned char *vp;
3167
3168 /* Vectors are stored in target memory order. (This is probably
3169 a mistake.) */
3170 {
3171 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3172 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3173 / BITS_PER_UNIT);
3174 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3175 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3176 unsigned bytele = (subword_byte % UNITS_PER_WORD
3177 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3178 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3179 }
3180
3181 switch (outer_class)
3182 {
3183 case MODE_INT:
3184 case MODE_PARTIAL_INT:
3185 {
3186 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3187
3188 for (i = 0;
3189 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3190 i += value_bit)
3191 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3192 for (; i < elem_bitsize; i += value_bit)
3193 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3194 << (i - HOST_BITS_PER_WIDE_INT));
3195
3196 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3197 know why. */
3198 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3199 elems[elem] = gen_int_mode (lo, outer_submode);
3200 else
3201 elems[elem] = immed_double_const (lo, hi, outer_submode);
3202 }
3203 break;
3204
3205 case MODE_FLOAT:
3206 {
3207 REAL_VALUE_TYPE r;
3208 long tmp[max_bitsize / 32];
3209
3210 /* real_from_target wants its input in words affected by
3211 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3212 and use WORDS_BIG_ENDIAN instead; see the documentation
3213 of SUBREG in rtl.texi. */
3214 for (i = 0; i < max_bitsize / 32; i++)
3215 tmp[i] = 0;
3216 for (i = 0; i < elem_bitsize; i += value_bit)
3217 {
3218 int ibase;
3219 if (WORDS_BIG_ENDIAN)
3220 ibase = elem_bitsize - 1 - i;
3221 else
3222 ibase = i;
3223 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3224 }
3225
3226 real_from_target (&r, tmp, outer_submode);
3227 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3228 }
3229 break;
3230
3231 default:
3232 abort ();
3233 }
3234 }
3235 if (VECTOR_MODE_P (outermode))
3236 return gen_rtx_CONST_VECTOR (outermode, result_v);
3237 else
3238 return result_s;
3239 }
3240
3241 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3242 Return 0 if no simplifications are possible. */
3243 rtx
3244 simplify_subreg (enum machine_mode outermode, rtx op,
3245 enum machine_mode innermode, unsigned int byte)
3246 {
3247 /* Little bit of sanity checking. */
3248 if (innermode == VOIDmode || outermode == VOIDmode
3249 || innermode == BLKmode || outermode == BLKmode)
3250 abort ();
3251
3252 if (GET_MODE (op) != innermode
3253 && GET_MODE (op) != VOIDmode)
3254 abort ();
3255
3256 if (byte % GET_MODE_SIZE (outermode)
3257 || byte >= GET_MODE_SIZE (innermode))
3258 abort ();
3259
3260 if (outermode == innermode && !byte)
3261 return op;
3262
3263 if (GET_CODE (op) == CONST_INT
3264 || GET_CODE (op) == CONST_DOUBLE
3265 || GET_CODE (op) == CONST_VECTOR)
3266 return simplify_immed_subreg (outermode, op, innermode, byte);
3267
3268 /* Changing mode twice with SUBREG => just change it once,
3269 or not at all if changing back to the starting mode of OP. */
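     /* E.g. on a little-endian target, (subreg:QI (subreg:HI (reg:SI R) 0) 0)
        folds to (subreg:QI (reg:SI R) 0), and a SUBREG that restores the
        original mode at offset 0 folds away entirely.  */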
3270 if (GET_CODE (op) == SUBREG)
3271 {
3272 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3273 int final_offset = byte + SUBREG_BYTE (op);
3274 rtx new;
3275
3276 if (outermode == innermostmode
3277 && byte == 0 && SUBREG_BYTE (op) == 0)
3278 return SUBREG_REG (op);
3279
3280 /* The SUBREG_BYTE represents an offset, as if the value were stored
3281 in memory. An irritating exception is the paradoxical subreg, where
3282 we define SUBREG_BYTE to be 0; on big-endian machines this value
3283 would otherwise be negative. For a moment, undo this exception. */
3284 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3285 {
3286 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3287 if (WORDS_BIG_ENDIAN)
3288 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3289 if (BYTES_BIG_ENDIAN)
3290 final_offset += difference % UNITS_PER_WORD;
3291 }
3292 if (SUBREG_BYTE (op) == 0
3293 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3294 {
3295 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3296 if (WORDS_BIG_ENDIAN)
3297 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3298 if (BYTES_BIG_ENDIAN)
3299 final_offset += difference % UNITS_PER_WORD;
3300 }
3301
3302 /* See whether resulting subreg will be paradoxical. */
3303 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3304 {
3305 /* In nonparadoxical subregs we can't handle negative offsets. */
3306 if (final_offset < 0)
3307 return NULL_RTX;
3308 /* Bail out in case resulting subreg would be incorrect. */
3309 if (final_offset % GET_MODE_SIZE (outermode)
3310 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3311 return NULL_RTX;
3312 }
3313 else
3314 {
3315 int offset = 0;
3316 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3317
3318 /* In a paradoxical subreg, see if we are still looking at the lower part.
3319 If so, our SUBREG_BYTE will be 0. */
3320 if (WORDS_BIG_ENDIAN)
3321 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3322 if (BYTES_BIG_ENDIAN)
3323 offset += difference % UNITS_PER_WORD;
3324 if (offset == final_offset)
3325 final_offset = 0;
3326 else
3327 return NULL_RTX;
3328 }
3329
3330 /* Recurse for further possible simplifications. */
3331 new = simplify_subreg (outermode, SUBREG_REG (op),
3332 GET_MODE (SUBREG_REG (op)),
3333 final_offset);
3334 if (new)
3335 return new;
3336 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3337 }
3338
3339 /* SUBREG of a hard register => just change the register number
3340 and/or mode. If the hard register is not valid in that mode,
3341 suppress this simplification. If the hard register is the stack,
3342 frame, or argument pointer, leave this as a SUBREG. */
3343
3344 if (REG_P (op)
3345 && (! REG_FUNCTION_VALUE_P (op)
3346 || ! rtx_equal_function_value_matters)
3347 && REGNO (op) < FIRST_PSEUDO_REGISTER
3348 #ifdef CANNOT_CHANGE_MODE_CLASS
3349 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3350 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3351 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3352 #endif
3353 && ((reload_completed && !frame_pointer_needed)
3354 || (REGNO (op) != FRAME_POINTER_REGNUM
3355 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3356 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3357 #endif
3358 ))
3359 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3360 && REGNO (op) != ARG_POINTER_REGNUM
3361 #endif
3362 && REGNO (op) != STACK_POINTER_REGNUM
3363 && subreg_offset_representable_p (REGNO (op), innermode,
3364 byte, outermode))
3365 {
3366 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3367 int final_regno = subreg_hard_regno (tem, 0);
3368
3369 /* ??? We do allow it if the current REG is not valid for
3370 its mode. This is a kludge to work around how float/complex
3371 arguments are passed on 32-bit SPARC and should be fixed. */
3372 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3373 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3374 {
3375 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3376
3377 /* Propagate the original regno. We don't have any way to specify
3378 an offset inside the original regno, so do so only for the lowpart.
3379 The information is used only by alias analysis, which cannot
3380 grok partial registers anyway. */
3381
3382 if (subreg_lowpart_offset (outermode, innermode) == byte)
3383 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3384 return x;
3385 }
3386 }
3387
3388 /* If we have a SUBREG of a register that we are replacing and we are
3389 replacing it with a MEM, make a new MEM and try replacing the
3390 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3391 or if we would be widening it. */
3392
3393 if (GET_CODE (op) == MEM
3394 && ! mode_dependent_address_p (XEXP (op, 0))
3395 /* Allow splitting of volatile memory references in case we don't
3396 have instruction to move the whole thing. */
3397 && (! MEM_VOLATILE_P (op)
3398 || ! have_insn_for (SET, innermode))
3399 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3400 return adjust_address_nv (op, outermode, byte);
3401
3402 /* Handle complex values represented as CONCAT
3403 of real and imaginary part. */
3404 if (GET_CODE (op) == CONCAT)
3405 {
3406 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3407 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3408 unsigned int final_offset;
3409 rtx res;
3410
3411 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3412 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3413 if (res)
3414 return res;
3415 /* We can at least simplify it by referring directly to the
3416 relevant part. */
3417 return gen_rtx_SUBREG (outermode, part, final_offset);
3418 }
3419
3420 /* Optimize SUBREG truncations of zero and sign extended values. */
3421 if ((GET_CODE (op) == ZERO_EXTEND
3422 || GET_CODE (op) == SIGN_EXTEND)
3423 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3424 {
3425 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3426
3427 /* If we're requesting the lowpart of a zero or sign extension,
3428 there are three possibilities. If the outermode is the same
3429 as the origmode, we can omit both the extension and the subreg.
3430 If the outermode is not larger than the origmode, we can apply
3431 the truncation without the extension. Finally, if the outermode
3432 is larger than the origmode, but both are integer modes, we
3433 can just extend to the appropriate mode. */
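     /* E.g. on a little-endian target, (subreg:HI (zero_extend:SI (reg:HI R)) 0)
        is just (reg:HI R), since the subreg selects exactly the bits that
        the extension preserved.  */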
3434 if (bitpos == 0)
3435 {
3436 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3437 if (outermode == origmode)
3438 return XEXP (op, 0);
3439 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3440 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3441 subreg_lowpart_offset (outermode,
3442 origmode));
3443 if (SCALAR_INT_MODE_P (outermode))
3444 return simplify_gen_unary (GET_CODE (op), outermode,
3445 XEXP (op, 0), origmode);
3446 }
3447
3448 /* A SUBREG resulting from a zero extension may fold to zero if
3449 it extracts bits higher than those the ZERO_EXTEND's source provides. */
3450 if (GET_CODE (op) == ZERO_EXTEND
3451 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3452 return CONST0_RTX (outermode);
3453 }
3454
3455 return NULL_RTX;
3456 }
3457
3458 /* Make a SUBREG operation or equivalent if it folds. */
3459
3460 rtx
3461 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3462 enum machine_mode innermode, unsigned int byte)
3463 {
3464 rtx new;
3465 /* Little bit of sanity checking. */
3466 if (innermode == VOIDmode || outermode == VOIDmode
3467 || innermode == BLKmode || outermode == BLKmode)
3468 abort ();
3469
3470 if (GET_MODE (op) != innermode
3471 && GET_MODE (op) != VOIDmode)
3472 abort ();
3473
3474 if (byte % GET_MODE_SIZE (outermode)
3475 || byte >= GET_MODE_SIZE (innermode))
3476 abort ();
3477
3478 if (GET_CODE (op) == QUEUED)
3479 return NULL_RTX;
3480
3481 new = simplify_subreg (outermode, op, innermode, byte);
3482 if (new)
3483 return new;
3484
3485 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3486 return NULL_RTX;
3487
3488 return gen_rtx_SUBREG (outermode, op, byte);
3489 }
3490 /* Simplify X, an rtx expression.
3491
3492 Return the simplified expression or NULL if no simplifications
3493 were possible.
3494
3495 This is the preferred entry point into the simplification routines;
3496 however, we still allow passes to call the more specific routines.
3497
3498 Right now GCC has three (yes, three) major bodies of RTL simplification
3499 code that need to be unified.
3500
3501 1. fold_rtx in cse.c. This code uses various CSE specific
3502 information to aid in RTL simplification.
3503
3504 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3505 it uses combine specific information to aid in RTL
3506 simplification.
3507
3508 3. The routines in this file.
3509
3510
3511 Long term we want to have only one body of simplification code; to
3512 get to that state I recommend the following steps:
3513
3514 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3515 which do not depend on pass-specific state into these routines.
3516
3517 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3518 use this routine whenever possible.
3519
3520 3. Allow for pass-dependent state to be provided to these
3521 routines and add simplifications based on the pass-dependent
3522 state. Remove code from cse.c & combine.c that becomes
3523 redundant/dead.
3524
3525 It will take time, but ultimately the compiler will be easier to
3526 maintain and improve. It's totally silly that when we add a
3527 simplification it needs to be added in four places (three for RTL
3528 simplification and one for tree simplification). */
3529
3530 rtx
3531 simplify_rtx (rtx x)
3532 {
3533 enum rtx_code code = GET_CODE (x);
3534 enum machine_mode mode = GET_MODE (x);
3535 rtx temp;
3536
3537 switch (GET_RTX_CLASS (code))
3538 {
3539 case '1':
3540 return simplify_unary_operation (code, mode,
3541 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3542 case 'c':
3543 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3544 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3545
3546 /* Fall through. */
3547
3548 case '2':
3549 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3550
3551 case '3':
3552 case 'b':
3553 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3554 XEXP (x, 0), XEXP (x, 1),
3555 XEXP (x, 2));
3556
3557 case '<':
3558 temp = simplify_relational_operation (code,
3559 ((GET_MODE (XEXP (x, 0))
3560 != VOIDmode)
3561 ? GET_MODE (XEXP (x, 0))
3562 : GET_MODE (XEXP (x, 1))),
3563 XEXP (x, 0), XEXP (x, 1));
3564 #ifdef FLOAT_STORE_FLAG_VALUE
3565 if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3566 {
3567 if (temp == const0_rtx)
3568 temp = CONST0_RTX (mode);
3569 else
3570 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3571 mode);
3572 }
3573 #endif
3574 return temp;
3575
3576 case 'x':
3577 if (code == SUBREG)
3578 return simplify_gen_subreg (mode, SUBREG_REG (x),
3579 GET_MODE (SUBREG_REG (x)),
3580 SUBREG_BYTE (x));
3581 if (code == CONSTANT_P_RTX)
3582 {
3583 if (CONSTANT_P (XEXP (x, 0)))
3584 return const1_rtx;
3585 }
3586 break;
3587
3588 case 'o':
3589 if (code == LO_SUM)
3590 {
3591 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3592 if (GET_CODE (XEXP (x, 0)) == HIGH
3593 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3594 return XEXP (x, 1);
3595 }
3596 break;
3597
3598 default:
3599 break;
3600 }
3601 return NULL;
3602 }