1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
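/* For example, when LOW is the low word of the value -1 (all bits set),
HWI_SIGN_EXTEND (LOW) yields the all-ones high word -1; for any LOW with
a clear sign bit it yields a zero high word.  */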
51
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
55 rtx, int);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 \f
61 /* Negate a CONST_INT rtx, truncating the result (negating the most
62 negative number overflows).  */
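/* E.g. in SImode, -(-0x80000000) is 0x80000000, which does not fit;
gen_int_mode truncates it back into the mode, yielding the operand's
own value again.  */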
63 static rtx
64 neg_const_int (enum machine_mode mode, rtx i)
65 {
66 return gen_int_mode (- INTVAL (i), mode);
67 }
68
69 \f
70 /* Make a binary operation by properly ordering the operands and
71 seeing if the expression folds. */
72
73 rtx
74 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
75 rtx op1)
76 {
77 rtx tem;
78
79 /* Put complex operands first and constants second if commutative. */
80 if (GET_RTX_CLASS (code) == 'c'
81 && swap_commutative_operands_p (op0, op1))
82 tem = op0, op0 = op1, op1 = tem;
83
84 /* If this simplifies, do it. */
85 tem = simplify_binary_operation (code, mode, op0, op1);
86 if (tem)
87 return tem;
88
89 /* Handle addition and subtraction specially. Otherwise, just form
90 the operation. */
91
92 if (code == PLUS || code == MINUS)
93 {
94 tem = simplify_plus_minus (code, mode, op0, op1, 1);
95 if (tem)
96 return tem;
97 }
98
99 return gen_rtx_fmt_ee (code, mode, op0, op1);
100 }
101 \f
102 /* If X is a MEM referencing the constant pool, return the real value.
103 Otherwise return X. */
104 rtx
105 avoid_constant_pool_reference (rtx x)
106 {
107 rtx c, tmp, addr;
108 enum machine_mode cmode;
109
110 switch (GET_CODE (x))
111 {
112 case MEM:
113 break;
114
115 case FLOAT_EXTEND:
116 /* Handle float extensions of constant pool references. */
117 tmp = XEXP (x, 0);
118 c = avoid_constant_pool_reference (tmp);
119 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
120 {
121 REAL_VALUE_TYPE d;
122
123 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
124 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
125 }
126 return x;
127
128 default:
129 return x;
130 }
131
132 addr = XEXP (x, 0);
133
134 /* Call target hook to avoid the effects of -fpic etc.... */
135 addr = (*targetm.delegitimize_address) (addr);
136
137 if (GET_CODE (addr) == LO_SUM)
138 addr = XEXP (addr, 1);
139
140 if (GET_CODE (addr) != SYMBOL_REF
141 || ! CONSTANT_POOL_ADDRESS_P (addr))
142 return x;
143
144 c = get_pool_constant (addr);
145 cmode = get_pool_mode (addr);
146
147 /* If we're accessing the constant in a different mode than the one it
148 was originally stored in, attempt to fix that up via subreg simplifications.
149 If that fails we have no choice but to return the original memory. */
150 if (cmode != GET_MODE (x))
151 {
152 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
153 return c ? c : x;
154 }
155
156 return c;
157 }
158 \f
159 /* Make a unary operation by first seeing if it folds and otherwise making
160 the specified operation. */
161
162 rtx
163 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
164 enum machine_mode op_mode)
165 {
166 rtx tem;
167
168 /* If this simplifies, use it. */
169 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
170 return tem;
171
172 return gen_rtx_fmt_e (code, mode, op);
173 }
174
175 /* Likewise for ternary operations. */
176
177 rtx
178 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
179 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
180 {
181 rtx tem;
182
183 /* If this simplifies, use it. */
184 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
185 op0, op1, op2)))
186 return tem;
187
188 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
189 }
190 \f
191 /* Likewise, for relational operations.
192 CMP_MODE specifies the mode in which the comparison
193 is done. */
194
195 rtx
196 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
197 enum machine_mode cmp_mode, rtx op0, rtx op1)
198 {
199 rtx tem;
200
201 if (cmp_mode == VOIDmode)
202 cmp_mode = GET_MODE (op0);
203 if (cmp_mode == VOIDmode)
204 cmp_mode = GET_MODE (op1);
205
206 if (cmp_mode != VOIDmode)
207 {
208 tem = simplify_relational_operation (code, cmp_mode, op0, op1);
209
210 if (tem)
211 {
212 #ifdef FLOAT_STORE_FLAG_VALUE
213 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
214 {
215 REAL_VALUE_TYPE val;
216 if (tem == const0_rtx)
217 return CONST0_RTX (mode);
218 if (tem != const_true_rtx)
219 abort ();
220 val = FLOAT_STORE_FLAG_VALUE (mode);
221 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
222 }
223 #endif
224 return tem;
225 }
226 }
227
228 /* For the following tests, ensure const0_rtx is op1. */
229 if (swap_commutative_operands_p (op0, op1)
230 || (op0 == const0_rtx && op1 != const0_rtx))
231 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
232
233 /* If op0 is a compare, extract the comparison arguments from it. */
234 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
235 return simplify_gen_relational (code, mode, VOIDmode,
236 XEXP (op0, 0), XEXP (op0, 1));
237
238 /* If op0 is a comparison, extract the comparison arguments from it. */
239 if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
240 {
241 if (code == NE)
242 {
243 if (GET_MODE (op0) == mode)
244 return op0;
245 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
246 XEXP (op0, 0), XEXP (op0, 1));
247 }
248 else if (code == EQ)
249 {
250 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
251 if (new != UNKNOWN)
252 return simplify_gen_relational (new, mode, VOIDmode,
253 XEXP (op0, 0), XEXP (op0, 1));
254 }
255 }
256
257 return gen_rtx_fmt_ee (code, mode, op0, op1);
258 }
259 \f
260 /* Replace all occurrences of OLD in X with NEW and try to simplify the
261 resulting RTX. Return a new RTX which is as simplified as possible. */
262
263 rtx
264 simplify_replace_rtx (rtx x, rtx old, rtx new)
265 {
266 enum rtx_code code = GET_CODE (x);
267 enum machine_mode mode = GET_MODE (x);
268 enum machine_mode op_mode;
269 rtx op0, op1, op2;
270
271 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
272 to build a new expression substituting recursively. If we can't do
273 anything, return our input. */
274
275 if (x == old)
276 return new;
277
278 switch (GET_RTX_CLASS (code))
279 {
280 case '1':
281 op0 = XEXP (x, 0);
282 op_mode = GET_MODE (op0);
283 op0 = simplify_replace_rtx (op0, old, new);
284 if (op0 == XEXP (x, 0))
285 return x;
286 return simplify_gen_unary (code, mode, op0, op_mode);
287
288 case '2':
289 case 'c':
290 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
291 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
292 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
293 return x;
294 return simplify_gen_binary (code, mode, op0, op1);
295
296 case '<':
297 op0 = XEXP (x, 0);
298 op1 = XEXP (x, 1);
299 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
300 op0 = simplify_replace_rtx (op0, old, new);
301 op1 = simplify_replace_rtx (op1, old, new);
302 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
303 return x;
304 return simplify_gen_relational (code, mode, op_mode, op0, op1);
305
306 case '3':
307 case 'b':
308 op0 = XEXP (x, 0);
309 op_mode = GET_MODE (op0);
310 op0 = simplify_replace_rtx (op0, old, new);
311 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
312 op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
313 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
314 return x;
315 if (op_mode == VOIDmode)
316 op_mode = GET_MODE (op0);
317 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
318
319 case 'x':
320 /* The only case we try to handle is a SUBREG. */
321 if (code == SUBREG)
322 {
323 op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
324 if (op0 == SUBREG_REG (x))
325 return x;
326 op0 = simplify_gen_subreg (GET_MODE (x), op0,
327 GET_MODE (SUBREG_REG (x)),
328 SUBREG_BYTE (x));
329 return op0 ? op0 : x;
330 }
331 break;
332
333 case 'o':
334 if (code == MEM)
335 {
336 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
337 if (op0 == XEXP (x, 0))
338 return x;
339 return replace_equiv_address_nv (x, op0);
340 }
341 else if (code == LO_SUM)
342 {
343 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
344 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
345
346 /* (lo_sum (high x) x) -> x */
347 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
348 return op1;
349
350 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
351 return x;
352 return gen_rtx_LO_SUM (mode, op0, op1);
353 }
354 else if (code == REG)
355 {
356 if (REG_P (old) && REGNO (x) == REGNO (old))
357 return new;
358 }
359 break;
360
361 default:
362 break;
363 }
364 return x;
365 }
366 \f
367 /* Try to simplify a unary operation CODE whose output mode is to be
368 MODE with input operand OP whose mode was originally OP_MODE.
369 Return zero if no simplification can be made. */
370 rtx
371 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
372 rtx op, enum machine_mode op_mode)
373 {
374 unsigned int width = GET_MODE_BITSIZE (mode);
375 rtx trueop = avoid_constant_pool_reference (op);
376
377 if (code == VEC_DUPLICATE)
378 {
379 if (!VECTOR_MODE_P (mode))
380 abort ();
381 if (GET_MODE (trueop) != VOIDmode
382 && !VECTOR_MODE_P (GET_MODE (trueop))
383 && GET_MODE_INNER (mode) != GET_MODE (trueop))
384 abort ();
385 if (GET_MODE (trueop) != VOIDmode
386 && VECTOR_MODE_P (GET_MODE (trueop))
387 && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
388 abort ();
389 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
390 || GET_CODE (trueop) == CONST_VECTOR)
391 {
392 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
393 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
394 rtvec v = rtvec_alloc (n_elts);
395 unsigned int i;
396
397 if (GET_CODE (trueop) != CONST_VECTOR)
398 for (i = 0; i < n_elts; i++)
399 RTVEC_ELT (v, i) = trueop;
400 else
401 {
402 enum machine_mode inmode = GET_MODE (trueop);
403 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
404 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
405
406 if (in_n_elts >= n_elts || n_elts % in_n_elts)
407 abort ();
408 for (i = 0; i < n_elts; i++)
409 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
410 }
411 return gen_rtx_CONST_VECTOR (mode, v);
412 }
413 }
414 else if (GET_CODE (op) == CONST)
415 return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
416
417 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
418 {
419 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
420 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
421 enum machine_mode opmode = GET_MODE (trueop);
422 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
423 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
424 rtvec v = rtvec_alloc (n_elts);
425 unsigned int i;
426
427 if (op_n_elts != n_elts)
428 abort ();
429
430 for (i = 0; i < n_elts; i++)
431 {
432 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
433 CONST_VECTOR_ELT (trueop, i),
434 GET_MODE_INNER (opmode));
435 if (!x)
436 return 0;
437 RTVEC_ELT (v, i) = x;
438 }
439 return gen_rtx_CONST_VECTOR (mode, v);
440 }
441
442 /* The order of these tests is critical so that, for example, we don't
443 check the wrong mode (input vs. output) for a conversion operation,
444 such as FIX. At some point, this should be simplified. */
445
446 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
447 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
448 {
449 HOST_WIDE_INT hv, lv;
450 REAL_VALUE_TYPE d;
451
452 if (GET_CODE (trueop) == CONST_INT)
453 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
454 else
455 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
456
457 REAL_VALUE_FROM_INT (d, lv, hv, mode);
458 d = real_value_truncate (mode, d);
459 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
460 }
461 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
462 && (GET_CODE (trueop) == CONST_DOUBLE
463 || GET_CODE (trueop) == CONST_INT))
464 {
465 HOST_WIDE_INT hv, lv;
466 REAL_VALUE_TYPE d;
467
468 if (GET_CODE (trueop) == CONST_INT)
469 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
470 else
471 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
472
473 if (op_mode == VOIDmode)
474 {
475 /* We don't know how to interpret negative-looking numbers in
476 this case, so don't try to fold those. */
477 if (hv < 0)
478 return 0;
479 }
480 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
481 ;
482 else
483 hv = 0, lv &= GET_MODE_MASK (op_mode);
484
485 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
486 d = real_value_truncate (mode, d);
487 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
488 }
489
490 if (GET_CODE (trueop) == CONST_INT
491 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
492 {
493 HOST_WIDE_INT arg0 = INTVAL (trueop);
494 HOST_WIDE_INT val;
495
496 switch (code)
497 {
498 case NOT:
499 val = ~ arg0;
500 break;
501
502 case NEG:
503 val = - arg0;
504 break;
505
506 case ABS:
507 val = (arg0 >= 0 ? arg0 : - arg0);
508 break;
509
510 case FFS:
511 /* Don't use ffs here. Instead, get low order bit and then its
512 number. If arg0 is zero, this will return 0, as desired. */
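/* E.g. arg0 == 12 (binary 1100): arg0 & -arg0 == 4, exact_log2 gives 2,
and adding 1 yields 3, the 1-based position of the lowest set bit.  */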
513 arg0 &= GET_MODE_MASK (mode);
514 val = exact_log2 (arg0 & (- arg0)) + 1;
515 break;
516
517 case CLZ:
518 arg0 &= GET_MODE_MASK (mode);
519 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
520 ;
521 else
522 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
523 break;
524
525 case CTZ:
526 arg0 &= GET_MODE_MASK (mode);
527 if (arg0 == 0)
528 {
529 /* Even if the value at zero is undefined, we have to come
530 up with some replacement. Seems good enough. */
531 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
532 val = GET_MODE_BITSIZE (mode);
533 }
534 else
535 val = exact_log2 (arg0 & -arg0);
536 break;
537
538 case POPCOUNT:
539 arg0 &= GET_MODE_MASK (mode);
540 val = 0;
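/* Each arg0 &= arg0 - 1 step clears the lowest set bit, so the loop
below iterates exactly once per set bit.  */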
541 while (arg0)
542 val++, arg0 &= arg0 - 1;
543 break;
544
545 case PARITY:
546 arg0 &= GET_MODE_MASK (mode);
547 val = 0;
548 while (arg0)
549 val++, arg0 &= arg0 - 1;
550 val &= 1;
551 break;
552
553 case TRUNCATE:
554 val = arg0;
555 break;
556
557 case ZERO_EXTEND:
558 /* When zero-extending a CONST_INT, we need to know its
559 original mode. */
560 if (op_mode == VOIDmode)
561 abort ();
562 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
563 {
564 /* If we were really extending the mode,
565 we would have to distinguish between zero-extension
566 and sign-extension. */
567 if (width != GET_MODE_BITSIZE (op_mode))
568 abort ();
569 val = arg0;
570 }
571 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
572 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
573 else
574 return 0;
575 break;
576
577 case SIGN_EXTEND:
578 if (op_mode == VOIDmode)
579 op_mode = mode;
580 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
581 {
582 /* If we were really extending the mode,
583 we would have to distinguish between zero-extension
584 and sign-extension. */
585 if (width != GET_MODE_BITSIZE (op_mode))
586 abort ();
587 val = arg0;
588 }
589 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
590 {
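/* Mask down to the narrow mode, then subtract 2**bits when the narrow
mode's sign bit is set, performing two's complement sign extension
in host arithmetic.  */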
591 val
592 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
593 if (val
594 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
595 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
596 }
597 else
598 return 0;
599 break;
600
601 case SQRT:
602 case FLOAT_EXTEND:
603 case FLOAT_TRUNCATE:
604 case SS_TRUNCATE:
605 case US_TRUNCATE:
606 return 0;
607
608 default:
609 abort ();
610 }
611
612 val = trunc_int_for_mode (val, mode);
613
614 return GEN_INT (val);
615 }
616
617 /* We can do some operations on integer CONST_DOUBLEs. Also allow
618 for a DImode operation on a CONST_INT. */
619 else if (GET_MODE (trueop) == VOIDmode
620 && width <= HOST_BITS_PER_WIDE_INT * 2
621 && (GET_CODE (trueop) == CONST_DOUBLE
622 || GET_CODE (trueop) == CONST_INT))
623 {
624 unsigned HOST_WIDE_INT l1, lv;
625 HOST_WIDE_INT h1, hv;
626
627 if (GET_CODE (trueop) == CONST_DOUBLE)
628 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
629 else
630 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
631
632 switch (code)
633 {
634 case NOT:
635 lv = ~ l1;
636 hv = ~ h1;
637 break;
638
639 case NEG:
640 neg_double (l1, h1, &lv, &hv);
641 break;
642
643 case ABS:
644 if (h1 < 0)
645 neg_double (l1, h1, &lv, &hv);
646 else
647 lv = l1, hv = h1;
648 break;
649
650 case FFS:
651 hv = 0;
652 if (l1 == 0)
653 {
654 if (h1 == 0)
655 lv = 0;
656 else
657 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
658 }
659 else
660 lv = exact_log2 (l1 & -l1) + 1;
661 break;
662
663 case CLZ:
664 hv = 0;
665 if (h1 != 0)
666 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
667 - HOST_BITS_PER_WIDE_INT;
668 else if (l1 != 0)
669 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
670 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
671 lv = GET_MODE_BITSIZE (mode);
672 break;
673
674 case CTZ:
675 hv = 0;
676 if (l1 != 0)
677 lv = exact_log2 (l1 & -l1);
678 else if (h1 != 0)
679 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
680 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
681 lv = GET_MODE_BITSIZE (mode);
682 break;
683
684 case POPCOUNT:
685 hv = 0;
686 lv = 0;
687 while (l1)
688 lv++, l1 &= l1 - 1;
689 while (h1)
690 lv++, h1 &= h1 - 1;
691 break;
692
693 case PARITY:
694 hv = 0;
695 lv = 0;
696 while (l1)
697 lv++, l1 &= l1 - 1;
698 while (h1)
699 lv++, h1 &= h1 - 1;
700 lv &= 1;
701 break;
702
703 case TRUNCATE:
704 /* This is just a change-of-mode, so do nothing. */
705 lv = l1, hv = h1;
706 break;
707
708 case ZERO_EXTEND:
709 if (op_mode == VOIDmode)
710 abort ();
711
712 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
713 return 0;
714
715 hv = 0;
716 lv = l1 & GET_MODE_MASK (op_mode);
717 break;
718
719 case SIGN_EXTEND:
720 if (op_mode == VOIDmode
721 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
722 return 0;
723 else
724 {
725 lv = l1 & GET_MODE_MASK (op_mode);
726 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
727 && (lv & ((HOST_WIDE_INT) 1
728 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
729 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
730
731 hv = HWI_SIGN_EXTEND (lv);
732 }
733 break;
734
735 case SQRT:
736 return 0;
737
738 default:
739 return 0;
740 }
741
742 return immed_double_const (lv, hv, mode);
743 }
744
745 else if (GET_CODE (trueop) == CONST_DOUBLE
746 && GET_MODE_CLASS (mode) == MODE_FLOAT)
747 {
748 REAL_VALUE_TYPE d, t;
749 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
750
751 switch (code)
752 {
753 case SQRT:
754 if (HONOR_SNANS (mode) && real_isnan (&d))
755 return 0;
756 real_sqrt (&t, mode, &d);
757 d = t;
758 break;
759 case ABS:
760 d = REAL_VALUE_ABS (d);
761 break;
762 case NEG:
763 d = REAL_VALUE_NEGATE (d);
764 break;
765 case FLOAT_TRUNCATE:
766 d = real_value_truncate (mode, d);
767 break;
768 case FLOAT_EXTEND:
769 /* All this does is change the mode. */
770 break;
771 case FIX:
772 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
773 break;
774
775 default:
776 abort ();
777 }
778 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
779 }
780
781 else if (GET_CODE (trueop) == CONST_DOUBLE
782 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
783 && GET_MODE_CLASS (mode) == MODE_INT
784 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
785 {
786 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
787 operators are intentionally left unspecified (to ease implementation
788 by target backends), for consistency, this routine implements the
789 same semantics for constant folding as used by the middle-end. */
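/* Concretely, the folded result saturates: a value above the target
mode's maximum becomes that maximum, a value below the minimum becomes
the minimum, and NaN becomes zero.  */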
790
791 HOST_WIDE_INT xh, xl, th, tl;
792 REAL_VALUE_TYPE x, t;
793 REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
794 switch (code)
795 {
796 case FIX:
797 if (REAL_VALUE_ISNAN (x))
798 return const0_rtx;
799
800 /* Test against the signed upper bound. */
801 if (width > HOST_BITS_PER_WIDE_INT)
802 {
803 th = ((unsigned HOST_WIDE_INT) 1
804 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
805 tl = -1;
806 }
807 else
808 {
809 th = 0;
810 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
811 }
812 real_from_integer (&t, VOIDmode, tl, th, 0);
813 if (REAL_VALUES_LESS (t, x))
814 {
815 xh = th;
816 xl = tl;
817 break;
818 }
819
820 /* Test against the signed lower bound. */
821 if (width > HOST_BITS_PER_WIDE_INT)
822 {
823 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
824 tl = 0;
825 }
826 else
827 {
828 th = -1;
829 tl = (HOST_WIDE_INT) -1 << (width - 1);
830 }
831 real_from_integer (&t, VOIDmode, tl, th, 0);
832 if (REAL_VALUES_LESS (x, t))
833 {
834 xh = th;
835 xl = tl;
836 break;
837 }
838 REAL_VALUE_TO_INT (&xl, &xh, x);
839 break;
840
841 case UNSIGNED_FIX:
842 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
843 return const0_rtx;
844
845 /* Test against the unsigned upper bound. */
846 if (width == 2*HOST_BITS_PER_WIDE_INT)
847 {
848 th = -1;
849 tl = -1;
850 }
851 else if (width >= HOST_BITS_PER_WIDE_INT)
852 {
853 th = ((unsigned HOST_WIDE_INT) 1
854 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
855 tl = -1;
856 }
857 else
858 {
859 th = 0;
860 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
861 }
862 real_from_integer (&t, VOIDmode, tl, th, 1);
863 if (REAL_VALUES_LESS (t, x))
864 {
865 xh = th;
866 xl = tl;
867 break;
868 }
869
870 REAL_VALUE_TO_INT (&xl, &xh, x);
871 break;
872
873 default:
874 abort ();
875 }
876 return immed_double_const (xl, xh, mode);
877 }
878
879 /* This was formerly used only for non-IEEE float.
880 eggert@twinsun.com says it is safe for IEEE also. */
881 else
882 {
883 enum rtx_code reversed;
884 rtx temp;
885
886 /* There are some simplifications we can do even if the operands
887 aren't constant. */
888 switch (code)
889 {
890 case NOT:
891 /* (not (not X)) == X. */
892 if (GET_CODE (op) == NOT)
893 return XEXP (op, 0);
894
895 /* (not (eq X Y)) == (ne X Y), etc. */
896 if (GET_RTX_CLASS (GET_CODE (op)) == '<'
897 && (mode == BImode || STORE_FLAG_VALUE == -1)
898 && ((reversed = reversed_comparison_code (op, NULL_RTX))
899 != UNKNOWN))
900 return simplify_gen_relational (reversed, mode, VOIDmode,
901 XEXP (op, 0), XEXP (op, 1));
902
903 /* (not (plus X -1)) can become (neg X). */
904 if (GET_CODE (op) == PLUS
905 && XEXP (op, 1) == constm1_rtx)
906 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
907
908 /* Similarly, (not (neg X)) is (plus X -1). */
909 if (GET_CODE (op) == NEG)
910 return plus_constant (XEXP (op, 0), -1);
911
912 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
913 if (GET_CODE (op) == XOR
914 && GET_CODE (XEXP (op, 1)) == CONST_INT
915 && (temp = simplify_unary_operation (NOT, mode,
916 XEXP (op, 1),
917 mode)) != 0)
918 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
919
920
921 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
922 operands other than 1, but that is not valid. We could do a
923 similar simplification for (not (lshiftrt C X)) where C is
924 just the sign bit, but this doesn't seem common enough to
925 bother with. */
926 if (GET_CODE (op) == ASHIFT
927 && XEXP (op, 0) == const1_rtx)
928 {
929 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
930 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
931 }
932
933 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
934 by reversing the comparison code if valid. */
935 if (STORE_FLAG_VALUE == -1
936 && GET_RTX_CLASS (GET_CODE (op)) == '<'
937 && (reversed = reversed_comparison_code (op, NULL_RTX))
938 != UNKNOWN)
939 return simplify_gen_relational (reversed, mode, VOIDmode,
940 XEXP (op, 0), XEXP (op, 1));
941
942 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
943 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
944 so we can perform the above simplification. */
945
946 if (STORE_FLAG_VALUE == -1
947 && GET_CODE (op) == ASHIFTRT
948 && GET_CODE (XEXP (op, 1)) == CONST_INT
949 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
950 return simplify_gen_relational (GE, mode, VOIDmode,
951 XEXP (op, 0), const0_rtx);
952
953 break;
954
955 case NEG:
956 /* (neg (neg X)) == X. */
957 if (GET_CODE (op) == NEG)
958 return XEXP (op, 0);
959
960 /* (neg (plus X 1)) can become (not X). */
961 if (GET_CODE (op) == PLUS
962 && XEXP (op, 1) == const1_rtx)
963 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
964
965 /* Similarly, (neg (not X)) is (plus X 1). */
966 if (GET_CODE (op) == NOT)
967 return plus_constant (XEXP (op, 0), 1);
968
969 /* (neg (minus X Y)) can become (minus Y X). This transformation
970 isn't safe for modes with signed zeros, since if X and Y are
971 both +0, (minus Y X) is the same as (minus X Y). If the
972 rounding mode is towards +infinity (or -infinity) then the two
973 expressions will be rounded differently. */
974 if (GET_CODE (op) == MINUS
975 && !HONOR_SIGNED_ZEROS (mode)
976 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
977 return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
978 XEXP (op, 0));
979
980 if (GET_CODE (op) == PLUS
981 && !HONOR_SIGNED_ZEROS (mode)
982 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
983 {
984 /* (neg (plus A C)) is simplified to (minus -C A). */
985 if (GET_CODE (XEXP (op, 1)) == CONST_INT
986 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
987 {
988 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
989 mode);
990 if (temp)
991 return simplify_gen_binary (MINUS, mode, temp,
992 XEXP (op, 0));
993 }
994
995 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
996 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
997 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
998 }
999
1000 /* (neg (mult A B)) becomes (mult (neg A) B).
1001 This works even for floating-point values. */
1002 if (GET_CODE (op) == MULT
1003 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1004 {
1005 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1006 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
1007 }
1008
1009 /* NEG commutes with ASHIFT since it is multiplication. Only do
1010 this if we can then eliminate the NEG (e.g., if the operand
1011 is a constant). */
1012 if (GET_CODE (op) == ASHIFT)
1013 {
1014 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
1015 mode);
1016 if (temp)
1017 return simplify_gen_binary (ASHIFT, mode, temp,
1018 XEXP (op, 1));
1019 }
1020
1021 break;
1022
1023 case SIGN_EXTEND:
1024 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1025 becomes just the MINUS if its mode is MODE. This allows
1026 folding switch statements on machines using casesi (such as
1027 the VAX). */
1028 if (GET_CODE (op) == TRUNCATE
1029 && GET_MODE (XEXP (op, 0)) == mode
1030 && GET_CODE (XEXP (op, 0)) == MINUS
1031 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1032 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1033 return XEXP (op, 0);
1034
1035 /* Check for a sign extension of a subreg of a promoted
1036 variable, where the promotion is sign-extended, and the
1037 target mode is the same as the variable's promotion. */
1038 if (GET_CODE (op) == SUBREG
1039 && SUBREG_PROMOTED_VAR_P (op)
1040 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1041 && GET_MODE (XEXP (op, 0)) == mode)
1042 return XEXP (op, 0);
1043
1044 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1045 if (! POINTERS_EXTEND_UNSIGNED
1046 && mode == Pmode && GET_MODE (op) == ptr_mode
1047 && (CONSTANT_P (op)
1048 || (GET_CODE (op) == SUBREG
1049 && GET_CODE (SUBREG_REG (op)) == REG
1050 && REG_POINTER (SUBREG_REG (op))
1051 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1052 return convert_memory_address (Pmode, op);
1053 #endif
1054 break;
1055
1056 case ZERO_EXTEND:
1057 /* Check for a zero extension of a subreg of a promoted
1058 variable, where the promotion is zero-extended, and the
1059 target mode is the same as the variable's promotion. */
1060 if (GET_CODE (op) == SUBREG
1061 && SUBREG_PROMOTED_VAR_P (op)
1062 && SUBREG_PROMOTED_UNSIGNED_P (op)
1063 && GET_MODE (XEXP (op, 0)) == mode)
1064 return XEXP (op, 0);
1065
1066 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1067 if (POINTERS_EXTEND_UNSIGNED > 0
1068 && mode == Pmode && GET_MODE (op) == ptr_mode
1069 && (CONSTANT_P (op)
1070 || (GET_CODE (op) == SUBREG
1071 && GET_CODE (SUBREG_REG (op)) == REG
1072 && REG_POINTER (SUBREG_REG (op))
1073 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1074 return convert_memory_address (Pmode, op);
1075 #endif
1076 break;
1077
1078 default:
1079 break;
1080 }
1081
1082 return 0;
1083 }
1084 }
1085 \f
1086 /* Subroutine of simplify_binary_operation to simplify a commutative,
1087 associative binary operation CODE with result mode MODE, operating
1088 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1089 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1090 canonicalization is possible. */
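/* E.g. (and (and x (const_int 12)) (const_int 10)) is reassociated
below so that the two constants meet and fold, giving
(and x (const_int 8)).  */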
1091
1092 static rtx
1093 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1094 rtx op0, rtx op1)
1095 {
1096 rtx tem;
1097
1098 /* Linearize the operator to the left. */
1099 if (GET_CODE (op1) == code)
1100 {
1101 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1102 if (GET_CODE (op0) == code)
1103 {
1104 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1105 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1106 }
1107
1108 /* "a op (b op c)" becomes "(b op c) op a". */
1109 if (! swap_commutative_operands_p (op1, op0))
1110 return simplify_gen_binary (code, mode, op1, op0);
1111
1112 tem = op0;
1113 op0 = op1;
1114 op1 = tem;
1115 }
1116
1117 if (GET_CODE (op0) == code)
1118 {
1119 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1120 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1121 {
1122 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1123 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1124 }
1125
1126 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1127 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1128 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1129 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1130 if (tem != 0)
1131 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1132
1133 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1134 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1135 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1136 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1137 if (tem != 0)
1138 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1139 }
1140
1141 return 0;
1142 }
1143
1144 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1145 and OP1. Return 0 if no simplification is possible.
1146
1147 Don't use this for relational operations such as EQ or LT.
1148 Use simplify_relational_operation instead. */
1149 rtx
1150 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1151 rtx op0, rtx op1)
1152 {
1153 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
1154 HOST_WIDE_INT val;
1155 unsigned int width = GET_MODE_BITSIZE (mode);
1156 rtx trueop0, trueop1;
1157 rtx tem;
1158
1159 /* Relational operations don't work here. We must know the mode
1160 of the operands in order to do the comparison correctly.
1161 Assuming a full word can give incorrect results.
1162 Consider comparing 128 with -128 in QImode. */
1163
1164 if (GET_RTX_CLASS (code) == '<')
1165 abort ();
1166
1167 /* Make sure the constant is second. */
1168 if (GET_RTX_CLASS (code) == 'c'
1169 && swap_commutative_operands_p (op0, op1))
1170 {
1171 tem = op0, op0 = op1, op1 = tem;
1172 }
1173
1174 trueop0 = avoid_constant_pool_reference (op0);
1175 trueop1 = avoid_constant_pool_reference (op1);
1176
1177 if (VECTOR_MODE_P (mode)
1178 && GET_CODE (trueop0) == CONST_VECTOR
1179 && GET_CODE (trueop1) == CONST_VECTOR)
1180 {
1181 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1182 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1183 enum machine_mode op0mode = GET_MODE (trueop0);
1184 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
1185 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
1186 enum machine_mode op1mode = GET_MODE (trueop1);
1187 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
1188 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
1189 rtvec v = rtvec_alloc (n_elts);
1190 unsigned int i;
1191
1192 if (op0_n_elts != n_elts || op1_n_elts != n_elts)
1193 abort ();
1194
1195 for (i = 0; i < n_elts; i++)
1196 {
1197 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
1198 CONST_VECTOR_ELT (trueop0, i),
1199 CONST_VECTOR_ELT (trueop1, i));
1200 if (!x)
1201 return 0;
1202 RTVEC_ELT (v, i) = x;
1203 }
1204
1205 return gen_rtx_CONST_VECTOR (mode, v);
1206 }
1207
1208 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1209 && GET_CODE (trueop0) == CONST_DOUBLE
1210 && GET_CODE (trueop1) == CONST_DOUBLE
1211 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
1212 {
1213 REAL_VALUE_TYPE f0, f1, value;
1214
1215 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
1216 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
1217 f0 = real_value_truncate (mode, f0);
1218 f1 = real_value_truncate (mode, f1);
1219
1220 if (HONOR_SNANS (mode)
1221 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
1222 return 0;
1223
1224 if (code == DIV
1225 && REAL_VALUES_EQUAL (f1, dconst0)
1226 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1227 return 0;
1228
1229 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
1230
1231 value = real_value_truncate (mode, value);
1232 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
1233 }
1234
1235 /* We can fold some multi-word operations. */
1236 if (GET_MODE_CLASS (mode) == MODE_INT
1237 && width == HOST_BITS_PER_WIDE_INT * 2
1238 && (GET_CODE (trueop0) == CONST_DOUBLE
1239 || GET_CODE (trueop0) == CONST_INT)
1240 && (GET_CODE (trueop1) == CONST_DOUBLE
1241 || GET_CODE (trueop1) == CONST_INT))
1242 {
1243 unsigned HOST_WIDE_INT l1, l2, lv;
1244 HOST_WIDE_INT h1, h2, hv;
1245
1246 if (GET_CODE (trueop0) == CONST_DOUBLE)
1247 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
1248 else
1249 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
1250
1251 if (GET_CODE (trueop1) == CONST_DOUBLE)
1252 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
1253 else
1254 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
1255
1256 switch (code)
1257 {
1258 case MINUS:
1259 /* A - B == A + (-B). */
1260 neg_double (l2, h2, &lv, &hv);
1261 l2 = lv, h2 = hv;
1262
1263 /* Fall through.... */
1264
1265 case PLUS:
1266 add_double (l1, h1, l2, h2, &lv, &hv);
1267 break;
1268
1269 case MULT:
1270 mul_double (l1, h1, l2, h2, &lv, &hv);
1271 break;
1272
1273 case DIV: case MOD: case UDIV: case UMOD:
1274 /* We'd need to include tree.h to do this and it doesn't seem worth
1275 it. */
1276 return 0;
1277
1278 case AND:
1279 lv = l1 & l2, hv = h1 & h2;
1280 break;
1281
1282 case IOR:
1283 lv = l1 | l2, hv = h1 | h2;
1284 break;
1285
1286 case XOR:
1287 lv = l1 ^ l2, hv = h1 ^ h2;
1288 break;
1289
1290 case SMIN:
1291 if (h1 < h2
1292 || (h1 == h2
1293 && ((unsigned HOST_WIDE_INT) l1
1294 < (unsigned HOST_WIDE_INT) l2)))
1295 lv = l1, hv = h1;
1296 else
1297 lv = l2, hv = h2;
1298 break;
1299
1300 case SMAX:
1301 if (h1 > h2
1302 || (h1 == h2
1303 && ((unsigned HOST_WIDE_INT) l1
1304 > (unsigned HOST_WIDE_INT) l2)))
1305 lv = l1, hv = h1;
1306 else
1307 lv = l2, hv = h2;
1308 break;
1309
1310 case UMIN:
1311 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1312 || (h1 == h2
1313 && ((unsigned HOST_WIDE_INT) l1
1314 < (unsigned HOST_WIDE_INT) l2)))
1315 lv = l1, hv = h1;
1316 else
1317 lv = l2, hv = h2;
1318 break;
1319
1320 case UMAX:
1321 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1322 || (h1 == h2
1323 && ((unsigned HOST_WIDE_INT) l1
1324 > (unsigned HOST_WIDE_INT) l2)))
1325 lv = l1, hv = h1;
1326 else
1327 lv = l2, hv = h2;
1328 break;
1329
1330 case LSHIFTRT: case ASHIFTRT:
1331 case ASHIFT:
1332 case ROTATE: case ROTATERT:
1333 #ifdef SHIFT_COUNT_TRUNCATED
1334 if (SHIFT_COUNT_TRUNCATED)
1335 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1336 #endif
1337
1338 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1339 return 0;
1340
1341 if (code == LSHIFTRT || code == ASHIFTRT)
1342 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1343 code == ASHIFTRT);
1344 else if (code == ASHIFT)
1345 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1346 else if (code == ROTATE)
1347 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1348 else /* code == ROTATERT */
1349 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1350 break;
1351
1352 default:
1353 return 0;
1354 }
1355
1356 return immed_double_const (lv, hv, mode);
1357 }
1358
1359 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1360 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1361 {
1362 /* Even if we can't compute a constant result,
1363 there are some cases worth simplifying. */
1364
1365 switch (code)
1366 {
1367 case PLUS:
1368 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1369 when x is NaN, infinite, or finite and nonzero. They aren't
1370 when x is -0 and the rounding mode is not towards -infinity,
1371 since (-0) + 0 is then 0. */
1372 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1373 return op0;
1374
1375 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1376 transformations are safe even for IEEE. */
1377 if (GET_CODE (op0) == NEG)
1378 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1379 else if (GET_CODE (op1) == NEG)
1380 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1381
1382 /* (~a) + 1 -> -a */
1383 if (INTEGRAL_MODE_P (mode)
1384 && GET_CODE (op0) == NOT
1385 && trueop1 == const1_rtx)
1386 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1387
1388 /* Handle both-operands-constant cases. We can only add
1389 CONST_INTs to constants since the sum of relocatable symbols
1390 can't be handled by most assemblers. Don't add CONST_INT
1391 to CONST_INT since overflow won't be computed properly if wider
1392 than HOST_BITS_PER_WIDE_INT. */
1393
1394 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1395 && GET_CODE (op1) == CONST_INT)
1396 return plus_constant (op0, INTVAL (op1));
1397 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1398 && GET_CODE (op0) == CONST_INT)
1399 return plus_constant (op1, INTVAL (op0));
1400
1401 /* See if this is something like X * C + X or vice versa, or if
1402 the multiplication is written as a shift.  If so, we can
1403 distribute and make a new multiply or shift with the combined
1404 coefficient.  But don't make a real multiply if we didn't
1405 have one before. */
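/* E.g. (plus (mult x (const_int 2)) x) has coefficients 2 and 1 and
folds to (mult x (const_int 3)), while (plus (ashift x (const_int 2)) x)
would give (mult x (const_int 5)) but is rejected because had_mult
stays 0 for the shift form.  */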
1406
1407 if (! FLOAT_MODE_P (mode))
1408 {
1409 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1410 rtx lhs = op0, rhs = op1;
1411 int had_mult = 0;
1412
1413 if (GET_CODE (lhs) == NEG)
1414 coeff0 = -1, lhs = XEXP (lhs, 0);
1415 else if (GET_CODE (lhs) == MULT
1416 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1417 {
1418 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1419 had_mult = 1;
1420 }
1421 else if (GET_CODE (lhs) == ASHIFT
1422 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1423 && INTVAL (XEXP (lhs, 1)) >= 0
1424 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1425 {
1426 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1427 lhs = XEXP (lhs, 0);
1428 }
1429
1430 if (GET_CODE (rhs) == NEG)
1431 coeff1 = -1, rhs = XEXP (rhs, 0);
1432 else if (GET_CODE (rhs) == MULT
1433 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1434 {
1435 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1436 had_mult = 1;
1437 }
1438 else if (GET_CODE (rhs) == ASHIFT
1439 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1440 && INTVAL (XEXP (rhs, 1)) >= 0
1441 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1442 {
1443 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1444 rhs = XEXP (rhs, 0);
1445 }
1446
1447 if (rtx_equal_p (lhs, rhs))
1448 {
1449 tem = simplify_gen_binary (MULT, mode, lhs,
1450 GEN_INT (coeff0 + coeff1));
1451 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1452 }
1453 }
1454
1455 /* If one of the operands is a PLUS or a MINUS, see if we can
1456 simplify this by the associative law.
1457 Don't use the associative law for floating point.
1458 The inaccuracy makes it nonassociative,
1459 and subtle programs can break if operations are associated. */
1460
1461 if (INTEGRAL_MODE_P (mode)
1462 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1463 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1464 || (GET_CODE (op0) == CONST
1465 && GET_CODE (XEXP (op0, 0)) == PLUS)
1466 || (GET_CODE (op1) == CONST
1467 && GET_CODE (XEXP (op1, 0)) == PLUS))
1468 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1469 return tem;
1470
1471 /* Reassociate floating point addition only when the user
1472 specifies unsafe math optimizations. */
1473 if (FLOAT_MODE_P (mode)
1474 && flag_unsafe_math_optimizations)
1475 {
1476 tem = simplify_associative_operation (code, mode, op0, op1);
1477 if (tem)
1478 return tem;
1479 }
1480 break;
1481
1482 case COMPARE:
1483 #ifdef HAVE_cc0
1484 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1485 using cc0, in which case we want to leave it as a COMPARE
1486 so we can distinguish it from a register-register-copy.
1487
1488 In IEEE floating point, x-0 is not the same as x. */
1489
1490 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1491 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1492 && trueop1 == CONST0_RTX (mode))
1493 return op0;
1494 #endif
1495
1496 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1497 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1498 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1499 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1500 {
1501 rtx xop00 = XEXP (op0, 0);
1502 rtx xop10 = XEXP (op1, 0);
1503
1504 #ifdef HAVE_cc0
1505 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1506 #else
1507 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1508 && GET_MODE (xop00) == GET_MODE (xop10)
1509 && REGNO (xop00) == REGNO (xop10)
1510 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1511 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1512 #endif
1513 return xop00;
1514 }
1515 break;
1516
1517 case MINUS:
1518 /* We can't assume x-x is 0 even with non-IEEE floating point,
1519 but since it is zero except in very strange circumstances, we
1520 will treat it as zero with -funsafe-math-optimizations. */
1521 if (rtx_equal_p (trueop0, trueop1)
1522 && ! side_effects_p (op0)
1523 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1524 return CONST0_RTX (mode);
1525
1526 /* Change subtraction from zero into negation. (0 - x) is the
1527 same as -x when x is NaN, infinite, or finite and nonzero.
1528 But if the mode has signed zeros, and does not round towards
1529 -infinity, then 0 - 0 is 0, not -0. */
1530 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1531 return simplify_gen_unary (NEG, mode, op1, mode);
1532
1533 /* (-1 - a) is ~a. */
1534 if (trueop0 == constm1_rtx)
1535 return simplify_gen_unary (NOT, mode, op1, mode);
1536
1537 /* Subtracting 0 has no effect unless the mode has signed zeros
1538 and supports rounding towards -infinity. In such a case,
1539 0 - 0 is -0. */
1540 if (!(HONOR_SIGNED_ZEROS (mode)
1541 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1542 && trueop1 == CONST0_RTX (mode))
1543 return op0;
1544
1545 /* See if this is something like X * C - X or vice versa or
1546 if the multiplication is written as a shift. If so, we can
1547 distribute and make a new multiply, shift, or maybe just
1548 have X (if C is 2 in the example above). But don't make
1549 real multiply if we didn't have one before. */
1550
1551 if (! FLOAT_MODE_P (mode))
1552 {
1553 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1554 rtx lhs = op0, rhs = op1;
1555 int had_mult = 0;
1556
1557 if (GET_CODE (lhs) == NEG)
1558 coeff0 = -1, lhs = XEXP (lhs, 0);
1559 else if (GET_CODE (lhs) == MULT
1560 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1561 {
1562 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1563 had_mult = 1;
1564 }
1565 else if (GET_CODE (lhs) == ASHIFT
1566 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1567 && INTVAL (XEXP (lhs, 1)) >= 0
1568 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1569 {
1570 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1571 lhs = XEXP (lhs, 0);
1572 }
1573
1574 if (GET_CODE (rhs) == NEG)
1575 coeff1 = -1, rhs = XEXP (rhs, 0);
1576 else if (GET_CODE (rhs) == MULT
1577 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1578 {
1579 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1580 had_mult = 1;
1581 }
1582 else if (GET_CODE (rhs) == ASHIFT
1583 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1584 && INTVAL (XEXP (rhs, 1)) >= 0
1585 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1586 {
1587 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1588 rhs = XEXP (rhs, 0);
1589 }
1590
1591 if (rtx_equal_p (lhs, rhs))
1592 {
1593 tem = simplify_gen_binary (MULT, mode, lhs,
1594 GEN_INT (coeff0 - coeff1));
1595 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1596 }
1597 }
1598
1599 /* (a - (-b)) -> (a + b). True even for IEEE. */
1600 if (GET_CODE (op1) == NEG)
1601 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1602
1603 /* (-x - c) may be simplified as (-c - x). */
1604 if (GET_CODE (op0) == NEG
1605 && (GET_CODE (op1) == CONST_INT
1606 || GET_CODE (op1) == CONST_DOUBLE))
1607 {
1608 tem = simplify_unary_operation (NEG, mode, op1, mode);
1609 if (tem)
1610 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1611 }
1612
1613 /* If one of the operands is a PLUS or a MINUS, see if we can
1614 simplify this by the associative law.
1615 Don't use the associative law for floating point.
1616 The inaccuracy makes it nonassociative,
1617 and subtle programs can break if operations are associated. */
1618
1619 if (INTEGRAL_MODE_P (mode)
1620 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1621 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1622 || (GET_CODE (op0) == CONST
1623 && GET_CODE (XEXP (op0, 0)) == PLUS)
1624 || (GET_CODE (op1) == CONST
1625 && GET_CODE (XEXP (op1, 0)) == PLUS))
1626 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1627 return tem;
1628
1629 /* Don't let a relocatable value get a negative coeff. */
1630 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1631 return simplify_gen_binary (PLUS, mode,
1632 op0,
1633 neg_const_int (mode, op1));
1634
1635 /* (x - (x & y)) -> (x & ~y) */
1636 if (GET_CODE (op1) == AND)
1637 {
1638 if (rtx_equal_p (op0, XEXP (op1, 0)))
1639 {
1640 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1641 GET_MODE (XEXP (op1, 1)));
1642 return simplify_gen_binary (AND, mode, op0, tem);
1643 }
1644 if (rtx_equal_p (op0, XEXP (op1, 1)))
1645 {
1646 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1647 GET_MODE (XEXP (op1, 0)));
1648 return simplify_gen_binary (AND, mode, op0, tem);
1649 }
1650 }
1651 break;
1652
1653 case MULT:
1654 if (trueop1 == constm1_rtx)
1655 return simplify_gen_unary (NEG, mode, op0, mode);
1656
1657 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1658 x is NaN, since x * 0 is then also NaN. Nor is it valid
1659 when the mode has signed zeros, since multiplying a negative
1660 number by 0 will give -0, not 0. */
1661 if (!HONOR_NANS (mode)
1662 && !HONOR_SIGNED_ZEROS (mode)
1663 && trueop1 == CONST0_RTX (mode)
1664 && ! side_effects_p (op0))
1665 return op1;
1666
1667 /* In IEEE floating point, x*1 is not equivalent to x for
1668 signalling NaNs. */
1669 if (!HONOR_SNANS (mode)
1670 && trueop1 == CONST1_RTX (mode))
1671 return op0;
1672
1673 /* Convert multiply by constant power of two into shift unless
1674 we are still generating RTL. This test is a kludge. */
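/* E.g. (mult:SI x (const_int 8)) becomes (ashift:SI x (const_int 3))
once initial RTL generation is over.  */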
1675 if (GET_CODE (trueop1) == CONST_INT
1676 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1677 /* If the mode is larger than the host word size, and the
1678 uppermost bit is set, then this isn't a power of two due
1679 to implicit sign extension. */
1680 && (width <= HOST_BITS_PER_WIDE_INT
1681 || val != HOST_BITS_PER_WIDE_INT - 1)
1682 && ! rtx_equal_function_value_matters)
1683 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1684
1685 /* x*2 is x+x and x*(-1) is -x */
1686 if (GET_CODE (trueop1) == CONST_DOUBLE
1687 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1688 && GET_MODE (op0) == mode)
1689 {
1690 REAL_VALUE_TYPE d;
1691 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1692
1693 if (REAL_VALUES_EQUAL (d, dconst2))
1694 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1695
1696 if (REAL_VALUES_EQUAL (d, dconstm1))
1697 return simplify_gen_unary (NEG, mode, op0, mode);
1698 }
1699
1700 /* Reassociate multiplication, but for floating point MULTs
1701 only when the user specifies unsafe math optimizations. */
1702 if (! FLOAT_MODE_P (mode)
1703 || flag_unsafe_math_optimizations)
1704 {
1705 tem = simplify_associative_operation (code, mode, op0, op1);
1706 if (tem)
1707 return tem;
1708 }
1709 break;
1710
1711 case IOR:
1712 if (trueop1 == const0_rtx)
1713 return op0;
1714 if (GET_CODE (trueop1) == CONST_INT
1715 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1716 == GET_MODE_MASK (mode)))
1717 return op1;
1718 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1719 return op0;
1720 /* A | (~A) -> -1 */
1721 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1722 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1723 && ! side_effects_p (op0)
1724 && GET_MODE_CLASS (mode) != MODE_CC)
1725 return constm1_rtx;
1726 tem = simplify_associative_operation (code, mode, op0, op1);
1727 if (tem)
1728 return tem;
1729 break;
1730
1731 case XOR:
1732 if (trueop1 == const0_rtx)
1733 return op0;
1734 if (GET_CODE (trueop1) == CONST_INT
1735 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1736 == GET_MODE_MASK (mode)))
1737 return simplify_gen_unary (NOT, mode, op0, mode);
1738 if (trueop0 == trueop1 && ! side_effects_p (op0)
1739 && GET_MODE_CLASS (mode) != MODE_CC)
1740 return const0_rtx;
1741 tem = simplify_associative_operation (code, mode, op0, op1);
1742 if (tem)
1743 return tem;
1744 break;
1745
1746 case AND:
1747 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1748 return const0_rtx;
1749 if (GET_CODE (trueop1) == CONST_INT
1750 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1751 == GET_MODE_MASK (mode)))
1752 return op0;
1753 if (trueop0 == trueop1 && ! side_effects_p (op0)
1754 && GET_MODE_CLASS (mode) != MODE_CC)
1755 return op0;
1756 /* A & (~A) -> 0 */
1757 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1758 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1759 && ! side_effects_p (op0)
1760 && GET_MODE_CLASS (mode) != MODE_CC)
1761 return const0_rtx;
1762 tem = simplify_associative_operation (code, mode, op0, op1);
1763 if (tem)
1764 return tem;
1765 break;
1766
1767 case UDIV:
1768 /* Convert divide by power of two into shift (divide by 1 handled
1769 below). */
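/* E.g. (udiv:SI x (const_int 8)) becomes (lshiftrt:SI x (const_int 3)).  */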
1770 if (GET_CODE (trueop1) == CONST_INT
1771 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1772 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
1773
1774 /* Fall through.... */
1775
1776 case DIV:
1777 if (trueop1 == CONST1_RTX (mode))
1778 {
1779 /* On some platforms DIV uses narrower mode than its
1780 operands. */
1781 rtx x = gen_lowpart_common (mode, op0);
1782 if (x)
1783 return x;
1784 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1785 return gen_lowpart_SUBREG (mode, op0);
1786 else
1787 return op0;
1788 }
1789
1790 /* Maybe change 0 / x to 0. This transformation isn't safe for
1791 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1792 Nor is it safe for modes with signed zeros, since dividing
1793 0 by a negative number gives -0, not 0. */
1794 if (!HONOR_NANS (mode)
1795 && !HONOR_SIGNED_ZEROS (mode)
1796 && trueop0 == CONST0_RTX (mode)
1797 && ! side_effects_p (op1))
1798 return op0;
1799
1800 /* Change division by a constant into multiplication. Only do
1801 this with -funsafe-math-optimizations. */
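/* E.g. x / 2.0 becomes x * 0.5.  For divisors such as 10.0, whose
reciprocal is not exactly representable, the product can differ from
the quotient, hence the -funsafe-math-optimizations guard.  */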
1802 else if (GET_CODE (trueop1) == CONST_DOUBLE
1803 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1804 && trueop1 != CONST0_RTX (mode)
1805 && flag_unsafe_math_optimizations)
1806 {
1807 REAL_VALUE_TYPE d;
1808 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1809
1810 if (! REAL_VALUES_EQUAL (d, dconst0))
1811 {
1812 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1813 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1814 return simplify_gen_binary (MULT, mode, op0, tem);
1815 }
1816 }
1817 break;
1818
1819 case UMOD:
1820 /* Handle modulus by power of two (mod with 1 handled below). */
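/* E.g. (umod:SI x (const_int 8)) becomes (and:SI x (const_int 7)).  */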
1821 if (GET_CODE (trueop1) == CONST_INT
1822 && exact_log2 (INTVAL (trueop1)) > 0)
1823 return simplify_gen_binary (AND, mode, op0,
1824 GEN_INT (INTVAL (op1) - 1));
1825
1826 /* Fall through.... */
1827
1828 case MOD:
1829 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1830 && ! side_effects_p (op0) && ! side_effects_p (op1))
1831 return const0_rtx;
1832 break;
1833
1834 case ROTATERT:
1835 case ROTATE:
1836 case ASHIFTRT:
1837 /* Rotating ~0 always results in ~0. */
1838 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1839 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1840 && ! side_effects_p (op1))
1841 return op0;
1842
1843 /* Fall through.... */
1844
1845 case ASHIFT:
1846 case LSHIFTRT:
1847 if (trueop1 == const0_rtx)
1848 return op0;
1849 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1850 return op0;
1851 break;
1852
1853 case SMIN:
1854 if (width <= HOST_BITS_PER_WIDE_INT
1855 && GET_CODE (trueop1) == CONST_INT
1856 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
1857 && ! side_effects_p (op0))
1858 return op1;
1859 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1860 return op0;
1861 tem = simplify_associative_operation (code, mode, op0, op1);
1862 if (tem)
1863 return tem;
1864 break;
1865
1866 case SMAX:
1867 if (width <= HOST_BITS_PER_WIDE_INT
1868 && GET_CODE (trueop1) == CONST_INT
1869 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1870 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1871 && ! side_effects_p (op0))
1872 return op1;
1873 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1874 return op0;
1875 tem = simplify_associative_operation (code, mode, op0, op1);
1876 if (tem)
1877 return tem;
1878 break;
1879
1880 case UMIN:
1881 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1882 return op1;
1883 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1884 return op0;
1885 tem = simplify_associative_operation (code, mode, op0, op1);
1886 if (tem)
1887 return tem;
1888 break;
1889
1890 case UMAX:
1891 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1892 return op1;
1893 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1894 return op0;
1895 tem = simplify_associative_operation (code, mode, op0, op1);
1896 if (tem)
1897 return tem;
1898 break;
1899
1900 case SS_PLUS:
1901 case US_PLUS:
1902 case SS_MINUS:
1903 case US_MINUS:
1904 /* ??? There are simplifications that can be done. */
1905 return 0;
1906
1907 case VEC_SELECT:
1908 if (!VECTOR_MODE_P (mode))
1909 {
1910 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1911 || (mode
1912 != GET_MODE_INNER (GET_MODE (trueop0)))
1913 || GET_CODE (trueop1) != PARALLEL
1914 || XVECLEN (trueop1, 0) != 1
1915 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
1916 abort ();
1917
1918 if (GET_CODE (trueop0) == CONST_VECTOR)
1919 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
1920 }
1921 else
1922 {
1923 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1924 || (GET_MODE_INNER (mode)
1925 != GET_MODE_INNER (GET_MODE (trueop0)))
1926 || GET_CODE (trueop1) != PARALLEL)
1927 abort ();
1928
1929 if (GET_CODE (trueop0) == CONST_VECTOR)
1930 {
1931 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1932 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1933 rtvec v = rtvec_alloc (n_elts);
1934 unsigned int i;
1935
1936 if (XVECLEN (trueop1, 0) != (int) n_elts)
1937 abort ();
1938 for (i = 0; i < n_elts; i++)
1939 {
1940 rtx x = XVECEXP (trueop1, 0, i);
1941
1942 if (GET_CODE (x) != CONST_INT)
1943 abort ();
1944 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
1945 }
1946
1947 return gen_rtx_CONST_VECTOR (mode, v);
1948 }
1949 }
1950 return 0;
1951 case VEC_CONCAT:
1952 {
1953 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1954 ? GET_MODE (trueop0)
1955 : GET_MODE_INNER (mode));
1956 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
1957 ? GET_MODE (trueop1)
1958 : GET_MODE_INNER (mode));
1959
1960 if (!VECTOR_MODE_P (mode)
1961 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
1962 != GET_MODE_SIZE (mode)))
1963 abort ();
1964
1965 if ((VECTOR_MODE_P (op0_mode)
1966 && (GET_MODE_INNER (mode)
1967 != GET_MODE_INNER (op0_mode)))
1968 || (!VECTOR_MODE_P (op0_mode)
1969 && GET_MODE_INNER (mode) != op0_mode))
1970 abort ();
1971
1972 if ((VECTOR_MODE_P (op1_mode)
1973 && (GET_MODE_INNER (mode)
1974 != GET_MODE_INNER (op1_mode)))
1975 || (!VECTOR_MODE_P (op1_mode)
1976 && GET_MODE_INNER (mode) != op1_mode))
1977 abort ();
1978
1979 if ((GET_CODE (trueop0) == CONST_VECTOR
1980 || GET_CODE (trueop0) == CONST_INT
1981 || GET_CODE (trueop0) == CONST_DOUBLE)
1982 && (GET_CODE (trueop1) == CONST_VECTOR
1983 || GET_CODE (trueop1) == CONST_INT
1984 || GET_CODE (trueop1) == CONST_DOUBLE))
1985 {
1986 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1987 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1988 rtvec v = rtvec_alloc (n_elts);
1989 unsigned int i;
1990 unsigned in_n_elts = 1;
1991
1992 if (VECTOR_MODE_P (op0_mode))
1993 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
1994 for (i = 0; i < n_elts; i++)
1995 {
1996 if (i < in_n_elts)
1997 {
1998 if (!VECTOR_MODE_P (op0_mode))
1999 RTVEC_ELT (v, i) = trueop0;
2000 else
2001 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2002 }
2003 else
2004 {
2005 if (!VECTOR_MODE_P (op1_mode))
2006 RTVEC_ELT (v, i) = trueop1;
2007 else
2008 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2009 i - in_n_elts);
2010 }
2011 }
2012
2013 return gen_rtx_CONST_VECTOR (mode, v);
2014 }
2015 }
2016 return 0;
2017
2018 default:
2019 abort ();
2020 }
2021
2022 return 0;
2023 }
2024
2025 /* Get the integer argument values in two forms:
2026 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2027
2028 arg0 = INTVAL (trueop0);
2029 arg1 = INTVAL (trueop1);
2030
2031 if (width < HOST_BITS_PER_WIDE_INT)
2032 {
2033 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2034 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2035
2036 arg0s = arg0;
2037 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2038 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2039
2040 arg1s = arg1;
2041 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2042 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2043 }
2044 else
2045 {
2046 arg0s = arg0;
2047 arg1s = arg1;
2048 }
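      /* A worked example (illustrative, not from the original source):
	 for a QImode (const_int -1), width is 8, so arg0 becomes 0xff
	 (zero-extended) and arg0s becomes -1 (sign-extended).  The
	 signed cases below use ARG0S/ARG1S, the unsigned ones use
	 ARG0/ARG1.  */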
2049
2050 /* Compute the value of the arithmetic. */
2051
2052 switch (code)
2053 {
2054 case PLUS:
2055 val = arg0s + arg1s;
2056 break;
2057
2058 case MINUS:
2059 val = arg0s - arg1s;
2060 break;
2061
2062 case MULT:
2063 val = arg0s * arg1s;
2064 break;
2065
2066 case DIV:
2067 if (arg1s == 0
2068 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2069 && arg1s == -1))
2070 return 0;
2071 val = arg0s / arg1s;
2072 break;
2073
2074 case MOD:
2075 if (arg1s == 0
2076 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2077 && arg1s == -1))
2078 return 0;
2079 val = arg0s % arg1s;
2080 break;
2081
2082 case UDIV:
2083 if (arg1 == 0
2084 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2085 && arg1s == -1))
2086 return 0;
2087 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2088 break;
2089
2090 case UMOD:
2091 if (arg1 == 0
2092 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2093 && arg1s == -1))
2094 return 0;
2095 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2096 break;
2097
2098 case AND:
2099 val = arg0 & arg1;
2100 break;
2101
2102 case IOR:
2103 val = arg0 | arg1;
2104 break;
2105
2106 case XOR:
2107 val = arg0 ^ arg1;
2108 break;
2109
2110 case LSHIFTRT:
2111 /* If shift count is undefined, don't fold it; let the machine do
2112 what it wants. But truncate it if the machine will do that. */
2113 if (arg1 < 0)
2114 return 0;
2115
2116 #ifdef SHIFT_COUNT_TRUNCATED
2117 if (SHIFT_COUNT_TRUNCATED)
2118 arg1 %= width;
2119 #endif
2120
2121 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2122 break;
2123
2124 case ASHIFT:
2125 if (arg1 < 0)
2126 return 0;
2127
2128 #ifdef SHIFT_COUNT_TRUNCATED
2129 if (SHIFT_COUNT_TRUNCATED)
2130 arg1 %= width;
2131 #endif
2132
2133 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2134 break;
2135
2136 case ASHIFTRT:
2137 if (arg1 < 0)
2138 return 0;
2139
2140 #ifdef SHIFT_COUNT_TRUNCATED
2141 if (SHIFT_COUNT_TRUNCATED)
2142 arg1 %= width;
2143 #endif
2144
2145 val = arg0s >> arg1;
2146
2147 /* The bootstrap compiler may not have sign-extended the right shift.
2148 Manually extend the sign to ensure bootstrap cc matches gcc. */
2149 if (arg0s < 0 && arg1 > 0)
2150 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2151
2152 break;
2153
2154 case ROTATERT:
2155 if (arg1 < 0)
2156 return 0;
2157
2158 arg1 %= width;
2159 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2160 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2161 break;
2162
2163 case ROTATE:
2164 if (arg1 < 0)
2165 return 0;
2166
2167 arg1 %= width;
2168 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2169 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2170 break;
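	  /* An illustrative example: in QImode, width is 8, so
	     (rotate:QI (const_int 0x96) (const_int 4)) computes
	     (0x96 << 4 | 0x96 >> 4) and truncates the result to
	     (const_int 0x69).  */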
2171
2172 case COMPARE:
2173 /* Do nothing here. */
2174 return 0;
2175
2176 case SMIN:
2177 val = arg0s <= arg1s ? arg0s : arg1s;
2178 break;
2179
2180 case UMIN:
2181 val = ((unsigned HOST_WIDE_INT) arg0
2182 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2183 break;
2184
2185 case SMAX:
2186 val = arg0s > arg1s ? arg0s : arg1s;
2187 break;
2188
2189 case UMAX:
2190 val = ((unsigned HOST_WIDE_INT) arg0
2191 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2192 break;
2193
2194 case SS_PLUS:
2195 case US_PLUS:
2196 case SS_MINUS:
2197 case US_MINUS:
2198 /* ??? There are simplifications that can be done. */
2199 return 0;
2200
2201 default:
2202 abort ();
2203 }
2204
2205 val = trunc_int_for_mode (val, mode);
2206
2207 return GEN_INT (val);
2208 }
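
/* A usage sketch for the routine above (illustrative only, not part of
   the original file):

     rtx prod = simplify_binary_operation (MULT, SImode,
					   GEN_INT (6), GEN_INT (7));

   yields (const_int 42) through the constant arithmetic above, while a
   call whose operands admit no simplification returns NULL_RTX.  */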
2209 \f
2210 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2211 PLUS or MINUS.
2212
2213 Rather than test for specific cases, we do this by a brute-force method
2214 and do all possible simplifications until no more changes occur. Then
2215 we rebuild the operation.
2216
2217 If FORCE is true, then always generate the rtx. This is used to
2218 canonicalize stuff emitted from simplify_gen_binary. Note that this
2219 can still fail if the rtx is too complex. It won't fail just because
2220 the result is not 'simpler' than the input, however. */
2221
2222 struct simplify_plus_minus_op_data
2223 {
2224 rtx op;
2225 int neg;
2226 };
2227
2228 static int
2229 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2230 {
2231 const struct simplify_plus_minus_op_data *d1 = p1;
2232 const struct simplify_plus_minus_op_data *d2 = p2;
2233
2234 return (commutative_operand_precedence (d2->op)
2235 - commutative_operand_precedence (d1->op));
2236 }
2237
2238 static rtx
2239 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2240 rtx op1, int force)
2241 {
2242 struct simplify_plus_minus_op_data ops[8];
2243 rtx result, tem;
2244 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2245 int first, changed;
2246 int i, j;
2247
2248 memset (ops, 0, sizeof ops);
2249
2250 /* Set up the two operands and then expand them until nothing has been
2251 changed. If we run out of room in our array, give up; this should
2252 almost never happen. */
2253
2254 ops[0].op = op0;
2255 ops[0].neg = 0;
2256 ops[1].op = op1;
2257 ops[1].neg = (code == MINUS);
2258
2259 do
2260 {
2261 changed = 0;
2262
2263 for (i = 0; i < n_ops; i++)
2264 {
2265 rtx this_op = ops[i].op;
2266 int this_neg = ops[i].neg;
2267 enum rtx_code this_code = GET_CODE (this_op);
2268
2269 switch (this_code)
2270 {
2271 case PLUS:
2272 case MINUS:
2273 if (n_ops == 7)
2274 return NULL_RTX;
2275
2276 ops[n_ops].op = XEXP (this_op, 1);
2277 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2278 n_ops++;
2279
2280 ops[i].op = XEXP (this_op, 0);
2281 input_ops++;
2282 changed = 1;
2283 break;
2284
2285 case NEG:
2286 ops[i].op = XEXP (this_op, 0);
2287 ops[i].neg = ! this_neg;
2288 changed = 1;
2289 break;
2290
2291 case CONST:
2292 if (n_ops < 7
2293 && GET_CODE (XEXP (this_op, 0)) == PLUS
2294 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2295 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2296 {
2297 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2298 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2299 ops[n_ops].neg = this_neg;
2300 n_ops++;
2301 input_consts++;
2302 changed = 1;
2303 }
2304 break;
2305
2306 case NOT:
2307 /* ~a -> (-a - 1) */
2308 if (n_ops != 7)
2309 {
2310 ops[n_ops].op = constm1_rtx;
2311 ops[n_ops++].neg = this_neg;
2312 ops[i].op = XEXP (this_op, 0);
2313 ops[i].neg = !this_neg;
2314 changed = 1;
2315 }
2316 break;
2317
2318 case CONST_INT:
2319 if (this_neg)
2320 {
2321 ops[i].op = neg_const_int (mode, this_op);
2322 ops[i].neg = 0;
2323 changed = 1;
2324 }
2325 break;
2326
2327 default:
2328 break;
2329 }
2330 }
2331 }
2332 while (changed);
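
  /* At this point ops[] holds a flattened view of the expression.  For
     example (a sketch, not from the original source), an input of
     (minus A (plus B C)) expands to three entries:

	ops[0] = A, neg = 0
	ops[1] = B, neg = 1
	ops[2] = C, neg = 1

     representing A - B - C.  */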
2333
2334 /* If we only have two operands, we can't do anything. */
2335 if (n_ops <= 2 && !force)
2336 return NULL_RTX;
2337
2338 /* Count the number of CONSTs we didn't split above. */
2339 for (i = 0; i < n_ops; i++)
2340 if (GET_CODE (ops[i].op) == CONST)
2341 input_consts++;
2342
2343 /* Now simplify each pair of operands until nothing changes. The first
2344 time through just simplify constants against each other. */
2345
2346 first = 1;
2347 do
2348 {
2349 changed = first;
2350
2351 for (i = 0; i < n_ops - 1; i++)
2352 for (j = i + 1; j < n_ops; j++)
2353 {
2354 rtx lhs = ops[i].op, rhs = ops[j].op;
2355 int lneg = ops[i].neg, rneg = ops[j].neg;
2356
2357 if (lhs != 0 && rhs != 0
2358 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2359 {
2360 enum rtx_code ncode = PLUS;
2361
2362 if (lneg != rneg)
2363 {
2364 ncode = MINUS;
2365 if (lneg)
2366 tem = lhs, lhs = rhs, rhs = tem;
2367 }
2368 else if (swap_commutative_operands_p (lhs, rhs))
2369 tem = lhs, lhs = rhs, rhs = tem;
2370
2371 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2372
2373 /* Reject "simplifications" that just wrap the two
2374 arguments in a CONST. Failure to do so can result
2375 in infinite recursion with simplify_binary_operation
2376 when it calls us to simplify CONST operations. */
2377 if (tem
2378 && ! (GET_CODE (tem) == CONST
2379 && GET_CODE (XEXP (tem, 0)) == ncode
2380 && XEXP (XEXP (tem, 0), 0) == lhs
2381 && XEXP (XEXP (tem, 0), 1) == rhs)
2382 /* Don't allow -x + -1 -> ~x simplifications in the
2383 first pass. This allows us the chance to combine
2384 the -1 with other constants. */
2385 && ! (first
2386 && GET_CODE (tem) == NOT
2387 && XEXP (tem, 0) == rhs))
2388 {
2389 lneg &= rneg;
2390 if (GET_CODE (tem) == NEG)
2391 tem = XEXP (tem, 0), lneg = !lneg;
2392 if (GET_CODE (tem) == CONST_INT && lneg)
2393 tem = neg_const_int (mode, tem), lneg = 0;
2394
2395 ops[i].op = tem;
2396 ops[i].neg = lneg;
2397 ops[j].op = NULL_RTX;
2398 changed = 1;
2399 }
2400 }
2401 }
2402
2403 first = 0;
2404 }
2405 while (changed);
2406
2407 /* Pack all the operands to the lower-numbered entries. */
2408 for (i = 0, j = 0; j < n_ops; j++)
2409 if (ops[j].op)
2410 ops[i++] = ops[j];
2411 n_ops = i;
2412
2413 /* Sort the operations based on swap_commutative_operands_p. */
2414 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2415
2416 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2417 if (n_ops == 2
2418 && GET_CODE (ops[1].op) == CONST_INT
2419 && CONSTANT_P (ops[0].op)
2420 && ops[0].neg)
2421 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2422
2423 /* We suppressed creation of trivial CONST expressions in the
2424 combination loop to avoid recursion. Create one manually now.
2425 The combination loop should have ensured that there is exactly
2426 one CONST_INT, and the sort will have ensured that it is last
2427 in the array and that any other constant will be next-to-last. */
2428
2429 if (n_ops > 1
2430 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2431 && CONSTANT_P (ops[n_ops - 2].op))
2432 {
2433 rtx value = ops[n_ops - 1].op;
2434 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2435 value = neg_const_int (mode, value);
2436 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2437 n_ops--;
2438 }
2439
2440 /* Count the number of CONSTs that we generated. */
2441 n_consts = 0;
2442 for (i = 0; i < n_ops; i++)
2443 if (GET_CODE (ops[i].op) == CONST)
2444 n_consts++;
2445
2446 /* Give up if we didn't reduce the number of operands we had. Make
2447 sure we count a CONST as two operands. If we have the same
2448 number of operands, but have made more CONSTs than before, this
2449 is also an improvement, so accept it. */
2450 if (!force
2451 && (n_ops + n_consts > input_ops
2452 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2453 return NULL_RTX;
2454
2455 /* Put a non-negated operand first, if possible. */
2456
2457 for (i = 0; i < n_ops && ops[i].neg; i++)
2458 continue;
2459 if (i == n_ops)
2460 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2461 else if (i != 0)
2462 {
2463 tem = ops[0].op;
2464 ops[0] = ops[i];
2465 ops[i].op = tem;
2466 ops[i].neg = 1;
2467 }
2468
2469 /* Now make the result by performing the requested operations. */
2470 result = ops[0].op;
2471 for (i = 1; i < n_ops; i++)
2472 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2473 mode, result, ops[i].op);
2474
2475 return result;
2476 }
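
/* For instance (an illustrative sketch), simplifying
   (plus (minus A B) B) flattens to A + B - B; the pairwise pass folds
   (minus B B) to zero, the zero is then absorbed, and just A is
   returned.  */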
2477
2478 /* Like simplify_binary_operation except used for relational operators.
2479 MODE is the mode of the operands, not that of the result. If MODE
2480 is VOIDmode, both operands must also be VOIDmode and we compare the
2481 operands in "infinite precision".
2482
2483 If no simplification is possible, this function returns zero. Otherwise,
2484 it returns either const_true_rtx or const0_rtx. */
2485
2486 rtx
2487 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2488 rtx op0, rtx op1)
2489 {
2490 int equal, op0lt, op0ltu, op1lt, op1ltu;
2491 rtx tem;
2492 rtx trueop0;
2493 rtx trueop1;
2494
2495 if (mode == VOIDmode
2496 && (GET_MODE (op0) != VOIDmode
2497 || GET_MODE (op1) != VOIDmode))
2498 abort ();
2499
2500 /* If op0 is a compare, extract the comparison arguments from it. */
2501 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2502 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2503
2504 /* We can't simplify MODE_CC values since we don't know what the
2505 actual comparison is. */
2506 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2507 return 0;
2508
2509 /* Make sure the constant is second. */
2510 if (swap_commutative_operands_p (op0, op1))
2511 {
2512 tem = op0, op0 = op1, op1 = tem;
2513 code = swap_condition (code);
2514 }
2515
2516 trueop0 = avoid_constant_pool_reference (op0);
2517 trueop1 = avoid_constant_pool_reference (op1);
2518
2519 /* For integer comparisons of A and B maybe we can simplify A - B and can
2520 then simplify a comparison of that with zero. If A and B are both either
2521 a register or a CONST_INT, this can't help; testing for these cases will
2522 prevent infinite recursion here and speed things up.
2523
2524 If CODE is an unsigned comparison, then we can never do this optimization,
2525 because it gives an incorrect result if the subtraction wraps around zero.
2526 ANSI C defines unsigned operations such that they never overflow, and
2527 thus such cases cannot be ignored. */
2528
2529 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2530 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2531 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2532 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2533 && code != GTU && code != GEU && code != LTU && code != LEU)
2534 return simplify_relational_operation (signed_condition (code),
2535 mode, tem, const0_rtx);
2536
2537 if (flag_unsafe_math_optimizations && code == ORDERED)
2538 return const_true_rtx;
2539
2540 if (flag_unsafe_math_optimizations && code == UNORDERED)
2541 return const0_rtx;
2542
2543 /* For modes without NaNs, if the two operands are equal, we know the
2544 result except if they have side-effects. */
2545 if (! HONOR_NANS (GET_MODE (trueop0))
2546 && rtx_equal_p (trueop0, trueop1)
2547 && ! side_effects_p (trueop0))
2548 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2549
2550 /* If the operands are floating-point constants, see if we can fold
2551 the result. */
2552 else if (GET_CODE (trueop0) == CONST_DOUBLE
2553 && GET_CODE (trueop1) == CONST_DOUBLE
2554 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2555 {
2556 REAL_VALUE_TYPE d0, d1;
2557
2558 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2559 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2560
2561 /* Comparisons are unordered iff at least one of the values is NaN. */
2562 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2563 switch (code)
2564 {
2565 case UNEQ:
2566 case UNLT:
2567 case UNGT:
2568 case UNLE:
2569 case UNGE:
2570 case NE:
2571 case UNORDERED:
2572 return const_true_rtx;
2573 case EQ:
2574 case LT:
2575 case GT:
2576 case LE:
2577 case GE:
2578 case LTGT:
2579 case ORDERED:
2580 return const0_rtx;
2581 default:
2582 return 0;
2583 }
2584
2585 equal = REAL_VALUES_EQUAL (d0, d1);
2586 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2587 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2588 }
2589
2590 /* Otherwise, see if the operands are both integers. */
2591 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2592 && (GET_CODE (trueop0) == CONST_DOUBLE
2593 || GET_CODE (trueop0) == CONST_INT)
2594 && (GET_CODE (trueop1) == CONST_DOUBLE
2595 || GET_CODE (trueop1) == CONST_INT))
2596 {
2597 int width = GET_MODE_BITSIZE (mode);
2598 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2599 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2600
2601 /* Get the two words comprising each integer constant. */
2602 if (GET_CODE (trueop0) == CONST_DOUBLE)
2603 {
2604 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2605 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2606 }
2607 else
2608 {
2609 l0u = l0s = INTVAL (trueop0);
2610 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2611 }
2612
2613 if (GET_CODE (trueop1) == CONST_DOUBLE)
2614 {
2615 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2616 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2617 }
2618 else
2619 {
2620 l1u = l1s = INTVAL (trueop1);
2621 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2622 }
2623
2624 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2625 we have to sign or zero-extend the values. */
2626 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2627 {
2628 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2629 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2630
2631 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2632 l0s |= ((HOST_WIDE_INT) (-1) << width);
2633
2634 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2635 l1s |= ((HOST_WIDE_INT) (-1) << width);
2636 }
2637 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2638 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2639
2640 equal = (h0u == h1u && l0u == l1u);
2641 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2642 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2643 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2644 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2645 }
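
  /* A worked example (illustrative): comparing (const_int -1) with
     (const_int 1) in SImode gives equal == 0 and op0lt == 1, since
     signed -1 is less than 1, but op0ltu == 0, since as an unsigned
     32-bit value -1 is 0xffffffff and exceeds 1.  That is why LT and
     LTU can fold to different results below.  */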
2646
2647 /* Otherwise, there are some code-specific tests we can make. */
2648 else
2649 {
2650 switch (code)
2651 {
2652 case EQ:
2653 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2654 return const0_rtx;
2655 break;
2656
2657 case NE:
2658 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2659 return const_true_rtx;
2660 break;
2661
2662 case GEU:
2663 /* Unsigned values are never negative. */
2664 if (trueop1 == const0_rtx)
2665 return const_true_rtx;
2666 break;
2667
2668 case LTU:
2669 if (trueop1 == const0_rtx)
2670 return const0_rtx;
2671 break;
2672
2673 case LEU:
2674 /* Unsigned values are never greater than the largest
2675 unsigned value. */
2676 if (GET_CODE (trueop1) == CONST_INT
2677 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2678 && INTEGRAL_MODE_P (mode))
2679 return const_true_rtx;
2680 break;
2681
2682 case GTU:
2683 if (GET_CODE (trueop1) == CONST_INT
2684 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2685 && INTEGRAL_MODE_P (mode))
2686 return const0_rtx;
2687 break;
2688
2689 case LT:
2690 /* Optimize abs(x) < 0.0. */
2691 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2692 {
2693 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2694 : trueop0;
2695 if (GET_CODE (tem) == ABS)
2696 return const0_rtx;
2697 }
2698 break;
2699
2700 case GE:
2701 /* Optimize abs(x) >= 0.0. */
2702 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2703 {
2704 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2705 : trueop0;
2706 if (GET_CODE (tem) == ABS)
2707 return const_true_rtx;
2708 }
2709 break;
2710
2711 case UNGE:
2712 /* Optimize ! (abs(x) < 0.0). */
2713 if (trueop1 == CONST0_RTX (mode))
2714 {
2715 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2716 : trueop0;
2717 if (GET_CODE (tem) == ABS)
2718 return const_true_rtx;
2719 }
2720 break;
2721
2722 default:
2723 break;
2724 }
2725
2726 return 0;
2727 }
2728
2729 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2730 as appropriate. */
2731 switch (code)
2732 {
2733 case EQ:
2734 case UNEQ:
2735 return equal ? const_true_rtx : const0_rtx;
2736 case NE:
2737 case LTGT:
2738 return ! equal ? const_true_rtx : const0_rtx;
2739 case LT:
2740 case UNLT:
2741 return op0lt ? const_true_rtx : const0_rtx;
2742 case GT:
2743 case UNGT:
2744 return op1lt ? const_true_rtx : const0_rtx;
2745 case LTU:
2746 return op0ltu ? const_true_rtx : const0_rtx;
2747 case GTU:
2748 return op1ltu ? const_true_rtx : const0_rtx;
2749 case LE:
2750 case UNLE:
2751 return equal || op0lt ? const_true_rtx : const0_rtx;
2752 case GE:
2753 case UNGE:
2754 return equal || op1lt ? const_true_rtx : const0_rtx;
2755 case LEU:
2756 return equal || op0ltu ? const_true_rtx : const0_rtx;
2757 case GEU:
2758 return equal || op1ltu ? const_true_rtx : const0_rtx;
2759 case ORDERED:
2760 return const_true_rtx;
2761 case UNORDERED:
2762 return const0_rtx;
2763 default:
2764 abort ();
2765 }
2766 }
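
/* For example (an illustrative sketch),

     simplify_relational_operation (LTU, SImode, x, const0_rtx)

   folds to const0_rtx for any rtx x, because unsigned values are never
   less than zero.  */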
2767 \f
2768 /* Simplify CODE, an operation with result mode MODE and three operands,
2769 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2770 a constant. Return 0 if no simplification is possible. */
2771
2772 rtx
2773 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2774 enum machine_mode op0_mode, rtx op0, rtx op1,
2775 rtx op2)
2776 {
2777 unsigned int width = GET_MODE_BITSIZE (mode);
2778
2779 /* VOIDmode means "infinite" precision. */
2780 if (width == 0)
2781 width = HOST_BITS_PER_WIDE_INT;
2782
2783 switch (code)
2784 {
2785 case SIGN_EXTRACT:
2786 case ZERO_EXTRACT:
2787 if (GET_CODE (op0) == CONST_INT
2788 && GET_CODE (op1) == CONST_INT
2789 && GET_CODE (op2) == CONST_INT
2790 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2791 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2792 {
2793 /* Extracting a bit-field from a constant */
2794 HOST_WIDE_INT val = INTVAL (op0);
2795
2796 if (BITS_BIG_ENDIAN)
2797 val >>= (GET_MODE_BITSIZE (op0_mode)
2798 - INTVAL (op2) - INTVAL (op1));
2799 else
2800 val >>= INTVAL (op2);
2801
2802 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2803 {
2804 /* First zero-extend. */
2805 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2806 /* If desired, propagate sign bit. */
2807 if (code == SIGN_EXTRACT
2808 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2809 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2810 }
2811
2812 /* Clear the bits that don't belong in our mode,
2813 unless they and our sign bit are all one.
2814 So we get either a reasonable negative value or a reasonable
2815 unsigned value for this mode. */
2816 if (width < HOST_BITS_PER_WIDE_INT
2817 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2818 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2819 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2820
2821 return GEN_INT (val);
2822 }
2823 break;
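	  /* For example (an illustrative sketch, with little-endian bit
	     numbering): (zero_extract (const_int 0x1234) (const_int 4)
	     (const_int 4)) shifts the value right by 4 and masks it to
	     4 bits, yielding (const_int 3).  */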
2824
2825 case IF_THEN_ELSE:
2826 if (GET_CODE (op0) == CONST_INT)
2827 return op0 != const0_rtx ? op1 : op2;
2828
2829 /* Convert c ? a : a into "a". */
2830 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
2831 return op1;
2832
2833 /* Convert a != b ? a : b into "a". */
2834 if (GET_CODE (op0) == NE
2835 && ! side_effects_p (op0)
2836 && ! HONOR_NANS (mode)
2837 && ! HONOR_SIGNED_ZEROS (mode)
2838 && ((rtx_equal_p (XEXP (op0, 0), op1)
2839 && rtx_equal_p (XEXP (op0, 1), op2))
2840 || (rtx_equal_p (XEXP (op0, 0), op2)
2841 && rtx_equal_p (XEXP (op0, 1), op1))))
2842 return op1;
2843
2844 /* Convert a == b ? a : b into "b". */
2845 if (GET_CODE (op0) == EQ
2846 && ! side_effects_p (op0)
2847 && ! HONOR_NANS (mode)
2848 && ! HONOR_SIGNED_ZEROS (mode)
2849 && ((rtx_equal_p (XEXP (op0, 0), op1)
2850 && rtx_equal_p (XEXP (op0, 1), op2))
2851 || (rtx_equal_p (XEXP (op0, 0), op2)
2852 && rtx_equal_p (XEXP (op0, 1), op1))))
2853 return op2;
2854
2855 if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2856 {
2857 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2858 ? GET_MODE (XEXP (op0, 1))
2859 : GET_MODE (XEXP (op0, 0)));
2860 rtx temp;
2861 if (cmp_mode == VOIDmode)
2862 cmp_mode = op0_mode;
2863 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2864 XEXP (op0, 0), XEXP (op0, 1));
2865
2866 /* See if any simplifications were possible. */
2867 if (temp == const0_rtx)
2868 return op2;
2869 else if (temp == const_true_rtx)
2870 return op1;
2871 else if (temp)
2872 abort ();
2873
2874 /* Look for happy constants in op1 and op2. */
2875 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2876 {
2877 HOST_WIDE_INT t = INTVAL (op1);
2878 HOST_WIDE_INT f = INTVAL (op2);
2879
2880 if (t == STORE_FLAG_VALUE && f == 0)
2881 code = GET_CODE (op0);
2882 else if (t == 0 && f == STORE_FLAG_VALUE)
2883 {
2884 enum rtx_code tmp;
2885 tmp = reversed_comparison_code (op0, NULL_RTX);
2886 if (tmp == UNKNOWN)
2887 break;
2888 code = tmp;
2889 }
2890 else
2891 break;
2892
2893 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2894 }
2895 }
2896 break;
2897
2898 case VEC_MERGE:
2899 if (GET_MODE (op0) != mode
2900 || GET_MODE (op1) != mode
2901 || !VECTOR_MODE_P (mode))
2902 abort ();
2903 op2 = avoid_constant_pool_reference (op2);
2904 if (GET_CODE (op2) == CONST_INT)
2905 {
2906 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2907 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2908 int mask = (1 << n_elts) - 1;
2909
2910 if (!(INTVAL (op2) & mask))
2911 return op1;
2912 if ((INTVAL (op2) & mask) == mask)
2913 return op0;
2914
2915 op0 = avoid_constant_pool_reference (op0);
2916 op1 = avoid_constant_pool_reference (op1);
2917 if (GET_CODE (op0) == CONST_VECTOR
2918 && GET_CODE (op1) == CONST_VECTOR)
2919 {
2920 rtvec v = rtvec_alloc (n_elts);
2921 unsigned int i;
2922
2923 for (i = 0; i < n_elts; i++)
2924 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2925 ? CONST_VECTOR_ELT (op0, i)
2926 : CONST_VECTOR_ELT (op1, i));
2927 return gen_rtx_CONST_VECTOR (mode, v);
2928 }
2929 }
2930 break;
2931
2932 default:
2933 abort ();
2934 }
2935
2936 return 0;
2937 }
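
/* A usage sketch (illustrative; A and B stand for arbitrary rtxes):

     simplify_ternary_operation (IF_THEN_ELSE, SImode, VOIDmode,
				 const1_rtx, A, B)

   returns A, because the condition is a nonzero constant.  */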
2938
2939 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
2940 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
2941
2942 Works by unpacking OP into a collection of 8-bit values
2943 represented as a little-endian array of 'unsigned char', selecting by BYTE,
2944 and then repacking them again for OUTERMODE. */
2945
2946 static rtx
2947 simplify_immed_subreg (enum machine_mode outermode, rtx op,
2948 enum machine_mode innermode, unsigned int byte)
2949 {
2950 /* We support up to 512-bit values (for V8DFmode). */
2951 enum {
2952 max_bitsize = 512,
2953 value_bit = 8,
2954 value_mask = (1 << value_bit) - 1
2955 };
2956 unsigned char value[max_bitsize / value_bit];
2957 int value_start;
2958 int i;
2959 int elem;
2960
2961 int num_elem;
2962 rtx * elems;
2963 int elem_bitsize;
2964 rtx result_s;
2965 rtvec result_v = NULL;
2966 enum mode_class outer_class;
2967 enum machine_mode outer_submode;
2968
2969 /* Some ports misuse CCmode. */
2970 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
2971 return op;
2972
2973 /* Unpack the value. */
2974
2975 if (GET_CODE (op) == CONST_VECTOR)
2976 {
2977 num_elem = CONST_VECTOR_NUNITS (op);
2978 elems = &CONST_VECTOR_ELT (op, 0);
2979 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
2980 }
2981 else
2982 {
2983 num_elem = 1;
2984 elems = &op;
2985 elem_bitsize = max_bitsize;
2986 }
2987
2988 if (BITS_PER_UNIT % value_bit != 0)
2989 abort (); /* Too complicated; reducing value_bit may help. */
2990 if (elem_bitsize % BITS_PER_UNIT != 0)
2991 abort (); /* I don't know how to handle endianness of sub-units. */
2992
2993 for (elem = 0; elem < num_elem; elem++)
2994 {
2995 unsigned char * vp;
2996 rtx el = elems[elem];
2997
2998 /* Vectors are kept in target memory order. (This is probably
2999 a mistake.) */
3000 {
3001 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3002 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3003 / BITS_PER_UNIT);
3004 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3005 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3006 unsigned bytele = (subword_byte % UNITS_PER_WORD
3007 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3008 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3009 }
3010
3011 switch (GET_CODE (el))
3012 {
3013 case CONST_INT:
3014 for (i = 0;
3015 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3016 i += value_bit)
3017 *vp++ = INTVAL (el) >> i;
3018 /* CONST_INTs are always logically sign-extended. */
3019 for (; i < elem_bitsize; i += value_bit)
3020 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3021 break;
3022
3023 case CONST_DOUBLE:
3024 if (GET_MODE (el) == VOIDmode)
3025 {
3026 /* If this triggers, someone should have generated a
3027 CONST_INT instead. */
3028 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3029 abort ();
3030
3031 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3032 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3033 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3034 {
3035 *vp++
3036 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3037 i += value_bit;
3038 }
3039 /* It shouldn't matter what's done here, so fill it with
3040 zero. */
3041 for (; i < max_bitsize; i += value_bit)
3042 *vp++ = 0;
3043 }
3044 else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
3045 {
3046 long tmp[max_bitsize / 32];
3047 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3048
3049 if (bitsize > elem_bitsize)
3050 abort ();
3051 if (bitsize % value_bit != 0)
3052 abort ();
3053
3054 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3055 GET_MODE (el));
3056
3057 /* real_to_target produces its result in words affected by
3058 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3059 and use WORDS_BIG_ENDIAN instead; see the documentation
3060 of SUBREG in rtl.texi. */
3061 for (i = 0; i < bitsize; i += value_bit)
3062 {
3063 int ibase;
3064 if (WORDS_BIG_ENDIAN)
3065 ibase = bitsize - 1 - i;
3066 else
3067 ibase = i;
3068 *vp++ = tmp[ibase / 32] >> i % 32;
3069 }
3070
3071 /* It shouldn't matter what's done here, so fill it with
3072 zero. */
3073 for (; i < elem_bitsize; i += value_bit)
3074 *vp++ = 0;
3075 }
3076 else
3077 abort ();
3078 break;
3079
3080 default:
3081 abort ();
3082 }
3083 }
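
  /* At this point VALUE[] holds the operand as little-endian 8-bit
     chunks.  For instance (an illustrative sketch), an SImode
     (const_int 0x12345678) unpacks to value[0..3] == {0x78, 0x56, 0x34,
     0x12}, with the remaining chunks filled in by sign extension.  */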
3084
3085 /* Now, pick the right byte to start with. */
3086 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3087 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3088 will already have offset 0. */
3089 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3090 {
3091 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3092 - byte);
3093 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3094 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3095 byte = (subword_byte % UNITS_PER_WORD
3096 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3097 }
3098
3099 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3100 so if it's become negative it will instead be very large.) */
3101 if (byte >= GET_MODE_SIZE (innermode))
3102 abort ();
3103
3104 /* Convert from bytes to chunks of size value_bit. */
3105 value_start = byte * (BITS_PER_UNIT / value_bit);
3106
3107 /* Re-pack the value. */
3108
3109 if (VECTOR_MODE_P (outermode))
3110 {
3111 num_elem = GET_MODE_NUNITS (outermode);
3112 result_v = rtvec_alloc (num_elem);
3113 elems = &RTVEC_ELT (result_v, 0);
3114 outer_submode = GET_MODE_INNER (outermode);
3115 }
3116 else
3117 {
3118 num_elem = 1;
3119 elems = &result_s;
3120 outer_submode = outermode;
3121 }
3122
3123 outer_class = GET_MODE_CLASS (outer_submode);
3124 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3125
3126 if (elem_bitsize % value_bit != 0)
3127 abort ();
3128 if (elem_bitsize + value_start * value_bit > max_bitsize)
3129 abort ();
3130
3131 for (elem = 0; elem < num_elem; elem++)
3132 {
3133 unsigned char *vp;
3134
3135 /* Vectors are stored in target memory order. (This is probably
3136 a mistake.) */
3137 {
3138 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3139 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3140 / BITS_PER_UNIT);
3141 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3142 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3143 unsigned bytele = (subword_byte % UNITS_PER_WORD
3144 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3145 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3146 }
3147
3148 switch (outer_class)
3149 {
3150 case MODE_INT:
3151 case MODE_PARTIAL_INT:
3152 {
3153 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3154
3155 for (i = 0;
3156 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3157 i += value_bit)
3158 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3159 for (; i < elem_bitsize; i += value_bit)
3160 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3161 << (i - HOST_BITS_PER_WIDE_INT));
3162
3163 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3164 know why. */
3165 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3166 elems[elem] = gen_int_mode (lo, outer_submode);
3167 else
3168 elems[elem] = immed_double_const (lo, hi, outer_submode);
3169 }
3170 break;
3171
3172 case MODE_FLOAT:
3173 {
3174 REAL_VALUE_TYPE r;
3175 long tmp[max_bitsize / 32];
3176
3177 /* real_from_target wants its input in words affected by
3178 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3179 and use WORDS_BIG_ENDIAN instead; see the documentation
3180 of SUBREG in rtl.texi. */
3181 for (i = 0; i < max_bitsize / 32; i++)
3182 tmp[i] = 0;
3183 for (i = 0; i < elem_bitsize; i += value_bit)
3184 {
3185 int ibase;
3186 if (WORDS_BIG_ENDIAN)
3187 ibase = elem_bitsize - 1 - i;
3188 else
3189 ibase = i;
3190 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3191 }
3192
3193 real_from_target (&r, tmp, outer_submode);
3194 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3195 }
3196 break;
3197
3198 default:
3199 abort ();
3200 }
3201 }
3202 if (VECTOR_MODE_P (outermode))
3203 return gen_rtx_CONST_VECTOR (outermode, result_v);
3204 else
3205 return result_s;
3206 }
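
/* For example (an illustrative sketch), on a little-endian target

     simplify_subreg (QImode, GEN_INT (0x1234), HImode, 0)

   reaches this routine and returns (const_int 0x34), the low-order
   byte; with BYTE equal to 1 it returns (const_int 0x12) instead.  */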
3207
3208 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3209 Return 0 if no simplifications are possible. */
3210 rtx
3211 simplify_subreg (enum machine_mode outermode, rtx op,
3212 enum machine_mode innermode, unsigned int byte)
3213 {
3214 /* Little bit of sanity checking. */
3215 if (innermode == VOIDmode || outermode == VOIDmode
3216 || innermode == BLKmode || outermode == BLKmode)
3217 abort ();
3218
3219 if (GET_MODE (op) != innermode
3220 && GET_MODE (op) != VOIDmode)
3221 abort ();
3222
3223 if (byte % GET_MODE_SIZE (outermode)
3224 || byte >= GET_MODE_SIZE (innermode))
3225 abort ();
3226
3227 if (outermode == innermode && !byte)
3228 return op;
3229
3230 if (GET_CODE (op) == CONST_INT
3231 || GET_CODE (op) == CONST_DOUBLE
3232 || GET_CODE (op) == CONST_VECTOR)
3233 return simplify_immed_subreg (outermode, op, innermode, byte);
3234
3235 /* Changing mode twice with SUBREG => just change it once,
3236 or not at all if changing back to the starting mode of OP. */
3237 if (GET_CODE (op) == SUBREG)
3238 {
3239 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3240 int final_offset = byte + SUBREG_BYTE (op);
3241 rtx new;
3242
3243 if (outermode == innermostmode
3244 && byte == 0 && SUBREG_BYTE (op) == 0)
3245 return SUBREG_REG (op);
3246
3247 /* The SUBREG_BYTE represents the offset, as if the value were stored
3248 in memory. An irritating exception is the paradoxical subreg, where
3249 we define SUBREG_BYTE to be 0; on big-endian machines this value
3250 would otherwise be negative. For a moment, undo this exception. */
3251 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3252 {
3253 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3254 if (WORDS_BIG_ENDIAN)
3255 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3256 if (BYTES_BIG_ENDIAN)
3257 final_offset += difference % UNITS_PER_WORD;
3258 }
3259 if (SUBREG_BYTE (op) == 0
3260 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3261 {
3262 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3263 if (WORDS_BIG_ENDIAN)
3264 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3265 if (BYTES_BIG_ENDIAN)
3266 final_offset += difference % UNITS_PER_WORD;
3267 }
3268
3269 /* See whether resulting subreg will be paradoxical. */
3270 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3271 {
3272 /* In nonparadoxical subregs we can't handle negative offsets. */
3273 if (final_offset < 0)
3274 return NULL_RTX;
3275 /* Bail out in case resulting subreg would be incorrect. */
3276 if (final_offset % GET_MODE_SIZE (outermode)
3277 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3278 return NULL_RTX;
3279 }
3280 else
3281 {
3282 int offset = 0;
3283 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3284
3285 /* For a paradoxical subreg, see if we are still looking at the
3286 lower part. If so, our SUBREG_BYTE will be 0. */
3287 if (WORDS_BIG_ENDIAN)
3288 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3289 if (BYTES_BIG_ENDIAN)
3290 offset += difference % UNITS_PER_WORD;
3291 if (offset == final_offset)
3292 final_offset = 0;
3293 else
3294 return NULL_RTX;
3295 }
3296
3297 /* Recurse for further possible simplifications. */
3298 new = simplify_subreg (outermode, SUBREG_REG (op),
3299 GET_MODE (SUBREG_REG (op)),
3300 final_offset);
3301 if (new)
3302 return new;
3303 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3304 }
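
  /* For instance (an illustrative sketch, assuming a pseudo register),

	 (subreg:QI (subreg:HI (reg:SI x) 0) 0)

     collapses here to the single (subreg:QI (reg:SI x) 0).  */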
3305
3306 /* SUBREG of a hard register => just change the register number
3307 and/or mode. If the hard register is not valid in that mode,
3308 suppress this simplification. If the hard register is the stack,
3309 frame, or argument pointer, leave this as a SUBREG. */
3310
3311 if (REG_P (op)
3312 && (! REG_FUNCTION_VALUE_P (op)
3313 || ! rtx_equal_function_value_matters)
3314 && REGNO (op) < FIRST_PSEUDO_REGISTER
3315 #ifdef CANNOT_CHANGE_MODE_CLASS
3316 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3317 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3318 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3319 #endif
3320 && ((reload_completed && !frame_pointer_needed)
3321 || (REGNO (op) != FRAME_POINTER_REGNUM
3322 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3323 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3324 #endif
3325 ))
3326 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3327 && REGNO (op) != ARG_POINTER_REGNUM
3328 #endif
3329 && REGNO (op) != STACK_POINTER_REGNUM
3330 && subreg_offset_representable_p (REGNO (op), innermode,
3331 byte, outermode))
3332 {
3333 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3334 int final_regno = subreg_hard_regno (tem, 0);
3335
3336 /* ??? We do allow it if the current REG is not valid for
3337 its mode. This is a kludge to work around how float/complex
3338 arguments are passed on 32-bit SPARC and should be fixed. */
3339 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3340 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3341 {
3342 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3343
3344 /* Propagate the original regno. We don't have any way to specify
3345 an offset inside the original regno, so do so only for the lowpart.
3346 The information is used only by alias analysis, which cannot
3347 grok partial registers anyway. */
3348
3349 if (subreg_lowpart_offset (outermode, innermode) == byte)
3350 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3351 return x;
3352 }
3353 }
3354
3355 /* If we have a SUBREG of a register that we are replacing and we are
3356 replacing it with a MEM, make a new MEM and try replacing the
3357 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3358 or if we would be widening it. */
3359
3360 if (GET_CODE (op) == MEM
3361 && ! mode_dependent_address_p (XEXP (op, 0))
3362 /* Allow splitting of volatile memory references in case we don't
3363 have an instruction to move the whole thing. */
3364 && (! MEM_VOLATILE_P (op)
3365 || ! have_insn_for (SET, innermode))
3366 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3367 return adjust_address_nv (op, outermode, byte);
3368
3369 /* Handle complex values represented as CONCAT
3370 of real and imaginary part. */
3371 if (GET_CODE (op) == CONCAT)
3372 {
3373 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3374 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3375 unsigned int final_offset;
3376 rtx res;
3377
3378 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3379 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3380 if (res)
3381 return res;
3382 /* We can at least simplify it by referring directly to the relevant part. */
3383 return gen_rtx_SUBREG (outermode, part, final_offset);
3384 }
3385
3386 return NULL_RTX;
3387 }
3388
3389 /* Make a SUBREG operation or equivalent if it folds. */
3390
3391 rtx
3392 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3393 enum machine_mode innermode, unsigned int byte)
3394 {
3395 rtx new;
3396 /* Little bit of sanity checking. */
3397 if (innermode == VOIDmode || outermode == VOIDmode
3398 || innermode == BLKmode || outermode == BLKmode)
3399 abort ();
3400
3401 if (GET_MODE (op) != innermode
3402 && GET_MODE (op) != VOIDmode)
3403 abort ();
3404
3405 if (byte % GET_MODE_SIZE (outermode)
3406 || byte >= GET_MODE_SIZE (innermode))
3407 abort ();
3408
3409 if (GET_CODE (op) == QUEUED)
3410 return NULL_RTX;
3411
3412 new = simplify_subreg (outermode, op, innermode, byte);
3413 if (new)
3414 return new;
3415
3416 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3417 return NULL_RTX;
3418
3419 return gen_rtx_SUBREG (outermode, op, byte);
3420 }
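
/* A usage sketch (illustrative; the register number is a placeholder):

     rtx lo = simplify_gen_subreg (SImode, gen_rtx_REG (DImode, 1),
				   DImode, 0);

   returns the folded lowpart when the subreg simplifies (e.g. for a
   suitable hard register) and otherwise builds
   (subreg:SI (reg:DI 1) 0).  */
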
3421 /* Simplify X, an rtx expression.
3422
3423 Return the simplified expression or NULL if no simplifications
3424 were possible.
3425
3426 This is the preferred entry point into the simplification routines;
3427 however, we still allow passes to call the more specific routines.
3428
3429 Right now GCC has three (yes, three) major bodies of RTL simplification
3430 code that need to be unified.
3431
3432 1. fold_rtx in cse.c. This code uses various CSE specific
3433 information to aid in RTL simplification.
3434
3435 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3436 it uses combine specific information to aid in RTL
3437 simplification.
3438
3439 3. The routines in this file.
3440
3441
3442 Long term we want to only have one body of simplification code; to
3443 get to that state I recommend the following steps:
3444
3445 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3446 which do not depend on pass-specific state into these routines.
3447
3448 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3449 use this routine whenever possible.
3450
3451 3. Allow for pass dependent state to be provided to these
3452 routines and add simplifications based on the pass dependent
3453 state. Remove code from cse.c & combine.c that becomes
3454 redundant/dead.
3455
3456 It will take time, but ultimately the compiler will be easier to
3457 maintain and improve. It's totally silly that when we add a
3458 simplification it needs to be added to 4 places (3 for RTL
3459 simplification and 1 for tree simplification). */
3460
3461 rtx
3462 simplify_rtx (rtx x)
3463 {
3464 enum rtx_code code = GET_CODE (x);
3465 enum machine_mode mode = GET_MODE (x);
3466 rtx temp;
3467
3468 switch (GET_RTX_CLASS (code))
3469 {
3470 case '1':
3471 return simplify_unary_operation (code, mode,
3472 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3473 case 'c':
3474 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3475 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3476
3477 /* Fall through.... */
3478
3479 case '2':
3480 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3481
3482 case '3':
3483 case 'b':
3484 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3485 XEXP (x, 0), XEXP (x, 1),
3486 XEXP (x, 2));
3487
3488 case '<':
3489 temp = simplify_relational_operation (code,
3490 ((GET_MODE (XEXP (x, 0))
3491 != VOIDmode)
3492 ? GET_MODE (XEXP (x, 0))
3493 : GET_MODE (XEXP (x, 1))),
3494 XEXP (x, 0), XEXP (x, 1));
3495 #ifdef FLOAT_STORE_FLAG_VALUE
3496 if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3497 {
3498 if (temp == const0_rtx)
3499 temp = CONST0_RTX (mode);
3500 else
3501 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3502 mode);
3503 }
3504 #endif
3505 return temp;
3506
3507 case 'x':
3508 if (code == SUBREG)
3509 return simplify_gen_subreg (mode, SUBREG_REG (x),
3510 GET_MODE (SUBREG_REG (x)),
3511 SUBREG_BYTE (x));
3512 if (code == CONSTANT_P_RTX)
3513 {
3514 if (CONSTANT_P (XEXP (x, 0)))
3515 return const1_rtx;
3516 }
3517 break;
3518
3519 case 'o':
3520 if (code == LO_SUM)
3521 {
3522 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3523 if (GET_CODE (XEXP (x, 0)) == HIGH
3524 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3525 return XEXP (x, 1);
3526 }
3527 break;
3528
3529 default:
3530 break;
3531 }
3532 return NULL;
3533 }